diff --git a/ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..08c660fea0a383b3502e19294f55ceb7753b9a80 --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b5aaa4a9b2d2e1b5ceeb53749e127800f86c8aa5e016482a9534a82ca859ff3 +size 50332828 diff --git a/ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..74256fc5920be22c6a49b1e4006fe1a145555e98 --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6e8bafe566227b5eaee5a5cd13fa86d2ece4511bd3aa07e9a9f8ff19634301a +size 50332843 diff --git a/ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..41a059ed6f08285c46202d97c1b9d2115b284048 --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a375f12df0268d460178f978a3df67864885bc7826e1e3cd7efc12c82f39fe3b +size 50332749 diff --git a/ckpts/universal/global_step40/zero/9.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/9.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..a808dc122ef57028dd02bae35f734566714de42f --- /dev/null +++ b/ckpts/universal/global_step40/zero/9.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56892653a52e4bcdb41de2d759d8c75f2b8408862fd6b787eeccb121b2dbffa5 +size 9293 diff --git a/venv/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa163b9b83d9bcf1d4339308d0285a73e3afb1cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d01a6b407a4395d4796d7ca05a8451c60e79819 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffd30dfa0fe8d4722894cb67b6c38124d45f13f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a1cf371471c50eec4d74db852b67eb707cfd60c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5959c96300a6983af4494cd135459e133df8281 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9024c486f2c734c57c04d63443d75af1f78f3d2c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/data/datasets/glue.py b/venv/lib/python3.10/site-packages/transformers/data/datasets/glue.py new file mode 100644 index 0000000000000000000000000000000000000000..72df3bece21925d15748d53bd82def67bfdd82bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/data/datasets/glue.py @@ -0,0 +1,161 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import time +import warnings +from dataclasses import dataclass, field +from enum import Enum +from typing import List, Optional, Union + +import torch +from filelock import FileLock +from torch.utils.data import Dataset + +from ...tokenization_utils_base import PreTrainedTokenizerBase +from ...utils import logging +from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors +from ..processors.utils import InputFeatures + + +logger = logging.get_logger(__name__) + + +@dataclass +class GlueDataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. + + Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command + line. + """ + + task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())}) + data_dir: str = field( + metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} + ) + max_seq_length: int = field( + default=128, + metadata={ + "help": ( + "The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + + def __post_init__(self): + self.task_name = self.task_name.lower() + + +class Split(Enum): + train = "train" + dev = "dev" + test = "test" + + +class GlueDataset(Dataset): + """ + This will be superseded by a framework-agnostic approach soon. + """ + + args: GlueDataTrainingArguments + output_mode: str + features: List[InputFeatures] + + def __init__( + self, + args: GlueDataTrainingArguments, + tokenizer: PreTrainedTokenizerBase, + limit_length: Optional[int] = None, + mode: Union[str, Split] = Split.train, + cache_dir: Optional[str] = None, + ): + warnings.warn( + "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " + "library. You can have a look at this example script for pointers: " + "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py", + FutureWarning, + ) + self.args = args + self.processor = glue_processors[args.task_name]() + self.output_mode = glue_output_modes[args.task_name] + if isinstance(mode, str): + try: + mode = Split[mode] + except KeyError: + raise KeyError("mode is not a valid split name") + # Load data features from cache or dataset file + cached_features_file = os.path.join( + cache_dir if cache_dir is not None else args.data_dir, + f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}", + ) + label_list = self.processor.get_labels() + if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( + "RobertaTokenizer", + "RobertaTokenizerFast", + "XLMRobertaTokenizer", + "BartTokenizer", + "BartTokenizerFast", + ): + # HACK(label indices are swapped in RoBERTa pretrained model) + label_list[1], label_list[2] = label_list[2], label_list[1] + self.label_list = label_list + + # Make sure only the first process in distributed training processes the dataset, + # and the others will use the cache. + lock_path = cached_features_file + ".lock" + with FileLock(lock_path): + if os.path.exists(cached_features_file) and not args.overwrite_cache: + start = time.time() + self.features = torch.load(cached_features_file) + logger.info( + f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]" + ) + else: + logger.info(f"Creating features from dataset file at {args.data_dir}") + + if mode == Split.dev: + examples = self.processor.get_dev_examples(args.data_dir) + elif mode == Split.test: + examples = self.processor.get_test_examples(args.data_dir) + else: + examples = self.processor.get_train_examples(args.data_dir) + if limit_length is not None: + examples = examples[:limit_length] + self.features = glue_convert_examples_to_features( + examples, + tokenizer, + max_length=args.max_seq_length, + label_list=label_list, + output_mode=self.output_mode, + ) + start = time.time() + torch.save(self.features, cached_features_file) + # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
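+ # (Most of that time is likely spent pickling the list of InputFeatures Python objects. Note that the cache file name above encodes the tokenizer class, max_seq_length and task name, so the cache is rebuilt whenever any of those change.)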
+ logger.info( + f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" + ) + + def __len__(self): + return len(self.features) + + def __getitem__(self, i) -> InputFeatures: + return self.features[i] + + def get_labels(self): + return self.label_list diff --git a/venv/lib/python3.10/site-packages/transformers/data/processors/__init__.py b/venv/lib/python3.10/site-packages/transformers/data/processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a26ab5776d74715428b10c4d9cd943e53b253785 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/data/processors/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels +from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features +from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor +from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels diff --git a/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff5ce10fe62e6967a651cf8c58058172a25e53f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aee602f483a440b9ddf6ad3b617e61a9ba52c867 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bca0c44b845e86747206fa71b954cf314eae0e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69694fda36a1afb62f1304c8f185a0194ed5eeb5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..669d73c642ce8c8ee64302baf4fc478a7db37ca7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/data/processors/glue.py b/venv/lib/python3.10/site-packages/transformers/data/processors/glue.py new file mode 100644 index 0000000000000000000000000000000000000000..3d22968c9d06323c7c1cd4b00e5fcd2e6cf3f35d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/data/processors/glue.py @@ -0,0 +1,643 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" GLUE processors and helpers""" + +import os +import warnings +from dataclasses import asdict +from enum import Enum +from typing import List, Optional, Union + +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import is_tf_available, logging +from .utils import DataProcessor, InputExample, InputFeatures + + +if is_tf_available(): + import tensorflow as tf + +logger = logging.get_logger(__name__) + +DEPRECATION_WARNING = ( + "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " + "library. You can have a look at this example script for pointers: " + "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" +) + + +def glue_convert_examples_to_features( + examples: Union[List[InputExample], "tf.data.Dataset"], + tokenizer: PreTrainedTokenizer, + max_length: Optional[int] = None, + task=None, + label_list=None, + output_mode=None, +): + """ + Loads a data file into a list of `InputFeatures` + + Args: + examples: List of `InputExamples` or `tf.data.Dataset` containing the examples. + tokenizer: Instance of a tokenizer that will tokenize the examples + max_length: Maximum example length. Defaults to the tokenizer's max_len + task: GLUE task + label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method + output_mode: String indicating the output mode. Either `regression` or `classification` + + Returns: + If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific + features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which + can be fed to the model. 
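+ + Example (an illustrative sketch, assuming `data_dir` contains the GLUE MRPC tsv files and `tokenizer` is a `PreTrainedTokenizer` instance): + + ```python + processor = glue_processors["mrpc"]() + examples = processor.get_dev_examples(data_dir) + features = glue_convert_examples_to_features(examples, tokenizer, max_length=128, task="mrpc") + ```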
+ + """ + warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning) + if is_tf_available() and isinstance(examples, tf.data.Dataset): + if task is None: + raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.") + return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) + return _glue_convert_examples_to_features( + examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode + ) + + +if is_tf_available(): + + def _tf_glue_convert_examples_to_features( + examples: tf.data.Dataset, + tokenizer: PreTrainedTokenizer, + task=str, + max_length: Optional[int] = None, + ) -> tf.data.Dataset: + """ + Returns: + A `tf.data.Dataset` containing the task-specific features. + + """ + processor = glue_processors[task]() + examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples] + features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) + label_type = tf.float32 if task == "sts-b" else tf.int64 + + def gen(): + for ex in features: + d = {k: v for k, v in asdict(ex).items() if v is not None} + label = d.pop("label") + yield (d, label) + + input_names = tokenizer.model_input_names + + return tf.data.Dataset.from_generator( + gen, + ({k: tf.int32 for k in input_names}, label_type), + ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), + ) + + +def _glue_convert_examples_to_features( + examples: List[InputExample], + tokenizer: PreTrainedTokenizer, + max_length: Optional[int] = None, + task=None, + label_list=None, + output_mode=None, +): + if max_length is None: + max_length = tokenizer.model_max_length + + if task is not None: + processor = glue_processors[task]() + if label_list is None: + label_list = processor.get_labels() + logger.info(f"Using label list {label_list} for task {task}") + if output_mode is None: + output_mode = glue_output_modes[task] + logger.info(f"Using output mode {output_mode} for task {task}") + + label_map = {label: i for i, label in enumerate(label_list)} + + def label_from_example(example: InputExample) -> Union[int, float, None]: + if example.label is None: + return None + if output_mode == "classification": + return label_map[example.label] + elif output_mode == "regression": + return float(example.label) + raise KeyError(output_mode) + + labels = [label_from_example(example) for example in examples] + + batch_encoding = tokenizer( + [(example.text_a, example.text_b) for example in examples], + max_length=max_length, + padding="max_length", + truncation=True, + ) + + features = [] + for i in range(len(examples)): + inputs = {k: batch_encoding[k][i] for k in batch_encoding} + + feature = InputFeatures(**inputs, label=labels[i]) + features.append(feature) + + for i, example in enumerate(examples[:5]): + logger.info("*** Example ***") + logger.info(f"guid: {example.guid}") + logger.info(f"features: {features[i]}") + + return features + + +class OutputMode(Enum): + classification = "classification" + regression = "regression" + + +class MrpcProcessor(DataProcessor): + """Processor for the MRPC data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + 
tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}") + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{i}" + text_a = line[3] + text_b = line[4] + label = None if set_type == "test" else line[0] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MnliProcessor(DataProcessor): + """Processor for the MultiNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["premise"].numpy().decode("utf-8"), + tensor_dict["hypothesis"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched") + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[8] + text_b = line[9] + label = None if set_type.startswith("test") else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MnliMismatchedProcessor(MnliProcessor): + """Processor for the MultiNLI Mismatched data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched") + + +class ColaProcessor(DataProcessor): + """Processor for the CoLA data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + 
warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence"].numpy().decode("utf-8"), + None, + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + test_mode = set_type == "test" + if test_mode: + lines = lines[1:] + text_index = 1 if test_mode else 3 + examples = [] + for i, line in enumerate(lines): + guid = f"{set_type}-{i}" + text_a = line[text_index] + label = None if test_mode else line[1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class Sst2Processor(DataProcessor): + """Processor for the SST-2 data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence"].numpy().decode("utf-8"), + None, + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + text_index = 1 if set_type == "test" else 0 + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{i}" + text_a = line[text_index] + label = None if set_type == "test" else line[1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class StsbProcessor(DataProcessor): + """Processor for the STS-B data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return 
self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return [None] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[7] + text_b = line[8] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class QqpProcessor(DataProcessor): + """Processor for the QQP data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["question1"].numpy().decode("utf-8"), + tensor_dict["question2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + test_mode = set_type == "test" + q1_index = 1 if test_mode else 3 + q2_index = 2 if test_mode else 4 + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + try: + text_a = line[q1_index] + text_b = line[q2_index] + label = None if test_mode else line[5] + except IndexError: + continue + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class QnliProcessor(DataProcessor): + """Processor for the QNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["question"].numpy().decode("utf-8"), + tensor_dict["sentence"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = 
[] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class RteProcessor(DataProcessor): + """Processor for the RTE data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class WnliProcessor(DataProcessor): + """Processor for the WNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +glue_tasks_num_labels = { + "cola": 2, + "mnli": 3, + "mrpc": 2, + "sst-2": 2, + "sts-b": 1, + "qqp": 2, + "qnli": 2, + "rte": 2, + "wnli": 2, +} + +glue_processors = { + "cola": ColaProcessor, + "mnli": MnliProcessor, + "mnli-mm": MnliMismatchedProcessor, + "mrpc": 
MrpcProcessor, + "sst-2": Sst2Processor, + "sts-b": StsbProcessor, + "qqp": QqpProcessor, + "qnli": QnliProcessor, + "rte": RteProcessor, + "wnli": WnliProcessor, +} + +glue_output_modes = { + "cola": "classification", + "mnli": "classification", + "mnli-mm": "classification", + "mrpc": "classification", + "sst-2": "classification", + "sts-b": "regression", + "qqp": "classification", + "qnli": "classification", + "rte": "classification", + "wnli": "classification", +} diff --git a/venv/lib/python3.10/site-packages/transformers/data/processors/squad.py b/venv/lib/python3.10/site-packages/transformers/data/processors/squad.py new file mode 100644 index 0000000000000000000000000000000000000000..0f8bd2480551158c9916215e43436c8e027dbed0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/data/processors/squad.py @@ -0,0 +1,845 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from functools import partial +from multiprocessing import Pool, cpu_count + +import numpy as np +from tqdm import tqdm + +from ...models.bert.tokenization_bert import whitespace_tokenize +from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy +from ...utils import is_tf_available, is_torch_available, logging +from .utils import DataProcessor + + +# Store the tokenizers which insert 2 separators tokens +MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"} + + +if is_torch_available(): + import torch + from torch.utils.data import TensorDataset + +if is_tf_available(): + import tensorflow as tf + +logger = logging.get_logger(__name__) + + +def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): + """Returns tokenized answer spans that better match the annotated answer.""" + tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start : (new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + best_score = None + best_span_index = None + for span_index, doc_span in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +def _new_check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + # if len(doc_spans) == 1: + # return 
True + best_score = None + best_span_index = None + for span_index, doc_span in enumerate(doc_spans): + end = doc_span["start"] + doc_span["length"] - 1 + if position < doc_span["start"]: + continue + if position > end: + continue + num_left_context = position - doc_span["start"] + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"] + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +def _is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + +def squad_convert_example_to_features( + example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training +): + features = [] + if is_training and not example.is_impossible: + # Get start and end position + start_position = example.start_position + end_position = example.end_position + + # If the answer cannot be found in the text, then skip this example. + actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)]) + cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'") + return [] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for i, token in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + if tokenizer.__class__.__name__ in [ + "RobertaTokenizer", + "LongformerTokenizer", + "BartTokenizer", + "RobertaTokenizerFast", + "LongformerTokenizerFast", + "BartTokenizerFast", + ]: + sub_tokens = tokenizer.tokenize(token, add_prefix_space=True) + else: + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text + ) + + spans = [] + + truncated_query = tokenizer.encode( + example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length + ) + + # Tokenizers who insert 2 SEP tokens in-between & need to have special handling + # in the way they compute mask of added tokens. 
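+ # (`model_max_length - max_len_single_sentence` is the number of special tokens added around a single sequence; the extra +1 for the tokenizer families above accounts for the second separator they insert between question and context.)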
+ tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower() + sequence_added_tokens = ( + tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1 + if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET + else tokenizer.model_max_length - tokenizer.max_len_single_sentence + ) + sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair + + span_doc_tokens = all_doc_tokens + while len(spans) * doc_stride < len(all_doc_tokens): + # Define the side we want to truncate / pad and the text/pair sorting + if tokenizer.padding_side == "right": + texts = truncated_query + pairs = span_doc_tokens + truncation = TruncationStrategy.ONLY_SECOND.value + else: + texts = span_doc_tokens + pairs = truncated_query + truncation = TruncationStrategy.ONLY_FIRST.value + + encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic + texts, + pairs, + truncation=truncation, + padding=padding_strategy, + max_length=max_seq_length, + return_overflowing_tokens=True, + stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens, + return_token_type_ids=True, + ) + + paragraph_len = min( + len(all_doc_tokens) - len(spans) * doc_stride, + max_seq_length - len(truncated_query) - sequence_pair_added_tokens, + ) + + if tokenizer.pad_token_id in encoded_dict["input_ids"]: + if tokenizer.padding_side == "right": + non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)] + else: + last_padding_id_position = ( + len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id) + ) + non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :] + + else: + non_padded_ids = encoded_dict["input_ids"] + + tokens = tokenizer.convert_ids_to_tokens(non_padded_ids) + + token_to_orig_map = {} + for i in range(paragraph_len): + index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i + token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i] + + encoded_dict["paragraph_len"] = paragraph_len + encoded_dict["tokens"] = tokens + encoded_dict["token_to_orig_map"] = token_to_orig_map + encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens + encoded_dict["token_is_max_context"] = {} + encoded_dict["start"] = len(spans) * doc_stride + encoded_dict["length"] = paragraph_len + + spans.append(encoded_dict) + + if "overflowing_tokens" not in encoded_dict or ( + "overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0 + ): + break + span_doc_tokens = encoded_dict["overflowing_tokens"] + + for doc_span_index in range(len(spans)): + for j in range(spans[doc_span_index]["paragraph_len"]): + is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j) + index = ( + j + if tokenizer.padding_side == "left" + else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j + ) + spans[doc_span_index]["token_is_max_context"][index] = is_max_context + + for span in spans: + # Identify the position of the CLS token + cls_index = span["input_ids"].index(tokenizer.cls_token_id) + + # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) + # Original TF implementation also keep the classification token (set to 0) + p_mask = np.ones_like(span["token_type_ids"]) + if tokenizer.padding_side == "right": + p_mask[len(truncated_query) + 
sequence_added_tokens :] = 0 + else: + p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0 + + pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id) + special_token_indices = np.asarray( + tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True) + ).nonzero() + + p_mask[pad_token_indices] = 1 + p_mask[special_token_indices] = 1 + + # Set the cls index to 0: the CLS index can be used for impossible answers + p_mask[cls_index] = 0 + + span_is_impossible = example.is_impossible + start_position = 0 + end_position = 0 + if is_training and not span_is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. + doc_start = span["start"] + doc_end = span["start"] + span["length"] - 1 + out_of_span = False + + if not (tok_start_position >= doc_start and tok_end_position <= doc_end): + out_of_span = True + + if out_of_span: + start_position = cls_index + end_position = cls_index + span_is_impossible = True + else: + if tokenizer.padding_side == "left": + doc_offset = 0 + else: + doc_offset = len(truncated_query) + sequence_added_tokens + + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + features.append( + SquadFeatures( + span["input_ids"], + span["attention_mask"], + span["token_type_ids"], + cls_index, + p_mask.tolist(), + example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing. + unique_id=0, + paragraph_len=span["paragraph_len"], + token_is_max_context=span["token_is_max_context"], + tokens=span["tokens"], + token_to_orig_map=span["token_to_orig_map"], + start_position=start_position, + end_position=end_position, + is_impossible=span_is_impossible, + qas_id=example.qas_id, + ) + ) + return features + + +def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase): + global tokenizer + tokenizer = tokenizer_for_convert + + +def squad_convert_examples_to_features( + examples, + tokenizer, + max_seq_length, + doc_stride, + max_query_length, + is_training, + padding_strategy="max_length", + return_dataset=False, + threads=1, + tqdm_enabled=True, +): + """ + Converts a list of examples into a list of features that can be directly given as input to a model. It is + model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs. + + Args: + examples: list of [`~data.processors.squad.SquadExample`] + tokenizer: an instance of a child of [`PreTrainedTokenizer`] + max_seq_length: The maximum sequence length of the inputs. + doc_stride: The stride used when the context is too large and is split across several features. + max_query_length: The maximum length of the query. + is_training: whether to create features for model evaluation or model training. + padding_strategy: Default to "max_length". Which padding strategy to use + return_dataset: Default False. Either 'pt' or 'tf'. + if 'pt': returns a torch.data.TensorDataset, if 'tf': returns a tf.data.Dataset + threads: multiple processing threads. 
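+ tqdm_enabled: Default True. Whether to display a tqdm progress bar during conversion.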
+ + + Returns: + list of [`~data.processors.squad.SquadFeatures`] + + Example: + + ```python + processor = SquadV2Processor() + examples = processor.get_dev_examples(data_dir) + + features = squad_convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=args.max_seq_length, + doc_stride=args.doc_stride, + max_query_length=args.max_query_length, + is_training=not evaluate, + ) + ```""" + # Defining helper methods + features = [] + + threads = min(threads, cpu_count()) + with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p: + annotate_ = partial( + squad_convert_example_to_features, + max_seq_length=max_seq_length, + doc_stride=doc_stride, + max_query_length=max_query_length, + padding_strategy=padding_strategy, + is_training=is_training, + ) + features = list( + tqdm( + p.imap(annotate_, examples, chunksize=32), + total=len(examples), + desc="convert squad examples to features", + disable=not tqdm_enabled, + ) + ) + + new_features = [] + unique_id = 1000000000 + example_index = 0 + for example_features in tqdm( + features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled + ): + if not example_features: + continue + for example_feature in example_features: + example_feature.example_index = example_index + example_feature.unique_id = unique_id + new_features.append(example_feature) + unique_id += 1 + example_index += 1 + features = new_features + del new_features + if return_dataset == "pt": + if not is_torch_available(): + raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.") + + # Convert to Tensors and build dataset + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long) + all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) + all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) + all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) + all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float) + + if not is_training: + all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long) + dataset = TensorDataset( + all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask + ) + else: + all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) + all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) + dataset = TensorDataset( + all_input_ids, + all_attention_masks, + all_token_type_ids, + all_start_positions, + all_end_positions, + all_cls_index, + all_p_mask, + all_is_impossible, + ) + + return features, dataset + elif return_dataset == "tf": + if not is_tf_available(): + raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.") + + def gen(): + for i, ex in enumerate(features): + if ex.token_type_ids is None: + yield ( + { + "input_ids": ex.input_ids, + "attention_mask": ex.attention_mask, + "feature_index": i, + "qas_id": ex.qas_id, + }, + { + "start_positions": ex.start_position, + "end_positions": ex.end_position, + "cls_index": ex.cls_index, + "p_mask": ex.p_mask, + "is_impossible": ex.is_impossible, + }, + ) + else: + yield ( + { + "input_ids": ex.input_ids, + "attention_mask": ex.attention_mask, + "token_type_ids": ex.token_type_ids, + "feature_index": i, + "qas_id": ex.qas_id, + 
}, + { + "start_positions": ex.start_position, + "end_positions": ex.end_position, + "cls_index": ex.cls_index, + "p_mask": ex.p_mask, + "is_impossible": ex.is_impossible, + }, + ) + + # Why have we split the batch into a tuple? PyTorch just has a list of tensors. + if "token_type_ids" in tokenizer.model_input_names: + train_types = ( + { + "input_ids": tf.int32, + "attention_mask": tf.int32, + "token_type_ids": tf.int32, + "feature_index": tf.int64, + "qas_id": tf.string, + }, + { + "start_positions": tf.int64, + "end_positions": tf.int64, + "cls_index": tf.int64, + "p_mask": tf.int32, + "is_impossible": tf.int32, + }, + ) + + train_shapes = ( + { + "input_ids": tf.TensorShape([None]), + "attention_mask": tf.TensorShape([None]), + "token_type_ids": tf.TensorShape([None]), + "feature_index": tf.TensorShape([]), + "qas_id": tf.TensorShape([]), + }, + { + "start_positions": tf.TensorShape([]), + "end_positions": tf.TensorShape([]), + "cls_index": tf.TensorShape([]), + "p_mask": tf.TensorShape([None]), + "is_impossible": tf.TensorShape([]), + }, + ) + else: + train_types = ( + {"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string}, + { + "start_positions": tf.int64, + "end_positions": tf.int64, + "cls_index": tf.int64, + "p_mask": tf.int32, + "is_impossible": tf.int32, + }, + ) + + train_shapes = ( + { + "input_ids": tf.TensorShape([None]), + "attention_mask": tf.TensorShape([None]), + "feature_index": tf.TensorShape([]), + "qas_id": tf.TensorShape([]), + }, + { + "start_positions": tf.TensorShape([]), + "end_positions": tf.TensorShape([]), + "cls_index": tf.TensorShape([]), + "p_mask": tf.TensorShape([None]), + "is_impossible": tf.TensorShape([]), + }, + ) + + return tf.data.Dataset.from_generator(gen, train_types, train_shapes) + else: + return features + + +class SquadProcessor(DataProcessor): + """ + Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and + version 2.0 of SQuAD, respectively. + """ + + train_file = None + dev_file = None + + def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False): + if not evaluate: + answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8") + answer_start = tensor_dict["answers"]["answer_start"][0].numpy() + answers = [] + else: + answers = [ + {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")} + for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"]) + ] + + answer = None + answer_start = None + + return SquadExample( + qas_id=tensor_dict["id"].numpy().decode("utf-8"), + question_text=tensor_dict["question"].numpy().decode("utf-8"), + context_text=tensor_dict["context"].numpy().decode("utf-8"), + answer_text=answer, + start_position_character=answer_start, + title=tensor_dict["title"].numpy().decode("utf-8"), + answers=answers, + ) + + def get_examples_from_dataset(self, dataset, evaluate=False): + """ + Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset. 
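+ When *evaluate* is True the "validation" split of the dataset is used, otherwise the "train" split.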
+ + Args: + dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")* + evaluate: Boolean specifying if in evaluation mode or in training mode + + Returns: + List of SquadExample + + Examples: + + ```python + >>> import tensorflow_datasets as tfds + + >>> dataset = tfds.load("squad") + + >>> training_examples = get_examples_from_dataset(dataset, evaluate=False) + >>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True) + ```""" + + if evaluate: + dataset = dataset["validation"] + else: + dataset = dataset["train"] + + examples = [] + for tensor_dict in tqdm(dataset): + examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate)) + + return examples + + def get_train_examples(self, data_dir, filename=None): + """ + Returns the training examples from the data directory. + + Args: + data_dir: Directory containing the data files used for training and evaluating. + filename: None by default, specify this if the training file has a different name than the original one + which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively. + + """ + if data_dir is None: + data_dir = "" + + if self.train_file is None: + raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") + + with open( + os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8" + ) as reader: + input_data = json.load(reader)["data"] + return self._create_examples(input_data, "train") + + def get_dev_examples(self, data_dir, filename=None): + """ + Returns the evaluation example from the data directory. + + Args: + data_dir: Directory containing the data files used for training and evaluating. + filename: None by default, specify this if the evaluation file has a different name than the original one + which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively. 
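+ + Returns: + List of [`~data.processors.squad.SquadExample`] parsed from the evaluation file.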
+ """ + if data_dir is None: + data_dir = "" + + if self.dev_file is None: + raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") + + with open( + os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8" + ) as reader: + input_data = json.load(reader)["data"] + return self._create_examples(input_data, "dev") + + def _create_examples(self, input_data, set_type): + is_training = set_type == "train" + examples = [] + for entry in tqdm(input_data): + title = entry["title"] + for paragraph in entry["paragraphs"]: + context_text = paragraph["context"] + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position_character = None + answer_text = None + answers = [] + + is_impossible = qa.get("is_impossible", False) + if not is_impossible: + if is_training: + answer = qa["answers"][0] + answer_text = answer["text"] + start_position_character = answer["answer_start"] + else: + answers = qa["answers"] + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + context_text=context_text, + answer_text=answer_text, + start_position_character=start_position_character, + title=title, + is_impossible=is_impossible, + answers=answers, + ) + examples.append(example) + return examples + + +class SquadV1Processor(SquadProcessor): + train_file = "train-v1.1.json" + dev_file = "dev-v1.1.json" + + +class SquadV2Processor(SquadProcessor): + train_file = "train-v2.0.json" + dev_file = "dev-v2.0.json" + + +class SquadExample: + """ + A single training/test example for the Squad dataset, as loaded from disk. + + Args: + qas_id: The example's unique identifier + question_text: The question string + context_text: The context string + answer_text: The answer string + start_position_character: The character position of the start of the answer + title: The title of the example + answers: None by default, this is used during evaluation. Holds answers as well as their start positions. + is_impossible: False by default, set to True if the example has no possible answer. + """ + + def __init__( + self, + qas_id, + question_text, + context_text, + answer_text, + start_position_character, + title, + answers=[], + is_impossible=False, + ): + self.qas_id = qas_id + self.question_text = question_text + self.context_text = context_text + self.answer_text = answer_text + self.title = title + self.is_impossible = is_impossible + self.answers = answers + + self.start_position, self.end_position = 0, 0 + + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + + # Split on whitespace so that different tokens may be attributed to their original position. + for c in self.context_text: + if _is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + self.doc_tokens = doc_tokens + self.char_to_word_offset = char_to_word_offset + + # Start and end positions only has a value during evaluation. + if start_position_character is not None and not is_impossible: + self.start_position = char_to_word_offset[start_position_character] + self.end_position = char_to_word_offset[ + min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1) + ] + + +class SquadFeatures: + """ + Single squad example features to be fed to a model. 
Those features are model-specific and can be crafted from
+    [`~data.processors.squad.SquadExample`] using the
+    [`~data.processors.squad.squad_convert_examples_to_features`] method.
+
+    Args:
+        input_ids: Indices of input sequence tokens in the vocabulary.
+        attention_mask: Mask to avoid performing attention on padding token indices.
+        token_type_ids: Segment token indices to indicate first and second portions of the inputs.
+        cls_index: The index of the CLS token.
+        p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
+            Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer.
+        example_index: The index of the example.
+        unique_id: The unique feature identifier.
+        paragraph_len: The length of the context.
+        token_is_max_context:
+            List of booleans identifying which tokens have their maximum context in this feature object. If a token
+            does not have its maximum context in this feature object, it means that another feature object has more
+            information related to that token and should be prioritized over this feature for that token.
+        tokens: List of tokens corresponding to the input ids.
+        token_to_orig_map: Mapping between the tokens and the original text, needed in order to identify the answer.
+        start_position: Token index of the start of the answer.
+        end_position: Token index of the end of the answer.
+        is_impossible: Whether the example has no possible answer.
+        encoding: Optionally store the BatchEncoding with the fast-tokenizer alignment methods.
+    """
+
+    def __init__(
+        self,
+        input_ids,
+        attention_mask,
+        token_type_ids,
+        cls_index,
+        p_mask,
+        example_index,
+        unique_id,
+        paragraph_len,
+        token_is_max_context,
+        tokens,
+        token_to_orig_map,
+        start_position,
+        end_position,
+        is_impossible,
+        qas_id: str = None,
+        encoding: BatchEncoding = None,
+    ):
+        self.input_ids = input_ids
+        self.attention_mask = attention_mask
+        self.token_type_ids = token_type_ids
+        self.cls_index = cls_index
+        self.p_mask = p_mask
+
+        self.example_index = example_index
+        self.unique_id = unique_id
+        self.paragraph_len = paragraph_len
+        self.token_is_max_context = token_is_max_context
+        self.tokens = tokens
+        self.token_to_orig_map = token_to_orig_map
+
+        self.start_position = start_position
+        self.end_position = end_position
+        self.is_impossible = is_impossible
+        self.qas_id = qas_id
+
+        self.encoding = encoding
+
+
+class SquadResult:
+    """
+    Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
+
+    Args:
+        unique_id: The unique identifier corresponding to that example.
+        start_logits: The logits corresponding to the start of the answer
+        end_logits: The logits corresponding to the end of the answer
+    """
+
+    def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
+        self.start_logits = start_logits
+        self.end_logits = end_logits
+        self.unique_id = unique_id
+
+        if start_top_index:
+            self.start_top_index = start_top_index
+            self.end_top_index = end_top_index
+            self.cls_logits = cls_logits
diff --git a/venv/lib/python3.10/site-packages/transformers/data/processors/utils.py b/venv/lib/python3.10/site-packages/transformers/data/processors/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..936f5a51e9fcf4c4189eb444e567d761e8fa0865
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/data/processors/utils.py
@@ -0,0 +1,349 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION.
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import csv +import dataclasses +import json +from dataclasses import dataclass +from typing import List, Optional, Union + +from ...utils import is_tf_available, is_torch_available, logging + + +logger = logging.get_logger(__name__) + + +@dataclass +class InputExample: + """ + A single training/test example for simple sequence classification. + + Args: + guid: Unique id for the example. + text_a: string. The untokenized text of the first sequence. For single + sequence tasks, only this sequence must be specified. + text_b: (Optional) string. The untokenized text of the second sequence. + Only must be specified for sequence pair tasks. + label: (Optional) string. The label of the example. This should be + specified for train and dev examples, but not for test examples. + """ + + guid: str + text_a: str + text_b: Optional[str] = None + label: Optional[str] = None + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(dataclasses.asdict(self), indent=2) + "\n" + + +@dataclass(frozen=True) +class InputFeatures: + """ + A single set of features of data. Property names are the same names as the corresponding inputs to a model. + + Args: + input_ids: Indices of input sequence tokens in the vocabulary. + attention_mask: Mask to avoid performing attention on padding token indices. + Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded) + tokens. + token_type_ids: (Optional) Segment token indices to indicate first and second + portions of the inputs. Only some models use them. + label: (Optional) Label corresponding to the input. Int for classification problems, + float for regression problems. + """ + + input_ids: List[int] + attention_mask: Optional[List[int]] = None + token_type_ids: Optional[List[int]] = None + label: Optional[Union[int, float]] = None + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(dataclasses.asdict(self)) + "\n" + + +class DataProcessor: + """Base class for data converters for sequence classification data sets.""" + + def get_example_from_tensor_dict(self, tensor_dict): + """ + Gets an example from a dict with tensorflow tensors. + + Args: + tensor_dict: Keys and values should match the corresponding Glue + tensorflow_dataset examples. 
+ """ + raise NotImplementedError() + + def get_train_examples(self, data_dir): + """Gets a collection of [`InputExample`] for the train set.""" + raise NotImplementedError() + + def get_dev_examples(self, data_dir): + """Gets a collection of [`InputExample`] for the dev set.""" + raise NotImplementedError() + + def get_test_examples(self, data_dir): + """Gets a collection of [`InputExample`] for the test set.""" + raise NotImplementedError() + + def get_labels(self): + """Gets the list of labels for this data set.""" + raise NotImplementedError() + + def tfds_map(self, example): + """ + Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts + examples to the correct format. + """ + if len(self.get_labels()) > 1: + example.label = self.get_labels()[int(example.label)] + return example + + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """Reads a tab separated value file.""" + with open(input_file, "r", encoding="utf-8-sig") as f: + return list(csv.reader(f, delimiter="\t", quotechar=quotechar)) + + +class SingleSentenceClassificationProcessor(DataProcessor): + """Generic processor for a single sentence classification data set.""" + + def __init__(self, labels=None, examples=None, mode="classification", verbose=False): + self.labels = [] if labels is None else labels + self.examples = [] if examples is None else examples + self.mode = mode + self.verbose = verbose + + def __len__(self): + return len(self.examples) + + def __getitem__(self, idx): + if isinstance(idx, slice): + return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx]) + return self.examples[idx] + + @classmethod + def create_from_csv( + cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs + ): + processor = cls(**kwargs) + processor.add_examples_from_csv( + file_name, + split_name=split_name, + column_label=column_label, + column_text=column_text, + column_id=column_id, + skip_first_row=skip_first_row, + overwrite_labels=True, + overwrite_examples=True, + ) + return processor + + @classmethod + def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs): + processor = cls(**kwargs) + processor.add_examples(texts_or_text_and_labels, labels=labels) + return processor + + def add_examples_from_csv( + self, + file_name, + split_name="", + column_label=0, + column_text=1, + column_id=None, + skip_first_row=False, + overwrite_labels=False, + overwrite_examples=False, + ): + lines = self._read_tsv(file_name) + if skip_first_row: + lines = lines[1:] + texts = [] + labels = [] + ids = [] + for i, line in enumerate(lines): + texts.append(line[column_text]) + labels.append(line[column_label]) + if column_id is not None: + ids.append(line[column_id]) + else: + guid = f"{split_name}-{i}" if split_name else str(i) + ids.append(guid) + + return self.add_examples( + texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples + ) + + def add_examples( + self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False + ): + if labels is not None and len(texts_or_text_and_labels) != len(labels): + raise ValueError( + f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}" + ) + if ids is not None and len(texts_or_text_and_labels) != len(ids): + raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}") + if ids 
is None:
+            ids = [None] * len(texts_or_text_and_labels)
+        if labels is None:
+            labels = [None] * len(texts_or_text_and_labels)
+        examples = []
+        added_labels = set()
+        for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
+            if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
+                text, label = text_or_text_and_label
+            else:
+                text = text_or_text_and_label
+            added_labels.add(label)
+            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
+
+        # Update examples
+        if overwrite_examples:
+            self.examples = examples
+        else:
+            self.examples.extend(examples)
+
+        # Update labels
+        if overwrite_labels:
+            self.labels = list(added_labels)
+        else:
+            self.labels = list(set(self.labels).union(added_labels))
+
+        return self.examples
+
+    def get_features(
+        self,
+        tokenizer,
+        max_length=None,
+        pad_on_left=False,
+        pad_token=0,
+        mask_padding_with_zero=True,
+        return_tensors=None,
+    ):
+        """
+        Converts the examples into a list of `InputFeatures`.
+
+        Args:
+            tokenizer: Instance of a tokenizer that will tokenize the examples
+            max_length: Maximum example length
+            pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
+            pad_token: Padding token
+            mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
+                and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
+                values)
+            return_tensors: If set to `"tf"` or `"pt"`, returns the features as a dataset for that framework instead
+                of a plain list (default `None`)
+
+        Returns:
+            If `return_tensors` is `"tf"`, a `tf.data.Dataset` holding the task-specific features; if it is `"pt"`, a
+            `torch.utils.data.TensorDataset`; otherwise, a list of task-specific `InputFeatures` which can be fed to
+            the model.
+
+        """
+        if max_length is None:
+            max_length = tokenizer.max_len
+
+        label_map = {label: i for i, label in enumerate(self.labels)}
+
+        all_input_ids = []
+        for ex_index, example in enumerate(self.examples):
+            if ex_index % 10000 == 0:
+                logger.info(f"Tokenizing example {ex_index}")
+
+            input_ids = tokenizer.encode(
+                example.text_a,
+                add_special_tokens=True,
+                max_length=min(max_length, tokenizer.max_len),
+            )
+            all_input_ids.append(input_ids)
+
+        batch_length = max(len(input_ids) for input_ids in all_input_ids)
+
+        features = []
+        for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
+            if ex_index % 10000 == 0:
+                logger.info(f"Writing example {ex_index}/{len(self.examples)}")
+            # The mask has 1 for real tokens and 0 for padding tokens. Only real
+            # tokens are attended to.
+            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
+
+            # Zero-pad up to the sequence length.
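+            # Illustrative walk-through (comment added for clarity, values are hypothetical): if
+            # batch_length is 12 and this example tokenized to 9 ids, padding_length is 3; with
+            # pad_on_left=False the 3 pad_token ids go on the right, and the attention mask gets
+            # 3 matching entries (0 when mask_padding_with_zero=True).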
+ padding_length = batch_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask + else: + input_ids = input_ids + ([pad_token] * padding_length) + attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + + if len(input_ids) != batch_length: + raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}") + if len(attention_mask) != batch_length: + raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}") + + if self.mode == "classification": + label = label_map[example.label] + elif self.mode == "regression": + label = float(example.label) + else: + raise ValueError(self.mode) + + if ex_index < 5 and self.verbose: + logger.info("*** Example ***") + logger.info(f"guid: {example.guid}") + logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}") + logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}") + logger.info(f"label: {example.label} (id = {label})") + + features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label)) + + if return_tensors is None: + return features + elif return_tensors == "tf": + if not is_tf_available(): + raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported") + import tensorflow as tf + + def gen(): + for ex in features: + yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label) + + dataset = tf.data.Dataset.from_generator( + gen, + ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), + ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])), + ) + return dataset + elif return_tensors == "pt": + if not is_torch_available(): + raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported") + import torch + from torch.utils.data import TensorDataset + + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) + if self.mode == "classification": + all_labels = torch.tensor([f.label for f in features], dtype=torch.long) + elif self.mode == "regression": + all_labels = torch.tensor([f.label for f in features], dtype=torch.float) + + dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels) + return dataset + else: + raise ValueError("return_tensors should be one of 'tf' or 'pt'") diff --git a/venv/lib/python3.10/site-packages/transformers/data/processors/xnli.py b/venv/lib/python3.10/site-packages/transformers/data/processors/xnli.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1a11fcd6b4ef167fc77fb1cc6d9acbbadaccf0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/data/processors/xnli.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" XNLI utils (dataset loading and evaluation)"""
+
+
+import os
+
+from ...utils import logging
+from .utils import DataProcessor, InputExample
+
+
+logger = logging.get_logger(__name__)
+
+
+class XnliProcessor(DataProcessor):
+    """
+    Processor for the XNLI dataset. Adapted from
+    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
+    """
+
+    def __init__(self, language, train_language=None):
+        self.language = language
+        self.train_language = train_language
+
+    def get_train_examples(self, data_dir):
+        """See base class."""
+        lg = self.language if self.train_language is None else self.train_language
+        lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv"))
+        examples = []
+        for i, line in enumerate(lines):
+            if i == 0:
+                continue
+            guid = f"train-{i}"
+            text_a = line[0]
+            text_b = line[1]
+            label = "contradiction" if line[2] == "contradictory" else line[2]
+            if not isinstance(text_a, str):
+                raise ValueError(f"Training input {text_a} is not a string")
+            if not isinstance(text_b, str):
+                raise ValueError(f"Training input {text_b} is not a string")
+            if not isinstance(label, str):
+                raise ValueError(f"Training label {label} is not a string")
+            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
+        return examples
+
+    def get_test_examples(self, data_dir):
+        """See base class."""
+        lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
+        examples = []
+        for i, line in enumerate(lines):
+            if i == 0:
+                continue
+            language = line[0]
+            if language != self.language:
+                continue
+            guid = f"test-{i}"
+            text_a = line[6]
+            text_b = line[7]
+            label = line[1]
+            if not isinstance(text_a, str):
+                raise ValueError(f"Test input {text_a} is not a string")
+            if not isinstance(text_b, str):
+                raise ValueError(f"Test input {text_b} is not a string")
+            if not isinstance(label, str):
+                raise ValueError(f"Test label {label} is not a string")
+            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
+        return examples
+
+    def get_labels(self):
+        """See base class."""
+        return ["contradiction", "entailment", "neutral"]
+
+
+xnli_processors = {
+    "xnli": XnliProcessor,
+}
+
+xnli_output_modes = {
+    "xnli": "classification",
+}
+
+xnli_tasks_num_labels = {
+    "xnli": 3,
+}
diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/__init__.py b/venv/lib/python3.10/site-packages/transformers/onnx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33350c83a2c161ee228677e8f6fc4b495e9c05bb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/onnx/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ..utils import _LazyModule + + +_import_structure = { + "config": [ + "EXTERNAL_DATA_FORMAT_SIZE_LIMIT", + "OnnxConfig", + "OnnxConfigWithPast", + "OnnxSeq2SeqConfigWithPast", + "PatchingSpec", + ], + "convert": ["export", "validate_model_outputs"], + "features": ["FeaturesManager"], + "utils": ["ParameterFormat", "compute_serialized_parameters_size"], +} + + +if TYPE_CHECKING: + from .config import ( + EXTERNAL_DATA_FORMAT_SIZE_LIMIT, + OnnxConfig, + OnnxConfigWithPast, + OnnxSeq2SeqConfigWithPast, + PatchingSpec, + ) + from .convert import export, validate_model_outputs + from .features import FeaturesManager + from .utils import ParameterFormat, compute_serialized_parameters_size + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/__main__.py b/venv/lib/python3.10/site-packages/transformers/onnx/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..e3dc6dfb78aa16fa32ea4a30eb4933a0653c7264 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/onnx/__main__.py @@ -0,0 +1,242 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import subprocess +import sys +import warnings +from argparse import ArgumentParser +from pathlib import Path + +from packaging import version + +from .. import AutoFeatureExtractor, AutoImageProcessor, AutoProcessor, AutoTokenizer +from ..utils import logging +from ..utils.import_utils import is_optimum_available +from .convert import export, validate_model_outputs +from .features import FeaturesManager +from .utils import get_preprocessor + + +MIN_OPTIMUM_VERSION = "1.5.0" + +ENCODER_DECODER_MODELS = ["vision-encoder-decoder"] + + +def export_with_optimum(args): + if is_optimum_available(): + from optimum.version import __version__ as optimum_version + + parsed_optimum_version = version.parse(optimum_version) + if parsed_optimum_version < version.parse(MIN_OPTIMUM_VERSION): + raise RuntimeError( + f"transformers.onnx requires optimum >= {MIN_OPTIMUM_VERSION} but {optimum_version} is installed. 
You " + "can upgrade optimum by running: pip install -U optimum[exporters]" + ) + else: + raise RuntimeError( + "transformers.onnx requires optimum to run, you can install the library by running: pip install " + "optimum[exporters]" + ) + cmd_line = [ + sys.executable, + "-m", + "optimum.exporters.onnx", + f"--model {args.model}", + f"--task {args.feature}", + f"--framework {args.framework}" if args.framework is not None else "", + f"{args.output}", + ] + proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE) + proc.wait() + + logger.info( + "The export was done by optimum.exporters.onnx. We recommend using to use this package directly in future, as " + "transformers.onnx is deprecated, and will be removed in v5. You can find more information here: " + "https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model." + ) + + +def export_with_transformers(args): + args.output = args.output if args.output.is_file() else args.output.joinpath("model.onnx") + if not args.output.parent.exists(): + args.output.parent.mkdir(parents=True) + + # Allocate the model + model = FeaturesManager.get_model_from_feature( + args.feature, args.model, framework=args.framework, cache_dir=args.cache_dir + ) + + model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=args.feature) + onnx_config = model_onnx_config(model.config) + + if model_kind in ENCODER_DECODER_MODELS: + encoder_model = model.get_encoder() + decoder_model = model.get_decoder() + + encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config) + decoder_onnx_config = onnx_config.get_decoder_config( + encoder_model.config, decoder_model.config, feature=args.feature + ) + + if args.opset is None: + args.opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset) + + if args.opset < min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset): + raise ValueError( + f"Opset {args.opset} is not sufficient to export {model_kind}. At least " + f" {min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)} is required." 
+ ) + + preprocessor = AutoFeatureExtractor.from_pretrained(args.model) + + onnx_inputs, onnx_outputs = export( + preprocessor, + encoder_model, + encoder_onnx_config, + args.opset, + args.output.parent.joinpath("encoder_model.onnx"), + ) + + validate_model_outputs( + encoder_onnx_config, + preprocessor, + encoder_model, + args.output.parent.joinpath("encoder_model.onnx"), + onnx_outputs, + args.atol if args.atol else encoder_onnx_config.atol_for_validation, + ) + + preprocessor = AutoTokenizer.from_pretrained(args.model) + + onnx_inputs, onnx_outputs = export( + preprocessor, + decoder_model, + decoder_onnx_config, + args.opset, + args.output.parent.joinpath("decoder_model.onnx"), + ) + + validate_model_outputs( + decoder_onnx_config, + preprocessor, + decoder_model, + args.output.parent.joinpath("decoder_model.onnx"), + onnx_outputs, + args.atol if args.atol else decoder_onnx_config.atol_for_validation, + ) + logger.info( + f"All good, model saved at: {args.output.parent.joinpath('encoder_model.onnx').as_posix()}," + f" {args.output.parent.joinpath('decoder_model.onnx').as_posix()}" + ) + + else: + # Instantiate the appropriate preprocessor + if args.preprocessor == "auto": + preprocessor = get_preprocessor(args.model) + elif args.preprocessor == "tokenizer": + preprocessor = AutoTokenizer.from_pretrained(args.model) + elif args.preprocessor == "image_processor": + preprocessor = AutoImageProcessor.from_pretrained(args.model) + elif args.preprocessor == "feature_extractor": + preprocessor = AutoFeatureExtractor.from_pretrained(args.model) + elif args.preprocessor == "processor": + preprocessor = AutoProcessor.from_pretrained(args.model) + else: + raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'") + + # Ensure the requested opset is sufficient + if args.opset is None: + args.opset = onnx_config.default_onnx_opset + + if args.opset < onnx_config.default_onnx_opset: + raise ValueError( + f"Opset {args.opset} is not sufficient to export {model_kind}. " + f"At least {onnx_config.default_onnx_opset} is required." + ) + + onnx_inputs, onnx_outputs = export( + preprocessor, + model, + onnx_config, + args.opset, + args.output, + ) + + if args.atol is None: + args.atol = onnx_config.atol_for_validation + + validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol) + logger.info(f"All good, model saved at: {args.output.as_posix()}") + warnings.warn( + "The export was done by transformers.onnx which is deprecated and will be removed in v5. We recommend" + " using optimum.exporters.onnx in future. You can find more information here:" + " https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model.", + FutureWarning, + ) + + +def main(): + parser = ArgumentParser("Hugging Face Transformers ONNX exporter") + parser.add_argument( + "-m", "--model", type=str, required=True, help="Model ID on huggingface.co or path on disk to load model from." + ) + parser.add_argument( + "--feature", + default="default", + help="The type of features to export the model with.", + ) + parser.add_argument("--opset", type=int, default=None, help="ONNX opset version to export the model with.") + parser.add_argument( + "--atol", type=float, default=None, help="Absolute difference tolerance when validating the model." + ) + parser.add_argument( + "--framework", + type=str, + choices=["pt", "tf"], + default=None, + help=( + "The framework to use for the ONNX export." 
+ " If not provided, will attempt to use the local checkpoint's original framework" + " or what is available in the environment." + ), + ) + parser.add_argument("output", type=Path, help="Path indicating where to store generated ONNX model.") + parser.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.") + parser.add_argument( + "--preprocessor", + type=str, + choices=["auto", "tokenizer", "feature_extractor", "image_processor", "processor"], + default="auto", + help="Which type of preprocessor to use. 'auto' tries to automatically detect it.", + ) + parser.add_argument( + "--export_with_transformers", + action="store_true", + help=( + "Whether to use transformers.onnx instead of optimum.exporters.onnx to perform the ONNX export. It can be " + "useful when exporting a model supported in transformers but not in optimum, otherwise it is not " + "recommended." + ), + ) + + args = parser.parse_args() + if args.export_with_transformers or not is_optimum_available(): + export_with_transformers(args) + else: + export_with_optimum(args) + + +if __name__ == "__main__": + logger = logging.get_logger("transformers.onnx") # pylint: disable=invalid-name + logger.setLevel(logging.INFO) + main() diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82a06f23c841b78160f11f931fca6cf3a47ecc56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b046331dde82b1724b19e76dd9e396ac0d118a06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89fcc70a8b30cb99266b49fc8f71da799a2cdb17 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/convert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a28473efc5e4afd692d95df61ba35a2550413f6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/convert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/features.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/features.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a77f2bca3ee2628fe5c05b46d416f23328a9354 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/features.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/utils.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc1d9ae41f47073c6260e4e3fc48939379f3bc65 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/onnx/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/config.py b/venv/lib/python3.10/site-packages/transformers/onnx/config.py new file mode 100644 index 0000000000000000000000000000000000000000..02bf2421f4d2f6dde0c9595b030dfcb9f82031f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/onnx/config.py @@ -0,0 +1,741 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import dataclasses +import warnings +from abc import ABC, abstractmethod +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple, Union + +import numpy as np +from packaging import version + +from ..utils import TensorType, is_torch_available, is_vision_available, logging +from .utils import ParameterFormat, compute_effective_axis_dimension, compute_serialized_parameters_size + + +if TYPE_CHECKING: + from ..configuration_utils import PretrainedConfig + from ..feature_extraction_utils import FeatureExtractionMixin + from ..image_processing_utils import ImageProcessingMixin + from ..tokenization_utils_base import PreTrainedTokenizerBase + + +if is_vision_available(): + from PIL import Image + +logger = logging.get_logger(__name__) + + +DEFAULT_ONNX_OPSET = 11 + +# 2 Gb +EXTERNAL_DATA_FORMAT_SIZE_LIMIT = 2 * 1024 * 1024 * 1024 + + +@dataclasses.dataclass +class PatchingSpec: + """ + Data class that holds patching specifications. + + Args: + o: Module / object where the op to patch is located + name: Name of the op to monkey patch + custom_op: Custom op that patches the original op + orig_op: Original op that is being patched + op_wrapper: Wrapper (optional) that wraps both the original and custom ops. + It is useful for ops that are class or static methods for instance. + """ + + o: Any + name: str + custom_op: Callable + orig_op: Optional[Callable] = None + op_wrapper: Optional[Callable] = None + + +class OnnxConfig(ABC): + """ + Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format. 
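+
+    Example (a minimal sketch, not taken from the library -- `MyOnnxConfig` and its axis names are
+    illustrative; a concrete subclass only has to provide the abstract `inputs` property below):
+
+    ```python
+    >>> from collections import OrderedDict
+    >>> class MyOnnxConfig(OnnxConfig):
+    ...     @property
+    ...     def inputs(self):
+    ...         # Map each input name to {axis index: symbolic axis name}
+    ...         return OrderedDict(
+    ...             [
+    ...                 ("input_ids", {0: "batch", 1: "sequence"}),
+    ...                 ("attention_mask", {0: "batch", 1: "sequence"}),
+    ...             ]
+    ...         )
+    ```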
+ """ + + default_fixed_batch = 2 + default_fixed_sequence = 8 + default_fixed_num_choices = 4 + torch_onnx_minimum_version = version.parse("1.8") + _tasks_to_common_outputs = { + "causal-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "default": OrderedDict({"last_hidden_state": {0: "batch", 1: "sequence"}}), + "image-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "image-segmentation": OrderedDict( + { + "logits": {0: "batch", 1: "sequence"}, + "pred_boxes": {0: "batch", 1: "sequence"}, + "pred_masks": {0: "batch", 1: "sequence"}, + } + ), + "masked-im": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "masked-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "multiple-choice": OrderedDict({"logits": {0: "batch"}}), + "object-detection": OrderedDict( + { + "logits": {0: "batch", 1: "sequence"}, + "pred_boxes": {0: "batch", 1: "sequence"}, + } + ), + "question-answering": OrderedDict( + { + "start_logits": {0: "batch", 1: "sequence"}, + "end_logits": {0: "batch", 1: "sequence"}, + } + ), + "semantic-segmentation": OrderedDict({"logits": {0: "batch", 1: "num_labels", 2: "height", 3: "width"}}), + "seq2seq-lm": OrderedDict({"logits": {0: "batch", 1: "decoder_sequence"}}), + "sequence-classification": OrderedDict({"logits": {0: "batch"}}), + "token-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "vision2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "speech2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + } + + def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: List[PatchingSpec] = None): + self._config = config + + if task not in self._tasks_to_common_outputs: + raise ValueError( + f"{task} is not a supported task, supported tasks: {self._tasks_to_common_outputs.keys()}" + ) + self.task = task + + self._patching_specs = [] + for spec in patching_specs if patching_specs is not None else []: + final_spec = spec + if spec.orig_op is None: + final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name)) + self._patching_specs.append(final_spec) + + @classmethod + def from_model_config(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfig": + """ + Instantiate a OnnxConfig for a specific model + + Args: + config: The model's configuration to use when exporting to ONNX + + Returns: + OnnxConfig for this model + """ + return cls(config, task=task) + + @property + @abstractmethod + def inputs(self) -> Mapping[str, Mapping[int, str]]: + """ + Mapping containing the axis definition of the input tensors to provide to the model + + Returns: + For each input: its name associated to the axes symbolic name and the axis position within the tensor + """ + raise NotImplementedError() + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + """ + Mapping containing the axis definition of the output tensors to provide to the model + + Returns: + For each output: its name associated to the axes symbolic name and the axis position within the tensor + """ + common_outputs = self._tasks_to_common_outputs[self.task] + return copy.deepcopy(common_outputs) + + @property + def values_override(self) -> Optional[Mapping[str, Any]]: + """ + Dictionary of keys to override in the model's config before exporting + + Returns: + Dictionary with the keys (and their corresponding values) to override + """ + if hasattr(self._config, "use_cache"): + return {"use_cache": False} + + return None + + @property + def default_batch_size(self) -> 
int:
+        """
+        The default batch size to use if no other indication
+
+        Returns:
+            Integer > 0
+        """
+        # Using 2 avoids ONNX making assumptions about a single-sample batch
+        return OnnxConfig.default_fixed_batch
+
+    @property
+    def default_sequence_length(self) -> int:
+        """
+        The default sequence length to use if no other indication
+
+        Returns:
+            Integer > 0
+        """
+        return OnnxConfig.default_fixed_sequence
+
+    @property
+    def default_num_choices(self) -> int:
+        """
+        The default number of choices to use if no other indication
+
+        Returns:
+            Integer > 0
+        """
+        return OnnxConfig.default_fixed_num_choices
+
+    @property
+    def default_onnx_opset(self) -> int:
+        """
+        Which onnx opset to use when exporting the model
+
+        Returns:
+            Integer ONNX Opset version
+        """
+        return DEFAULT_ONNX_OPSET
+
+    @property
+    def atol_for_validation(self) -> float:
+        """
+        What absolute tolerance value to use during model conversion validation.
+
+        Returns:
+            Float absolute tolerance value.
+        """
+        return 1e-5
+
+    @property
+    def is_torch_support_available(self) -> bool:
+        """
+        Whether the installed version of PyTorch is recent enough to export the model.
+
+        Returns:
+            `bool`: Whether the installed version of PyTorch is compatible with the model.
+        """
+        if is_torch_available():
+            from transformers.utils import get_torch_version
+
+            return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version
+        else:
+            return False
+
+    @staticmethod
+    def use_external_data_format(num_parameters: int) -> bool:
+        """
+        Flag indicating if the model requires using the external data format
+
+        Args:
+            num_parameters: Number of parameters in the model
+
+        Returns:
+            True if model.num_parameters() * size_of(float32) >= 2 GB, False otherwise
+        """
+
+        return (
+            compute_serialized_parameters_size(num_parameters, ParameterFormat.Float)
+            >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT
+        )
+
+    def _generate_dummy_images(
+        self, batch_size: int = 2, num_channels: int = 3, image_height: int = 40, image_width: int = 40
+    ):
+        images = []
+        for _ in range(batch_size):
+            data = np.random.rand(image_height, image_width, num_channels) * 255
+            images.append(Image.fromarray(data.astype("uint8")).convert("RGB"))
+        return images
+
+    def _generate_dummy_audio(
+        self, batch_size: int = 2, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220
+    ):
+        audio_data = []
+        for _ in range(batch_size):
+            # time variable
+            t = np.linspace(0, time_duration, int(time_duration * sampling_rate), endpoint=False)
+
+            # generate pure sine wave at `frequency` Hz
+            audio_data.append(0.5 * np.sin(2 * np.pi * frequency * t))
+
+        return audio_data
+
+    def generate_dummy_inputs(
+        self,
+        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin", "ImageProcessingMixin"],
+        batch_size: int = -1,
+        seq_length: int = -1,
+        num_choices: int = -1,
+        is_pair: bool = False,
+        framework: Optional[TensorType] = None,
+        num_channels: int = 3,
+        image_width: int = 40,
+        image_height: int = 40,
+        sampling_rate: int = 22050,
+        time_duration: float = 5.0,
+        frequency: int = 220,
+        tokenizer: "PreTrainedTokenizerBase" = None,
+    ) -> Mapping[str, Any]:
+        """
+        Generate inputs to provide to the ONNX exporter for the specific framework
+
+        Args:
+            preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]):
+                The preprocessor associated with this model configuration.
+            batch_size (`int`, *optional*, defaults to -1):
+                The batch size to export the model for (-1 means dynamic axis).
+            num_choices (`int`, *optional*, defaults to -1):
+                The number of candidate answers provided for multiple choice task (-1 means dynamic axis).
+            seq_length (`int`, *optional*, defaults to -1):
+                The sequence length to export the model for (-1 means dynamic axis).
+            is_pair (`bool`, *optional*, defaults to `False`):
+                Indicate if the input is a pair (sentence 1, sentence 2)
+            framework (`TensorType`, *optional*, defaults to `None`):
+                The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.
+            num_channels (`int`, *optional*, defaults to 3):
+                The number of channels of the generated images.
+            image_width (`int`, *optional*, defaults to 40):
+                The width of the generated images.
+            image_height (`int`, *optional*, defaults to 40):
+                The height of the generated images.
+            sampling_rate (`int`, *optional*, defaults to 22050):
+                The sampling rate for audio data generation.
+            time_duration (`float`, *optional*, defaults to 5.0):
+                Total seconds of sampling for audio data generation.
+            frequency (`int`, *optional*, defaults to 220):
+                The desired natural frequency of generated audio.
+
+        Returns:
+            Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
+        """
+        from ..feature_extraction_utils import FeatureExtractionMixin
+        from ..image_processing_utils import ImageProcessingMixin
+        from ..tokenization_utils_base import PreTrainedTokenizerBase
+
+        if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
+            raise ValueError("You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.")
+        if tokenizer is not None:
+            warnings.warn(
+                "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
+                " `preprocessor` instead.",
+                FutureWarning,
+            )
+            logger.warning("Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.")
+            preprocessor = tokenizer
+        if isinstance(preprocessor, PreTrainedTokenizerBase):
+            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
+            batch_size = compute_effective_axis_dimension(
+                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
+            )
+            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
+            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
+            seq_length = compute_effective_axis_dimension(
+                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
+            )
+            # Generate dummy inputs according to compute batch and sequence
+            input_token = (
+                preprocessor.unk_token
+                if (preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0)
+                else "0"
+            )
+            dummy_input = [" ".join([input_token]) * seq_length] * batch_size
+            if self.task == "multiple-choice":
+                # If dynamic axis (-1) we forward with a fixed dimension of 4 candidate answers to avoid optimizations
+                # made by ONNX
+                num_choices = compute_effective_axis_dimension(
+                    num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0
+                )
+                dummy_input = dummy_input * num_choices
+                # The shape of the tokenized inputs values is [batch_size * num_choices, seq_length]
+                tokenized_input = preprocessor(dummy_input, text_pair=dummy_input)
+                # Unflatten the tokenized inputs values expanding it to the shape [batch_size, num_choices, seq_length]
+                for k, v in tokenized_input.items():
+                    tokenized_input[k] = [v[i : i + num_choices] for i in range(0, len(v), num_choices)]
+                return
dict(tokenized_input.convert_to_tensors(tensor_type=framework)) + return dict(preprocessor(dummy_input, return_tensors=framework)) + elif isinstance(preprocessor, ImageProcessingMixin): + if preprocessor.model_input_names[0] != "pixel_values": + raise ValueError( + f"The `preprocessor` is an image processor ({preprocessor.__class__.__name__}) and expects" + f' `model_input_names[0]` to be "pixel_values", but got {preprocessor.model_input_names[0]}' + ) + # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX + batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) + dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) + return dict(preprocessor(images=dummy_input, return_tensors=framework)) + elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values": + # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX + batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) + dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) + return dict(preprocessor(images=dummy_input, return_tensors=framework)) + elif ( + isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "input_features" + ): + # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX + batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) + dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency) + return dict(preprocessor(dummy_input, return_tensors=framework)) + else: + raise ValueError( + "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." + ) + + def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]: + """ + Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq + models which have the encoder and decoder exported as separate ONNX files. + + Args: + reference_model_inputs ([`Mapping[str, Tensor]`): + Reference inputs for the model. + + Returns: + `Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function + """ + return reference_model_inputs + + def patch_ops(self): + for spec in self._patching_specs: + custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op) + setattr(spec.o, spec.name, custom_op) + + def restore_ops(self): + for spec in self._patching_specs: + orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op) + setattr(spec.o, spec.name, orig_op) + + @classmethod + def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]: + """ + Flatten any potential nested structure expanding the name of the field with the index of the element within the + structure. + + Args: + name: The name of the nested structure + field: The structure to, potentially, be flattened + + Returns: + (Dict[str, Any]): Outputs with flattened structure and key mapping this new structure. 
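+
+        Example (illustrative values; the method chains the nested lists and numbers the items):
+
+        ```python
+        >>> OnnxConfig.flatten_output_collection_property("present", [["a", "b"], ["c", "d"]])
+        {'present.0': 'a', 'present.1': 'b', 'present.2': 'c', 'present.3': 'd'}
+        ```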
+ + """ + from itertools import chain + + return {f"{name}.{idx}": item for idx, item in enumerate(chain.from_iterable(field))} + + +class OnnxConfigWithPast(OnnxConfig, ABC): + def __init__( + self, + config: "PretrainedConfig", + task: str = "default", + patching_specs: List[PatchingSpec] = None, + use_past: bool = False, + ): + super().__init__(config, task=task, patching_specs=patching_specs) + self.use_past = use_past + + @classmethod + def with_past(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfigWithPast": + """ + Instantiate a OnnxConfig with `use_past` attribute set to True + + Args: + config: The underlying model's config to use when exporting to ONNX + + Returns: + OnnxConfig with `.use_past = True` + """ + return cls(config, task=task, use_past=True) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + common_outputs = super().outputs + if self.use_past: + self.fill_with_past_key_values_(common_outputs, direction="outputs") + + return common_outputs + + @property + def values_override(self) -> Optional[Mapping[str, Any]]: + if hasattr(self._config, "use_cache"): + return {"use_cache": self.use_past} + + return None + + @property + def num_layers(self) -> int: + """ + The number of layers attribute retrieved from the model config. Override this for model configs where the + number of layers attribute is not called `num_layers`. + """ + if not hasattr(self._config, "num_layers"): + raise AttributeError( + "could not find the number of layers attribute in the model configuration, override the num_layers" + " property of the model OnnxConfig to solve this" + ) + return self._config.num_layers + + @property + def num_attention_heads(self) -> int: + """ + The number of attention heads attribute retrieved from the model config. Override this for model configs where + the number of attention heads attribute is not called `num_attention_heads`. + """ + if not hasattr(self._config, "num_attention_heads"): + raise AttributeError( + "could not find the number of attention heads attribute in the model configuration, override the" + " num_attention_heads property of the model OnnxConfig to solve this" + ) + return self._config.num_attention_heads + + def generate_dummy_inputs( + self, + tokenizer: "PreTrainedTokenizerBase", + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + # TODO: should we set seq_length = 1 when self.use_past = True? 
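+        # Shape note (comment added for clarity): every past (key, value) tensor built below is
+        # (batch, num_attention_heads, past_key_values_length, hidden_size / num_attention_heads),
+        # and the attention mask is extended by past_key_values_length extra positions.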
+        common_inputs = super().generate_dummy_inputs(
+            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+        )
+
+        if self.use_past:
+            if not is_torch_available():
+                raise ValueError("Cannot generate dummy past_key_values inputs without PyTorch installed.")
+            else:
+                import torch
+
+                batch, seqlen = common_inputs["input_ids"].shape
+                # Not using the same length for past_key_values
+                past_key_values_length = seqlen + 2
+                shape = (
+                    batch,
+                    self.num_attention_heads,
+                    past_key_values_length,
+                    self._config.hidden_size // self.num_attention_heads,
+                )
+
+                if "attention_mask" in common_inputs:
+                    mask_dtype = common_inputs["attention_mask"].dtype
+                    common_inputs["attention_mask"] = torch.cat(
+                        [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
+                        dim=1,
+                    )
+
+                common_inputs["past_key_values"] = []
+                for _ in range(self.num_layers):
+                    common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
+
+        return common_inputs
+
+    def fill_with_past_key_values_(
+        self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool = False
+    ):
+        """
+        Fill the inputs_or_outputs mapping with past_key_values dynamic axes, depending on the direction.
+
+        Args:
+            inputs_or_outputs: The mapping to fill.
+            direction: either "inputs" or "outputs", it specifies whether inputs_or_outputs is the input mapping or
+                the output mapping; this is important for axes naming.
+            inverted_values_shape:
+                If `True`, store values on dynamic axis 1, else on axis 2.
+
+        """
+        if direction not in ["inputs", "outputs"]:
+            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
+
+        name = "past_key_values" if direction == "inputs" else "present"
+        for i in range(self.num_layers):
+            inputs_or_outputs[f"{name}.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
+            if inverted_values_shape:
+                inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 1: "past_sequence + sequence"}
+            else:
+                inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
+
+    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
+        flattened_output[f"{name}.{idx}.key"] = t[0]
+        flattened_output[f"{name}.{idx}.value"] = t[1]
+
+    def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[str, Any]:
+        flattened_output = {}
+        if name in ["present", "past_key_values"]:
+            for idx, t in enumerate(field):
+                self._flatten_past_key_values_(flattened_output, name, idx, t)
+        else:
+            flattened_output = super().flatten_output_collection_property(name, field)
+
+        return flattened_output
+
+
+class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast):
+    @property
+    def outputs(self) -> Mapping[str, Mapping[int, str]]:
+        common_outputs = super(OnnxConfigWithPast, self).outputs
+        # Renaming the outputs axes properly.
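+        # For instance, an axis named "sequence" on an output whose name contains "encoder" is
+        # renamed to "encoder_sequence"; axis names without "sequence" are kept unchanged.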
+ for name, axes_names in common_outputs.items(): + sequence_name = "encoder_sequence" if "encoder" in name else "decoder_sequence" + for axis_idx, name in axes_names.items(): + if "sequence" in name: + axes_names[axis_idx] = sequence_name + # We reset the value as the order in common_outputs (OrderedDict) is lost otherwise + else: + axes_names[axis_idx] = name + if self.use_past: + self.fill_with_past_key_values_(common_outputs, direction="outputs") + + return common_outputs + + @property + def num_layers(self) -> Tuple[int]: + try: + num_layers = super().num_layers + num_layers = (num_layers, num_layers) + except AttributeError: + if hasattr(self._config, "encoder_layers") and hasattr(self._config, "decoder_layers"): + num_layers = (self._config.encoder_layers, self._config.decoder_layers) + else: + raise AttributeError( + "could not find the number of encoder and decoder layers attributes in the model configuration," + " override the num_layers property of the model OnnxConfig to solve this" + ) + + return num_layers + + @property + def num_attention_heads(self) -> Tuple[int]: + try: + num_attention_heads = super().num_attention_heads + num_attention_heads = (num_attention_heads, num_attention_heads) + except AttributeError: + if hasattr(self._config, "encoder_attention_heads") and hasattr(self._config, "decoder_attention_heads"): + num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads) + else: + raise AttributeError( + "could not find the number of attention heads for the encoder and the decoder attributes in the" + " model configuration, override the num_attention_heads property of the model OnnxConfig to solve" + " this" + ) + return num_attention_heads + + def generate_dummy_inputs( + self, + tokenizer: "PreTrainedTokenizerBase", + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + # Generate decoder inputs + decoder_seq_length = seq_length if not self.use_past else 1 + decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=decoder_seq_length, is_pair=is_pair, framework=framework + ) + decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} + common_inputs = dict(**encoder_inputs, **decoder_inputs) + + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + batch = common_inputs["input_ids"].shape[0] + encoder_seq_length = common_inputs["input_ids"].shape[1] + decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] + num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads + encoder_shape = ( + batch, + num_encoder_attention_heads, + encoder_seq_length, + self._config.hidden_size // num_encoder_attention_heads, + ) + decoder_shape = ( + batch, + num_decoder_attention_heads, + # Not using the same length for past_key_values + decoder_seq_length + 3, + self._config.hidden_size // num_decoder_attention_heads, + ) + + common_inputs["past_key_values"] = [] + # If the number of encoder and decoder layers are present in the model configuration, both are considered + num_encoder_layers, num_decoder_layers = self.num_layers + min_num_layers = 
min(num_encoder_layers, num_decoder_layers) + max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers + remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" + + for _ in range(min_num_layers): + # For encoder-decoder models, past_key_values contains pre-computed values for both the encoder and the + # decoder layers, hence a tuple of 4 tensors instead of 2 + common_inputs["past_key_values"].append( + ( + torch.zeros(decoder_shape), + torch.zeros(decoder_shape), + torch.zeros(encoder_shape), + torch.zeros(encoder_shape), + ) + ) + + # TODO: test this. + shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape + for _ in range(min_num_layers, max_num_layers): + common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) + + return common_inputs + + def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str): + if direction not in ["inputs", "outputs"]: + raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') + + name = "past_key_values" if direction == "inputs" else "present" + + # If the number of encoder and decoder layers are present in the model configuration, both are considered + num_encoder_layers, num_decoder_layers = self.num_layers + min_num_layers = min(num_encoder_layers, num_decoder_layers) + max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers + remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" + + encoder_sequence = "past_encoder_sequence" + decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence" + + for i in range(min_num_layers): + inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence} + inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence} + inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence} + inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence} + + for i in range(min_num_layers, max_num_layers): + if remaining_side_name == "encoder": + axes_info = {0: "batch", 2: encoder_sequence} + else: + axes_info = {0: "batch", 2: decoder_sequence} + inputs_or_outputs[f"{name}.{i}.{remaining_side_name}.key"] = axes_info + + def _flatten_past_key_values_(self, flattened_output, name, idx, t): + flattened_output[f"{name}.{idx}.decoder.key"] = t[0] + flattened_output[f"{name}.{idx}.decoder.value"] = t[1] + flattened_output[f"{name}.{idx}.encoder.key"] = t[2] + flattened_output[f"{name}.{idx}.encoder.value"] = t[3] diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/convert.py b/venv/lib/python3.10/site-packages/transformers/onnx/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..36cd3232d4ba454d70b0565c8f48c895c510536a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/onnx/convert.py @@ -0,0 +1,460 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from inspect import signature +from itertools import chain +from pathlib import Path +from typing import TYPE_CHECKING, Iterable, List, Tuple, Union + +import numpy as np +from packaging.version import Version, parse + +from ..tokenization_utils_base import PreTrainedTokenizerBase +from ..utils import ( + TensorType, + is_tf_available, + is_torch_available, + logging, +) +from .config import OnnxConfig + + +if is_torch_available(): + from ..modeling_utils import PreTrainedModel + +if is_tf_available(): + from ..modeling_tf_utils import TFPreTrainedModel + +if TYPE_CHECKING: + from ..feature_extraction_utils import FeatureExtractionMixin + from ..processing_utils import ProcessorMixin + from ..tokenization_utils import PreTrainedTokenizer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# This is the minimal required version to support some ONNX Runtime features +ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0") + + +def check_onnxruntime_requirements(minimum_version: Version): + """ + Check onnxruntime is installed and if the installed version match is recent enough + + Raises: + ImportError: If onnxruntime is not installed or too old version is found + """ + try: + import onnxruntime + + # Parse the version of the installed onnxruntime + ort_version = parse(onnxruntime.__version__) + + # We require 1.4.0 minimum + if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: + raise ImportError( + f"We found an older version of onnxruntime ({onnxruntime.__version__}) " + f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n" + "Please update onnxruntime by running `pip install --upgrade onnxruntime`" + ) + + except ImportError: + raise ImportError( + "onnxruntime doesn't seem to be currently installed. " + "Please install the onnxruntime by running `pip install onnxruntime`" + " and relaunch the conversion." + ) + + +def export_pytorch( + preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], + model: "PreTrainedModel", + config: OnnxConfig, + opset: int, + output: Path, + tokenizer: "PreTrainedTokenizer" = None, + device: str = "cpu", +) -> Tuple[List[str], List[str]]: + """ + Export a PyTorch model to an ONNX Intermediate Representation (IR) + + Args: + preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): + The preprocessor used for encoding the data. + model ([`PreTrainedModel`]): + The model to export. + config ([`~onnx.config.OnnxConfig`]): + The ONNX configuration associated with the exported model. + opset (`int`): + The version of the ONNX operator set to use. + output (`Path`): + Directory to store the exported ONNX model. + device (`str`, *optional*, defaults to `cpu`): + The device on which the ONNX model will be exported. Either `cpu` or `cuda`. + + Returns: + `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from + the ONNX configuration. + """ + + if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: + raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.") + if tokenizer is not None: + warnings.warn( + "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. 
Use" + " `preprocessor` instead.", + FutureWarning, + ) + logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") + preprocessor = tokenizer + + if issubclass(type(model), PreTrainedModel): + import torch + from torch.onnx import export as onnx_export + + logger.info(f"Using framework PyTorch: {torch.__version__}") + with torch.no_grad(): + model.config.return_dict = True + model.eval() + + # Check if we need to override certain configuration item + if config.values_override is not None: + logger.info(f"Overriding {len(config.values_override)} configuration item(s)") + for override_config_key, override_config_value in config.values_override.items(): + logger.info(f"\t- {override_config_key} -> {override_config_value}") + setattr(model.config, override_config_key, override_config_value) + + # Ensure inputs match + # TODO: Check when exporting QA we provide "is_pair=True" + model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH) + device = torch.device(device) + if device.type == "cuda" and torch.cuda.is_available(): + model.to(device) + model_inputs_device = {} + for k, v in model_inputs.items(): + if isinstance(v, Tuple): + model_inputs_device[k] = tuple( + x.to(device) if isinstance(x, torch.Tensor) else None for x in v + ) + elif isinstance(v, List): + model_inputs_device[k] = [ + tuple(x.to(device) if isinstance(x, torch.Tensor) else None for x in t) for t in v + ] + else: + model_inputs_device[k] = v.to(device) + + model_inputs = model_inputs_device + + inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys()) + onnx_outputs = list(config.outputs.keys()) + + if not inputs_match: + raise ValueError("Model and config inputs doesn't match") + + config.patch_ops() + + onnx_export( + model, + (model_inputs,), + f=output.as_posix(), + input_names=list(config.inputs.keys()), + output_names=onnx_outputs, + dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())), + do_constant_folding=True, + opset_version=opset, + ) + + config.restore_ops() + + return matched_inputs, onnx_outputs + + +def export_tensorflow( + preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin"], + model: "TFPreTrainedModel", + config: OnnxConfig, + opset: int, + output: Path, + tokenizer: "PreTrainedTokenizer" = None, +) -> Tuple[List[str], List[str]]: + """ + Export a TensorFlow model to an ONNX Intermediate Representation (IR) + + Args: + preprocessor: ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]): + The preprocessor used for encoding the data. + model ([`TFPreTrainedModel`]): + The model to export. + config ([`~onnx.config.OnnxConfig`]): + The ONNX configuration associated with the exported model. + opset (`int`): + The version of the ONNX operator set to use. + output (`Path`): + Directory to store the exported ONNX model. + + Returns: + `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from + the ONNX configuration. + """ + import onnx + import tensorflow as tf + import tf2onnx + + if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: + raise ValueError("You cannot provide both a tokenizer and preprocessor to export the model.") + if tokenizer is not None: + warnings.warn( + "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. 
Use" + " `preprocessor` instead.", + FutureWarning, + ) + logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") + preprocessor = tokenizer + + model.config.return_dict = True + + # Check if we need to override certain configuration item + if config.values_override is not None: + logger.info(f"Overriding {len(config.values_override)} configuration item(s)") + for override_config_key, override_config_value in config.values_override.items(): + logger.info(f"\t- {override_config_key} -> {override_config_value}") + setattr(model.config, override_config_key, override_config_value) + + # Ensure inputs match + model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.TENSORFLOW) + inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys()) + onnx_outputs = list(config.outputs.keys()) + + input_signature = [ + tf.TensorSpec([None] * tensor.ndim, dtype=tensor.dtype, name=key) for key, tensor in model_inputs.items() + ] + onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset) + onnx.save(onnx_model, output.as_posix()) + config.restore_ops() + + return matched_inputs, onnx_outputs + + +def export( + preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], + model: Union["PreTrainedModel", "TFPreTrainedModel"], + config: OnnxConfig, + opset: int, + output: Path, + tokenizer: "PreTrainedTokenizer" = None, + device: str = "cpu", +) -> Tuple[List[str], List[str]]: + """ + Export a Pytorch or TensorFlow model to an ONNX Intermediate Representation (IR) + + Args: + preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): + The preprocessor used for encoding the data. + model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model to export. + config ([`~onnx.config.OnnxConfig`]): + The ONNX configuration associated with the exported model. + opset (`int`): + The version of the ONNX operator set to use. + output (`Path`): + Directory to store the exported ONNX model. + device (`str`, *optional*, defaults to `cpu`): + The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for + export on CUDA devices. + + Returns: + `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from + the ONNX configuration. + """ + if not (is_torch_available() or is_tf_available()): + raise ImportError( + "Cannot convert because neither PyTorch nor TensorFlow are not installed. " + "Please install torch or tensorflow first." + ) + + if is_tf_available() and isinstance(model, TFPreTrainedModel) and device == "cuda": + raise RuntimeError("`tf2onnx` does not support export on CUDA device.") + + if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: + raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.") + if tokenizer is not None: + warnings.warn( + "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" + " `preprocessor` instead.", + FutureWarning, + ) + logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") + preprocessor = tokenizer + + if is_torch_available(): + from ..utils import get_torch_version + + if not config.is_torch_support_available: + logger.warning( + f"Unsupported PyTorch version for this model. 
Minimum required is {config.torch_onnx_minimum_version}," + f" got: {get_torch_version()}" + ) + + if is_torch_available() and issubclass(type(model), PreTrainedModel): + return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device) + elif is_tf_available() and issubclass(type(model), TFPreTrainedModel): + return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer) + + +def validate_model_outputs( + config: OnnxConfig, + preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], + reference_model: Union["PreTrainedModel", "TFPreTrainedModel"], + onnx_model: Path, + onnx_named_outputs: List[str], + atol: float, + tokenizer: "PreTrainedTokenizer" = None, +): + from onnxruntime import InferenceSession, SessionOptions + + logger.info("Validating ONNX model...") + + if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: + raise ValueError("You cannot provide both a tokenizer and a preprocessor to validate the model outputs.") + if tokenizer is not None: + warnings.warn( + "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" + " `preprocessor` instead.", + FutureWarning, + ) + logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") + preprocessor = tokenizer + + # generate inputs with a different batch_size and seq_len that was used for conversion to properly test + # dynamic input shapes. + if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): + reference_model_inputs = config.generate_dummy_inputs( + preprocessor, + batch_size=config.default_fixed_batch + 1, + seq_length=config.default_fixed_sequence + 1, + framework=TensorType.PYTORCH, + ) + else: + reference_model_inputs = config.generate_dummy_inputs( + preprocessor, + batch_size=config.default_fixed_batch + 1, + seq_length=config.default_fixed_sequence + 1, + framework=TensorType.TENSORFLOW, + ) + + # Create ONNX Runtime session + options = SessionOptions() + session = InferenceSession(onnx_model.as_posix(), options, providers=["CPUExecutionProvider"]) + + # Compute outputs from the reference model + if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): + reference_model.to("cpu") + ref_outputs = reference_model(**reference_model_inputs) + ref_outputs_dict = {} + + # We flatten potential collection of outputs (i.e. past_keys) to a flat structure + for name, value in ref_outputs.items(): + # Overwriting the output name as "present" since it is the name used for the ONNX outputs + # ("past_key_values" being taken for the ONNX inputs) + if name == "past_key_values": + name = "present" + if isinstance(value, (list, tuple)): + value = config.flatten_output_collection_property(name, value) + ref_outputs_dict.update(value) + else: + ref_outputs_dict[name] = value + + # Create onnxruntime inputs from the reference model inputs + reference_model_inputs_onnxruntime = config.generate_dummy_inputs_onnxruntime(reference_model_inputs) + + # We flatten potential collection of inputs (i.e. 
past_keys)
+    onnx_inputs = {}
+    for name, value in reference_model_inputs_onnxruntime.items():
+        if isinstance(value, (list, tuple)):
+            value = config.flatten_output_collection_property(name, value)
+            onnx_inputs.update({tensor_name: pt_tensor.numpy() for tensor_name, pt_tensor in value.items()})
+        else:
+            onnx_inputs[name] = value.numpy()
+
+    # Compute outputs from the ONNX model
+    onnx_outputs = session.run(onnx_named_outputs, onnx_inputs)
+
+    # Check that the ONNX output names are a subset of the reference model's output names
+    ref_outputs_set, onnx_outputs_set = set(ref_outputs_dict.keys()), set(onnx_named_outputs)
+    if not onnx_outputs_set.issubset(ref_outputs_set):
+        logger.info(
+            f"\t-[x] ONNX model output names {onnx_outputs_set} do not match reference model {ref_outputs_set}"
+        )
+
+        raise ValueError(
+            "Output names don't match between reference model and ONNX exported model: "
+            f"{onnx_outputs_set.difference(ref_outputs_set)}"
+        )
+    else:
+        logger.info(f"\t-[✓] ONNX model output names match reference model ({onnx_outputs_set})")
+
+    # Check that shapes and values match
+    for name, ort_value in zip(onnx_named_outputs, onnx_outputs):
+        if is_torch_available() and issubclass(type(reference_model), PreTrainedModel):
+            ref_value = ref_outputs_dict[name].detach().numpy()
+        else:
+            ref_value = ref_outputs_dict[name].numpy()
+        logger.info(f'\t- Validating ONNX Model output "{name}":')
+
+        # Shape
+        if not ort_value.shape == ref_value.shape:
+            logger.info(f"\t\t-[x] shape {ort_value.shape} doesn't match {ref_value.shape}")
+            raise ValueError(
+                "Output shapes don't match between reference model and ONNX exported model: "
+                f"Got {ref_value.shape} (reference) and {ort_value.shape} (ONNX)"
+            )
+        else:
+            logger.info(f"\t\t-[✓] {ort_value.shape} matches {ref_value.shape}")
+
+        # Values
+        if not np.allclose(ref_value, ort_value, atol=atol):
+            bad_indices = np.logical_not(np.isclose(ref_value, ort_value, atol=atol))
+            logger.info(f"\t\t-[x] values not close enough (atol: {atol})")
+            raise ValueError(
+                "Output values don't match between reference model and ONNX exported model: "
+                f"Got max absolute difference of: {np.amax(np.abs(ref_value - ort_value))} for "
+                f"{ref_value[bad_indices]} vs {ort_value[bad_indices]}"
+            )
+        else:
+            logger.info(f"\t\t-[✓] all values close (atol: {atol})")
+
+
+def ensure_model_and_config_inputs_match(
+    model: Union["PreTrainedModel", "TFPreTrainedModel"], model_inputs: Iterable[str]
+) -> Tuple[bool, List[str]]:
+    """
+    Checks that the inputs generated from the ONNX config are accepted by the model's forward (or call) signature.
+
+    Args:
+        model: The model being exported.
+        model_inputs: The input names produced by the ONNX config.
+
+    Returns:
+        A tuple of (whether the inputs match, the matching input names ordered like the forward signature).
+    """
+    if is_torch_available() and issubclass(type(model), PreTrainedModel):
+        forward_parameters = signature(model.forward).parameters
+    else:
+        forward_parameters = signature(model.call).parameters
+    model_inputs_set = set(model_inputs)
+
+    # We are fine if config_inputs has more keys than model_inputs
+    forward_inputs_set = set(forward_parameters.keys())
+    is_ok = model_inputs_set.issubset(forward_inputs_set)
+
+    # Make sure the input order matches the forward signature (very important!)
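+    # Illustrative example (hypothetical signature, not from this file): for
+    # forward(input_ids, attention_mask, token_type_ids) and model_inputs = {"attention_mask", "input_ids"},
+    # the result below must be ["input_ids", "attention_mask"] - the forward-signature order, not the
+    # arbitrary iteration order of the set.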
+ matching_inputs = forward_inputs_set.intersection(model_inputs_set) + ordered_inputs = [parameter for parameter in forward_parameters.keys() if parameter in matching_inputs] + return is_ok, ordered_inputs diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/features.py b/venv/lib/python3.10/site-packages/transformers/onnx/features.py new file mode 100644 index 0000000000000000000000000000000000000000..1b0bf23d61213cf94a2c1e04650103a70cab4cfb --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/onnx/features.py @@ -0,0 +1,749 @@ +import os +from functools import partial, reduce +from typing import TYPE_CHECKING, Callable, Dict, Optional, Tuple, Type, Union + +import transformers + +from .. import PretrainedConfig, is_tf_available, is_torch_available +from ..utils import TF2_WEIGHTS_NAME, WEIGHTS_NAME, logging +from .config import OnnxConfig + + +if TYPE_CHECKING: + from transformers import PreTrainedModel, TFPreTrainedModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_torch_available(): + from transformers.models.auto import ( + AutoModel, + AutoModelForCausalLM, + AutoModelForImageClassification, + AutoModelForImageSegmentation, + AutoModelForMaskedImageModeling, + AutoModelForMaskedLM, + AutoModelForMultipleChoice, + AutoModelForObjectDetection, + AutoModelForQuestionAnswering, + AutoModelForSemanticSegmentation, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoModelForSpeechSeq2Seq, + AutoModelForTokenClassification, + AutoModelForVision2Seq, + ) +if is_tf_available(): + from transformers.models.auto import ( + TFAutoModel, + TFAutoModelForCausalLM, + TFAutoModelForMaskedLM, + TFAutoModelForMultipleChoice, + TFAutoModelForQuestionAnswering, + TFAutoModelForSemanticSegmentation, + TFAutoModelForSeq2SeqLM, + TFAutoModelForSequenceClassification, + TFAutoModelForTokenClassification, + ) +if not is_torch_available() and not is_tf_available(): + logger.warning( + "The ONNX export features are only supported for PyTorch or TensorFlow. You will not be able to export models" + " without one of these libraries installed." + ) + + +def supported_features_mapping( + *supported_features: str, onnx_config_cls: str = None +) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]: + """ + Generate the mapping between supported the features and their corresponding OnnxConfig for a given model. + + Args: + *supported_features: The names of the supported features. + onnx_config_cls: The OnnxConfig full name corresponding to the model. + + Returns: + The dictionary mapping a feature to an OnnxConfig constructor. 
+ """ + if onnx_config_cls is None: + raise ValueError("A OnnxConfig class must be provided") + + config_cls = transformers + for attr_name in onnx_config_cls.split("."): + config_cls = getattr(config_cls, attr_name) + mapping = {} + for feature in supported_features: + if "-with-past" in feature: + task = feature.replace("-with-past", "") + mapping[feature] = partial(config_cls.with_past, task=task) + else: + mapping[feature] = partial(config_cls.from_model_config, task=feature) + + return mapping + + +class FeaturesManager: + _TASKS_TO_AUTOMODELS = {} + _TASKS_TO_TF_AUTOMODELS = {} + if is_torch_available(): + _TASKS_TO_AUTOMODELS = { + "default": AutoModel, + "masked-lm": AutoModelForMaskedLM, + "causal-lm": AutoModelForCausalLM, + "seq2seq-lm": AutoModelForSeq2SeqLM, + "sequence-classification": AutoModelForSequenceClassification, + "token-classification": AutoModelForTokenClassification, + "multiple-choice": AutoModelForMultipleChoice, + "object-detection": AutoModelForObjectDetection, + "question-answering": AutoModelForQuestionAnswering, + "image-classification": AutoModelForImageClassification, + "image-segmentation": AutoModelForImageSegmentation, + "masked-im": AutoModelForMaskedImageModeling, + "semantic-segmentation": AutoModelForSemanticSegmentation, + "vision2seq-lm": AutoModelForVision2Seq, + "speech2seq-lm": AutoModelForSpeechSeq2Seq, + } + if is_tf_available(): + _TASKS_TO_TF_AUTOMODELS = { + "default": TFAutoModel, + "masked-lm": TFAutoModelForMaskedLM, + "causal-lm": TFAutoModelForCausalLM, + "seq2seq-lm": TFAutoModelForSeq2SeqLM, + "sequence-classification": TFAutoModelForSequenceClassification, + "token-classification": TFAutoModelForTokenClassification, + "multiple-choice": TFAutoModelForMultipleChoice, + "question-answering": TFAutoModelForQuestionAnswering, + "semantic-segmentation": TFAutoModelForSemanticSegmentation, + } + + # Set of model topologies we support associated to the features supported by each topology and the factory + _SUPPORTED_MODEL_TYPE = { + "albert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.albert.AlbertOnnxConfig", + ), + "bart": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + "sequence-classification", + "question-answering", + onnx_config_cls="models.bart.BartOnnxConfig", + ), + # BEiT cannot be used with the masked image modeling autoclass, so this feature is excluded here + "beit": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.beit.BeitOnnxConfig" + ), + "bert": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.bert.BertOnnxConfig", + ), + "big-bird": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.big_bird.BigBirdOnnxConfig", + ), + "bigbird-pegasus": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + "sequence-classification", + "question-answering", + onnx_config_cls="models.bigbird_pegasus.BigBirdPegasusOnnxConfig", + ), + "blenderbot": supported_features_mapping( + 
"default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.blenderbot.BlenderbotOnnxConfig", + ), + "blenderbot-small": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.blenderbot_small.BlenderbotSmallOnnxConfig", + ), + "bloom": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "sequence-classification", + "token-classification", + onnx_config_cls="models.bloom.BloomOnnxConfig", + ), + "camembert": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.camembert.CamembertOnnxConfig", + ), + "clip": supported_features_mapping( + "default", + onnx_config_cls="models.clip.CLIPOnnxConfig", + ), + "codegen": supported_features_mapping( + "default", + "causal-lm", + onnx_config_cls="models.codegen.CodeGenOnnxConfig", + ), + "convbert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.convbert.ConvBertOnnxConfig", + ), + "convnext": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.convnext.ConvNextOnnxConfig", + ), + "data2vec-text": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.data2vec.Data2VecTextOnnxConfig", + ), + "data2vec-vision": supported_features_mapping( + "default", + "image-classification", + # ONNX doesn't support `adaptive_avg_pool2d` yet + # "semantic-segmentation", + onnx_config_cls="models.data2vec.Data2VecVisionOnnxConfig", + ), + "deberta": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "token-classification", + "question-answering", + onnx_config_cls="models.deberta.DebertaOnnxConfig", + ), + "deberta-v2": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.deberta_v2.DebertaV2OnnxConfig", + ), + "deit": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.deit.DeiTOnnxConfig" + ), + "detr": supported_features_mapping( + "default", + "object-detection", + "image-segmentation", + onnx_config_cls="models.detr.DetrOnnxConfig", + ), + "distilbert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.distilbert.DistilBertOnnxConfig", + ), + "electra": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.electra.ElectraOnnxConfig", + ), + "flaubert": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.flaubert.FlaubertOnnxConfig", + ), + "gpt2": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + 
"sequence-classification", + "token-classification", + onnx_config_cls="models.gpt2.GPT2OnnxConfig", + ), + "gptj": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "question-answering", + "sequence-classification", + onnx_config_cls="models.gptj.GPTJOnnxConfig", + ), + "gpt-neo": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "sequence-classification", + onnx_config_cls="models.gpt_neo.GPTNeoOnnxConfig", + ), + "groupvit": supported_features_mapping( + "default", + onnx_config_cls="models.groupvit.GroupViTOnnxConfig", + ), + "ibert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.ibert.IBertOnnxConfig", + ), + "imagegpt": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.imagegpt.ImageGPTOnnxConfig" + ), + "layoutlm": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "token-classification", + onnx_config_cls="models.layoutlm.LayoutLMOnnxConfig", + ), + "layoutlmv3": supported_features_mapping( + "default", + "question-answering", + "sequence-classification", + "token-classification", + onnx_config_cls="models.layoutlmv3.LayoutLMv3OnnxConfig", + ), + "levit": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.levit.LevitOnnxConfig" + ), + "longt5": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.longt5.LongT5OnnxConfig", + ), + "longformer": supported_features_mapping( + "default", + "masked-lm", + "multiple-choice", + "question-answering", + "sequence-classification", + "token-classification", + onnx_config_cls="models.longformer.LongformerOnnxConfig", + ), + "marian": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + "causal-lm", + "causal-lm-with-past", + onnx_config_cls="models.marian.MarianOnnxConfig", + ), + "mbart": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + "sequence-classification", + "question-answering", + onnx_config_cls="models.mbart.MBartOnnxConfig", + ), + "mobilebert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.mobilebert.MobileBertOnnxConfig", + ), + "mobilenet-v1": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.mobilenet_v1.MobileNetV1OnnxConfig", + ), + "mobilenet-v2": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.mobilenet_v2.MobileNetV2OnnxConfig", + ), + "mobilevit": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.mobilevit.MobileViTOnnxConfig", + ), + "mt5": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.mt5.MT5OnnxConfig", + ), + "m2m-100": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.m2m_100.M2M100OnnxConfig", + ), + "owlvit": supported_features_mapping( + "default", + onnx_config_cls="models.owlvit.OwlViTOnnxConfig", + ), + 
"perceiver": supported_features_mapping( + "image-classification", + "masked-lm", + "sequence-classification", + onnx_config_cls="models.perceiver.PerceiverOnnxConfig", + ), + "poolformer": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.poolformer.PoolFormerOnnxConfig" + ), + "rembert": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.rembert.RemBertOnnxConfig", + ), + "resnet": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.resnet.ResNetOnnxConfig", + ), + "roberta": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.roberta.RobertaOnnxConfig", + ), + "roformer": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "token-classification", + "multiple-choice", + "question-answering", + "token-classification", + onnx_config_cls="models.roformer.RoFormerOnnxConfig", + ), + "segformer": supported_features_mapping( + "default", + "image-classification", + "semantic-segmentation", + onnx_config_cls="models.segformer.SegformerOnnxConfig", + ), + "squeezebert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.squeezebert.SqueezeBertOnnxConfig", + ), + "swin": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.swin.SwinOnnxConfig" + ), + "t5": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.t5.T5OnnxConfig", + ), + "vision-encoder-decoder": supported_features_mapping( + "vision2seq-lm", onnx_config_cls="models.vision_encoder_decoder.VisionEncoderDecoderOnnxConfig" + ), + "vit": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.vit.ViTOnnxConfig" + ), + "whisper": supported_features_mapping( + "default", + "default-with-past", + "speech2seq-lm", + "speech2seq-lm-with-past", + onnx_config_cls="models.whisper.WhisperOnnxConfig", + ), + "xlm": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.xlm.XLMOnnxConfig", + ), + "xlm-roberta": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.xlm_roberta.XLMRobertaOnnxConfig", + ), + "yolos": supported_features_mapping( + "default", + "object-detection", + onnx_config_cls="models.yolos.YolosOnnxConfig", + ), + } + + AVAILABLE_FEATURES = sorted(reduce(lambda s1, s2: s1 | s2, (v.keys() for v in _SUPPORTED_MODEL_TYPE.values()))) + + @staticmethod + def get_supported_features_for_model_type( + model_type: str, model_name: Optional[str] = None + ) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]: + """ + Tries to retrieve the feature -> OnnxConfig constructor map from the model type. + + Args: + model_type (`str`): + The model type to retrieve the supported features for. 
+ model_name (`str`, *optional*): + The name attribute of the model object, only used for the exception message. + + Returns: + The dictionary mapping each feature to a corresponding OnnxConfig constructor. + """ + model_type = model_type.lower() + if model_type not in FeaturesManager._SUPPORTED_MODEL_TYPE: + model_type_and_model_name = f"{model_type} ({model_name})" if model_name else model_type + raise KeyError( + f"{model_type_and_model_name} is not supported yet. " + f"Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. " + f"If you want to support {model_type} please propose a PR or open up an issue." + ) + return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type] + + @staticmethod + def feature_to_task(feature: str) -> str: + return feature.replace("-with-past", "") + + @staticmethod + def _validate_framework_choice(framework: str): + """ + Validates if the framework requested for the export is both correct and available, otherwise throws an + exception. + """ + if framework not in ["pt", "tf"]: + raise ValueError( + f"Only two frameworks are supported for ONNX export: pt or tf, but {framework} was provided." + ) + elif framework == "pt" and not is_torch_available(): + raise RuntimeError("Cannot export model to ONNX using PyTorch because no PyTorch package was found.") + elif framework == "tf" and not is_tf_available(): + raise RuntimeError("Cannot export model to ONNX using TensorFlow because no TensorFlow package was found.") + + @staticmethod + def get_model_class_for_feature(feature: str, framework: str = "pt") -> Type: + """ + Attempts to retrieve an AutoModel class from a feature name. + + Args: + feature (`str`): + The feature required. + framework (`str`, *optional*, defaults to `"pt"`): + The framework to use for the export. + + Returns: + The AutoModel class corresponding to the feature. + """ + task = FeaturesManager.feature_to_task(feature) + FeaturesManager._validate_framework_choice(framework) + if framework == "pt": + task_to_automodel = FeaturesManager._TASKS_TO_AUTOMODELS + else: + task_to_automodel = FeaturesManager._TASKS_TO_TF_AUTOMODELS + if task not in task_to_automodel: + raise KeyError( + f"Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}" + ) + + return task_to_automodel[task] + + @staticmethod + def determine_framework(model: str, framework: str = None) -> str: + """ + Determines the framework to use for the export. + + The priority is in the following order: + 1. User input via `framework`. + 2. If local checkpoint is provided, use the same framework as the checkpoint. + 3. Available framework in environment, with priority given to PyTorch + + Args: + model (`str`): + The name of the model to export. + framework (`str`, *optional*, defaults to `None`): + The framework to use for the export. See above for priority if none provided. + + Returns: + The framework to use for the export. + + """ + if framework is not None: + return framework + + framework_map = {"pt": "PyTorch", "tf": "TensorFlow"} + exporter_map = {"pt": "torch", "tf": "tf2onnx"} + + if os.path.isdir(model): + if os.path.isfile(os.path.join(model, WEIGHTS_NAME)): + framework = "pt" + elif os.path.isfile(os.path.join(model, TF2_WEIGHTS_NAME)): + framework = "tf" + else: + raise FileNotFoundError( + "Cannot determine framework from given checkpoint location." + f" There should be a {WEIGHTS_NAME} for PyTorch" + f" or {TF2_WEIGHTS_NAME} for TensorFlow." 
+ ) + logger.info(f"Local {framework_map[framework]} model found.") + else: + if is_torch_available(): + framework = "pt" + elif is_tf_available(): + framework = "tf" + else: + raise EnvironmentError("Neither PyTorch nor TensorFlow found in environment. Cannot export to ONNX.") + + logger.info(f"Framework not requested. Using {exporter_map[framework]} to export to ONNX.") + + return framework + + @staticmethod + def get_model_from_feature( + feature: str, model: str, framework: str = None, cache_dir: str = None + ) -> Union["PreTrainedModel", "TFPreTrainedModel"]: + """ + Attempts to retrieve a model from a model's name and the feature to be enabled. + + Args: + feature (`str`): + The feature required. + model (`str`): + The name of the model to export. + framework (`str`, *optional*, defaults to `None`): + The framework to use for the export. See `FeaturesManager.determine_framework` for the priority should + none be provided. + + Returns: + The instance of the model. + + """ + framework = FeaturesManager.determine_framework(model, framework) + model_class = FeaturesManager.get_model_class_for_feature(feature, framework) + try: + model = model_class.from_pretrained(model, cache_dir=cache_dir) + except OSError: + if framework == "pt": + logger.info("Loading TensorFlow model in PyTorch before exporting to ONNX.") + model = model_class.from_pretrained(model, from_tf=True, cache_dir=cache_dir) + else: + logger.info("Loading PyTorch model in TensorFlow before exporting to ONNX.") + model = model_class.from_pretrained(model, from_pt=True, cache_dir=cache_dir) + return model + + @staticmethod + def check_supported_model_or_raise( + model: Union["PreTrainedModel", "TFPreTrainedModel"], feature: str = "default" + ) -> Tuple[str, Callable]: + """ + Check whether or not the model has the requested features. + + Args: + model: The model to export. + feature: The name of the feature to check if it is available. + + Returns: + (str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties. + + """ + model_type = model.config.model_type.replace("_", "-") + model_name = getattr(model, "name", "") + model_features = FeaturesManager.get_supported_features_for_model_type(model_type, model_name=model_name) + if feature not in model_features: + raise ValueError( + f"{model.config.model_type} doesn't support feature {feature}. Supported values are: {model_features}" + ) + + return model.config.model_type, FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature] + + def get_config(model_type: str, feature: str) -> OnnxConfig: + """ + Gets the OnnxConfig for a model_type and feature combination. + + Args: + model_type (`str`): + The model type to retrieve the config for. + feature (`str`): + The feature to retrieve the config for. + + Returns: + `OnnxConfig`: config for the combination + """ + return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature] diff --git a/venv/lib/python3.10/site-packages/transformers/onnx/utils.py b/venv/lib/python3.10/site-packages/transformers/onnx/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9672b0a96af88ffa2c7e791d1f4d7c818174247f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/onnx/utils.py @@ -0,0 +1,109 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ctypes import c_float, sizeof +from enum import Enum +from typing import TYPE_CHECKING, Optional, Union + + +if TYPE_CHECKING: + from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore + + +class ParameterFormat(Enum): + Float = c_float + + @property + def size(self) -> int: + """ + Number of byte required for this data type + + Returns: + Integer > 0 + """ + return sizeof(self.value) + + +def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int: + """ + + Args: + dimension: + fixed_dimension: + num_token_to_add: + + Returns: + + """ + # < 0 is possible if using a dynamic axis + if dimension <= 0: + dimension = fixed_dimension + + dimension -= num_token_to_add + return dimension + + +def compute_serialized_parameters_size(num_parameters: int, dtype: ParameterFormat) -> int: + """ + Compute the size taken by all the parameters in the given the storage format when serializing the model + + Args: + num_parameters: Number of parameters to be saved + dtype: The data format each parameter will be saved + + Returns: + Size (in byte) taken to save all the parameters + """ + return num_parameters * dtype.size + + +def get_preprocessor(model_name: str) -> Optional[Union["AutoTokenizer", "AutoFeatureExtractor", "AutoProcessor"]]: + """ + Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. + + Args: + model_name (`str`): Name of the model for which a preprocessor are loaded. + + Returns: + `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: + If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is + returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns + `None` if no preprocessor is found. + """ + # Avoid circular imports by only importing this here. + from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore + + try: + return AutoProcessor.from_pretrained(model_name) + except (ValueError, OSError, KeyError): + tokenizer, feature_extractor = None, None + try: + tokenizer = AutoTokenizer.from_pretrained(model_name) + except (OSError, KeyError): + pass + try: + feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) + except (OSError, KeyError): + pass + + if tokenizer is not None and feature_extractor is not None: + raise ValueError( + f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor." + ) + elif tokenizer is None and feature_extractor is None: + return None + elif tokenizer is not None: + return tokenizer + else: + return feature_extractor diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__init__.py b/venv/lib/python3.10/site-packages/transformers/quantizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3409af4cd78c62daa258303472fa5a6345b090d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 The HuggingFace Inc. team. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .auto import AutoHfQuantizer, AutoQuantizationConfig +from .base import HfQuantizer diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7b67b57b28f9990b188a391fe164b8b62a3f0dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d66dfcf885de936b3609b1b75f958bcb6b118aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c3ce87760c00c3a8a0ee0476ff032fd67538e23 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f41f4086e56ce01605503a22775bb6546e15a094 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0a4e75a77fd8bde3be5e780beaf3a7fa2e565c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40394f53a7a1c5230e73a580f5893830cf063c97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..526b5bf514b6ef764321318c6af4295b95f3d77c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5aee76cf1cde65d39cccea31e443afaa31303a13 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b37f03dba9c6ddf7ede324de3cde725f70beba42 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b7d31f41906b1bdcc7bf2151175cf6f14bd2ecf Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/auto.py b/venv/lib/python3.10/site-packages/transformers/quantizers/auto.py new file mode 100644 index 0000000000000000000000000000000000000000..616e206a45b596caacca8cfacc417de7e9a3cd8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/auto.py @@ -0,0 +1,161 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
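+# Usage sketch (illustrative only; normally these classes are driven by `PreTrainedModel.from_pretrained`,
+# but they can also be used directly):
+#
+#     from transformers import BitsAndBytesConfig
+#     from transformers.quantizers import AutoHfQuantizer
+#
+#     bnb_config = BitsAndBytesConfig(load_in_8bit=True)
+#     quantizer = AutoHfQuantizer.from_config(bnb_config)  # resolves to the 8-bit bitsandbytes quantizer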
+import warnings
+from typing import Dict, Optional, Union
+
+from ..models.auto.configuration_auto import AutoConfig
+from ..utils.quantization_config import (
+    AqlmConfig,
+    AwqConfig,
+    BitsAndBytesConfig,
+    GPTQConfig,
+    QuantizationConfigMixin,
+    QuantizationMethod,
+    QuantoConfig,
+)
+from .quantizer_aqlm import AqlmHfQuantizer
+from .quantizer_awq import AwqQuantizer
+from .quantizer_bnb_4bit import Bnb4BitHfQuantizer
+from .quantizer_bnb_8bit import Bnb8BitHfQuantizer
+from .quantizer_gptq import GptqHfQuantizer
+from .quantizer_quanto import QuantoHfQuantizer
+
+
+AUTO_QUANTIZER_MAPPING = {
+    "awq": AwqQuantizer,
+    "bitsandbytes_4bit": Bnb4BitHfQuantizer,
+    "bitsandbytes_8bit": Bnb8BitHfQuantizer,
+    "gptq": GptqHfQuantizer,
+    "aqlm": AqlmHfQuantizer,
+    "quanto": QuantoHfQuantizer,
+}
+
+AUTO_QUANTIZATION_CONFIG_MAPPING = {
+    "awq": AwqConfig,
+    "bitsandbytes_4bit": BitsAndBytesConfig,
+    "bitsandbytes_8bit": BitsAndBytesConfig,
+    "gptq": GPTQConfig,
+    "aqlm": AqlmConfig,
+    "quanto": QuantoConfig,
+}
+
+
+class AutoQuantizationConfig:
+    """
+    The Auto-HF quantization config class that takes care of automatically dispatching to the correct
+    quantization config class, given a quantization config stored in a dictionary.
+    """
+
+    @classmethod
+    def from_dict(cls, quantization_config_dict: Dict):
+        quant_method = quantization_config_dict.get("quant_method", None)
+        # bnb models need special care to make sure everything stays backward compatible
+        if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
+            suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
+            quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
+        elif quant_method is None:
+            raise ValueError(
+                "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized."
+            )
+
+        if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys():
+            raise ValueError(
+                f"Unknown quantization type, got {quant_method} - supported types are:"
+                f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
+            )
+
+        target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
+        return target_cls.from_dict(quantization_config_dict)
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+        model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
+        if getattr(model_config, "quantization_config", None) is None:
+            raise ValueError(
+                f"Did not find a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
+            )
+        quantization_config_dict = model_config.quantization_config
+        quantization_config = cls.from_dict(quantization_config_dict)
+        # Update with potential kwargs that are passed through from_pretrained.
+        quantization_config.update(kwargs)
+        return quantization_config
+
+
+class AutoHfQuantizer:
+    """
+    The Auto-HF quantizer class that takes care of automatically instantiating the correct
+    `HfQuantizer` given the `QuantizationConfig`.
+ """ + + @classmethod + def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs): + # Convert it to a QuantizationConfig if the q_config is a dict + if isinstance(quantization_config, dict): + quantization_config = AutoQuantizationConfig.from_dict(quantization_config) + + quant_method = quantization_config.quant_method + + # Again, we need a special care for bnb as we have a single quantization config + # class for both 4-bit and 8-bit quantization + if quant_method == QuantizationMethod.BITS_AND_BYTES: + if quantization_config.load_in_8bit: + quant_method += "_8bit" + else: + quant_method += "_4bit" + + if quant_method not in AUTO_QUANTIZER_MAPPING.keys(): + raise ValueError( + f"Unknown quantization type, got {quant_method} - supported types are:" + f" {list(AUTO_QUANTIZER_MAPPING.keys())}" + ) + + target_cls = AUTO_QUANTIZER_MAPPING[quant_method] + return target_cls(quantization_config, **kwargs) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + quantization_config = AutoQuantizationConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) + return cls.from_config(quantization_config) + + @classmethod + def merge_quantization_configs( + cls, + quantization_config: Union[dict, QuantizationConfigMixin], + quantization_config_from_args: Optional[QuantizationConfigMixin], + ): + """ + handles situations where both quantization_config from args and quantization_config from model config are present. + """ + if quantization_config_from_args is not None: + warning_msg = ( + "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading" + " already has a `quantization_config` attribute. The `quantization_config` from the model will be used." + ) + else: + warning_msg = "" + + if isinstance(quantization_config, dict): + quantization_config = AutoQuantizationConfig.from_dict(quantization_config) + + if isinstance(quantization_config, (GPTQConfig, AwqConfig)) and quantization_config_from_args is not None: + # special case for GPTQ / AWQ config collision + loading_attr_dict = quantization_config_from_args.get_loading_attributes() + for attr, val in loading_attr_dict.items(): + setattr(quantization_config, attr, val) + warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored." + + if warning_msg != "": + warnings.warn(warning_msg) + + return quantization_config diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/base.py b/venv/lib/python3.10/site-packages/transformers/quantizers/base.py new file mode 100644 index 0000000000000000000000000000000000000000..226995eea0ca5bcbbff371169a0dcb48ef036c77 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/base.py @@ -0,0 +1,213 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from ..utils import is_torch_available
+from ..utils.quantization_config import QuantizationConfigMixin
+
+
+if TYPE_CHECKING:
+    from ..modeling_utils import PreTrainedModel
+
+if is_torch_available():
+    import torch
+
+
+class HfQuantizer(ABC):
+    """
+    Abstract class of the HuggingFace quantizer. Currently supports quantizing HF transformers models for inference
+    and/or quantization. This class is used only within `transformers.PreTrainedModel.from_pretrained` and cannot yet
+    be easily used outside the scope of that method.
+
+    Attributes:
+        quantization_config (`transformers.utils.quantization_config.QuantizationConfigMixin`):
+            The quantization config that defines the quantization parameters of the model that you want to quantize.
+        modules_to_not_convert (`List[str]`, *optional*):
+            The list of module names to not convert when quantizing the model.
+        required_packages (`List[str]`, *optional*):
+            The list of required pip packages to install prior to using the quantizer.
+        requires_calibration (`bool`):
+            Whether the quantization method requires calibrating the model before using it.
+        requires_parameters_quantization (`bool`):
+            Whether the quantization method requires creating a new Parameter. For example, bitsandbytes
+            requires creating a new xxxParameter in order to properly quantize the model.
+    """
+
+    requires_calibration = False
+    required_packages = None
+    requires_parameters_quantization = False
+
+    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
+        self.quantization_config = quantization_config
+
+        # -- Handle extra kwargs below --
+        self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
+        self.pre_quantized = kwargs.pop("pre_quantized", True)
+
+        if not self.pre_quantized and self.requires_calibration:
+            raise ValueError(
+                f"The quantization method {quantization_config.quant_method} requires the model to be pre-quantized."
+                f" You explicitly passed `pre_quantized=False`, meaning your model weights are not quantized. Make sure"
+                f" to pass `pre_quantized=True` when loading already-quantized weights."
+            )
+
+    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+        """
+        Some quantization methods require explicitly setting the dtype of the model to a target dtype. Override this
+        method if you need to make sure that behavior is preserved.
+
+        Args:
+            torch_dtype (`torch.dtype`):
+                The input dtype that is passed in `from_pretrained`.
+        """
+        return torch_dtype
+
+    def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+        """
+        Override this method if you want to override the existing device map with a new one. E.g. for bitsandbytes,
+        since `accelerate` is a hard requirement, if no device_map is passed, the device_map is set to `"auto"`.
+
+        Args:
+            device_map (`Union[dict, str]`, *optional*):
+                The device_map that is passed through the `from_pretrained` method.
+        """
+        return device_map
+
+    def adjust_target_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+        """
+        Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained`
+        to compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype`
+        to `torch.int8`, and for 4-bit we pass a custom enum `accelerate.CustomDtype.INT4`.
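+
+        The returned value is only consulted when `device_map` is a string such as `"auto"`: it is the dtype used to
+        estimate per-parameter memory when `accelerate` computes the device map (compare `adjust_max_memory` below,
+        which tweaks the memory budget for the same computation).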
+
+        Args:
+            torch_dtype (`torch.dtype`, *optional*):
+                The torch_dtype that is used to compute the device_map.
+        """
+        return torch_dtype
+
+    def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
+        """
+        Override this method if you want to adjust the `missing_keys`.
+
+        Args:
+            missing_keys (`List[str]`, *optional*):
+                The list of missing keys in the checkpoint compared to the state dict of the model.
+        """
+        return missing_keys
+
+    def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]:
+        """
+        Returns the dtypes of the modules that are not quantized - used to compute the device_map in case
+        one passes a str as the device_map. The method uses the `modules_to_not_convert` list that is modified
+        in `_process_model_before_weight_loading`.
+
+        Args:
+            model (`~transformers.PreTrainedModel`):
+                The model to quantize.
+            torch_dtype (`torch.dtype`):
+                The dtype passed in the `from_pretrained` method.
+        """
+
+        return {
+            name: torch_dtype
+            for name, _ in model.named_parameters()
+            if any(m in name for m in self.modules_to_not_convert)
+        }
+
+    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
+        """Adjust the `max_memory` argument for `infer_auto_device_map()` if extra memory is needed for quantization."""
+        return max_memory
+
+    def check_quantized_param(
+        self,
+        model: "PreTrainedModel",
+        param_value: "torch.Tensor",
+        param_name: str,
+        state_dict: Dict[str, Any],
+        **kwargs,
+    ) -> bool:
+        """
+        Checks if a loaded state_dict component belongs to a quantized parameter, plus some validation. Only defined
+        when `requires_parameters_quantization == True`, i.e. for quantization methods that require creating new
+        parameters for quantization.
+        """
+        return False
+
+    def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
+        """
+        Takes the needed components from the state_dict and creates a quantized parameter. Only applicable when
+        `requires_parameters_quantization == True`.
+        """
+        if not self.requires_parameters_quantization:
+            raise AttributeError(
+                f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}."
+            )
+
+    def validate_environment(self, *args, **kwargs):
+        """
+        This method is used to check for potential conflicts with arguments that are passed in `from_pretrained`.
+        You need to define it for all future quantizers that are integrated with transformers. If no explicit checks
+        are needed, simply return nothing.
+        """
+        return
+
+    def preprocess_model(self, model: "PreTrainedModel", **kwargs):
+        """
+        Sets model attributes and/or converts the model before weight loading. At this point the model should be
+        initialized on the meta device, so you can freely manipulate the skeleton of the model in order to replace
+        modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
+
+        Args:
+            model (`~transformers.PreTrainedModel`):
+                The model to quantize.
+            kwargs (`dict`, *optional*):
+                The keyword arguments that are passed along to `_process_model_before_weight_loading`.
+        """
+        model.is_quantized = True
+        model.quantization_method = self.quantization_config.quant_method
+        return self._process_model_before_weight_loading(model, **kwargs)
+
+    def postprocess_model(self, model: "PreTrainedModel", **kwargs):
+        """
+        Post-processes the model after weight loading.
+        Make sure to override the abstract method `_process_model_after_weight_loading`.
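+
+        For example, `GptqHfQuantizer._process_model_after_weight_loading` in this package calls
+        `optimum_quantizer.post_init_model(model)` at this stage for pre-quantized checkpoints.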
+ + Args: + model (`~transformers.PreTrainedModel`): + The model to quantize + kwargs (`dict`, *optional*): + The keyword arguments that are passed along `_process_model_after_weight_loading`. + """ + return self._process_model_after_weight_loading(model, **kwargs) + + @abstractmethod + def _process_model_before_weight_loading(self, model, **kwargs): + ... + + @abstractmethod + def _process_model_after_weight_loading(self, model, **kwargs): + ... + + @property + @abstractmethod + def is_serializable(self): + ... + + @property + @abstractmethod + def is_trainable(self): + ... diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py new file mode 100644 index 0000000000000000000000000000000000000000..530071616115ac9ab58cdcbdfcf38b90589b10c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py @@ -0,0 +1,98 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +from typing import TYPE_CHECKING, Optional + +from packaging import version + +from .base import HfQuantizer + + +if TYPE_CHECKING: + from ..modeling_utils import PreTrainedModel + +from ..integrations import replace_with_aqlm_linear +from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available, logging +from ..utils.quantization_config import QuantizationConfigMixin + + +if is_torch_available(): + import torch + +logger = logging.get_logger(__name__) + + +class AqlmHfQuantizer(HfQuantizer): + """ + Quantizer of the AQLM method. Enables the loading of prequantized models. + """ + + requires_calibration = True + required_packages = ["aqlm"] + optimum_quantizer = None + + def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs): + super().__init__(quantization_config, **kwargs) + self.quantization_config = quantization_config + + def validate_environment(self, *args, **kwargs): + if not is_accelerate_available(): + raise ImportError("Using `aqlm` quantization requires Accelerate: `pip install accelerate`") + + if not is_aqlm_available(): + raise ImportError("Using `aqlm` quantization requires AQLM: `pip install aqlm[gpu,cpu]`") + + def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": + if torch_dtype is None: + if torch.cuda.is_available(): + torch_dtype = torch.float16 + logger.info( + "CUDA available. Assuming AQLM inference on GPU and loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually." + ) + else: + torch_dtype = torch.float32 + logger.info( + "CUDA is unavailable. Assuming AQLM inference on CPU and loading the model in `torch.float32`. To overwrite it, set `torch_dtype` manually." 
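+                    # Illustrative note: either default can be overridden by passing `torch_dtype`
+                    # explicitly, e.g. AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16),
+                    # where `model_id` stands for any AQLM-quantized checkpoint.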
+ ) + return torch_dtype + + def _process_model_before_weight_loading( + self, + model: "PreTrainedModel", + **kwargs, + ): + replace_with_aqlm_linear( + model, + quantization_config=self.quantization_config, + linear_weights_not_to_quantize=self.quantization_config.linear_weights_not_to_quantize, + ) + model.config.quantization_config = self.quantization_config + + def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): + return model + + @property + def is_trainable(self, model: Optional["PreTrainedModel"] = None): + aqlm_supports_training = version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.2") + if aqlm_supports_training: + return True + else: + logger.warning( + f"Currently installed `aqlm` version ({importlib.metadata.version('aqlm')}) doesn't support training. If you wish to train a quantized model, please update `aqlm` with `pip install aqlm>=1.0.2`" + ) + return False + + @property + def is_serializable(self): + return True diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_awq.py b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_awq.py new file mode 100644 index 0000000000000000000000000000000000000000..5e66f9baf1c0a76fd44e56ca9bdfb7d6fddc236e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_awq.py @@ -0,0 +1,124 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib.metadata +from typing import TYPE_CHECKING + +from packaging import version + +from .base import HfQuantizer + + +if TYPE_CHECKING: + from ..modeling_utils import PreTrainedModel + +from ..utils import is_accelerate_available, is_auto_awq_available, is_torch_available, logging +from ..utils.quantization_config import AWQLinearVersion + + +if is_torch_available(): + import torch + +logger = logging.get_logger(__name__) + + +class AwqQuantizer(HfQuantizer): + """ + 4-bit quantization for Activation-aware Weight Quantization(AWQ) (https://arxiv.org/abs/2306.00978) + """ + + # AWQ requires data callibration - we support only inference + requires_calibration = True + + required_packages = ["awq", "accelerate"] + + def __init__(self, quantization_config, **kwargs): + super().__init__(quantization_config, **kwargs) + + def validate_environment(self, device_map, **kwargs): + if not torch.cuda.is_available(): + raise RuntimeError("GPU is required to run AWQ quantized model.") + + if not is_auto_awq_available(): + raise ImportError("Loading an AWQ quantized model requires auto-awq library (`pip install autoawq`)") + + if not is_accelerate_available(): + raise ImportError("Loading an AWQ quantized model requires accelerate (`pip install accelerate`)") + + if device_map is None: + logger.warning_once( + "You have loaded an AWQ model on CPU and have a CUDA device available, make sure to set " + "your model on a GPU device in order to run your model." 
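+                # Illustrative note: passing device_map="auto" or device_map="cuda:0" to
+                # `from_pretrained` places the model on GPU, which avoids this warning.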
+ ) + elif device_map is not None: + if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()): + raise ValueError( + "You are attempting to load an AWQ model with a device_map that contains a CPU or disk device." + " This is not supported. Please remove the CPU or disk device from the device_map." + ) + + def update_torch_dtype(self, torch_dtype): + if torch_dtype is None: + torch_dtype = torch.float16 + elif torch_dtype != torch.float16: + logger.warning("We suggest you to set `torch_dtype=torch.float16` for better efficiency with AWQ.") + return torch_dtype + + def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs): + from ..integrations import get_keys_to_not_convert, replace_with_awq_linear + + self.modules_to_not_convert = get_keys_to_not_convert(model) + + if self.quantization_config.modules_to_not_convert is not None: + self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert) + + model, has_been_replaced = replace_with_awq_linear( + model, quantization_config=self.quantization_config, modules_to_not_convert=self.modules_to_not_convert + ) + + if not has_been_replaced: + logger.warning( + "You are loading an AWQ model but no linear modules were found in your model." + " Please double check your model architecture, or submit an issue on github if you think this is a bug." + ) + + def _process_model_after_weight_loading(self, model): + if self.quantization_config.do_fuse: + from ..integrations import fuse_awq_modules + + model = fuse_awq_modules(model, self.quantization_config) + model._awq_is_fused = True # TODO: consider storing this flag in model.config instead + + if self.quantization_config.version == AWQLinearVersion.EXLLAMA: + from ..integrations import post_init_awq_exllama_modules + + model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) + + @property + def is_serializable(self): + # AWQ through auto-awq has been always serializable, except if the model is fused. + if self.quantization_config.do_fuse: + logger.warning("You cannot save an AWQ model that uses fused modules!") + return False + + if self.quantization_config.version == AWQLinearVersion.EXLLAMA: + logger.warning("You cannot save an AWQ model that uses Exllama backend!") + return False + + return True + + @property + def is_trainable(self): + # AWQ supports PEFT fine-tuning from version 0.2.0 + MIN_AWQ_VERSION_FOR_PEFT = "0.2.0" + return version.parse(importlib.metadata.version("autoawq")) >= version.parse(MIN_AWQ_VERSION_FOR_PEFT) diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py new file mode 100644 index 0000000000000000000000000000000000000000..112cfd644f15703c4018fa4384ac3fdaf1c1d97a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py @@ -0,0 +1,317 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +from packaging import version + +from .base import HfQuantizer +from .quantizers_utils import get_module_from_name + + +if TYPE_CHECKING: + from ..modeling_utils import PreTrainedModel + +from ..utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging + + +if is_torch_available(): + import torch + + from ..pytorch_utils import Conv1D + +logger = logging.get_logger(__name__) + + +class Bnb4BitHfQuantizer(HfQuantizer): + """ + 4-bit quantization from bitsandbytes.py quantization method: + before loading: converts transformer layers into Linear4bit during loading: load 16bit weight and pass to the + layer object after: quantizes individual weights in Linear4bit into 4bit at the first .cuda() call + saving: + from state dict, as usual; saves weights and `quant_state` components + loading: + need to locate `quant_state` components and pass to Param4bit constructor + """ + + use_keep_in_fp32_modules = True + requires_parameters_quantization = True + requires_calibration = False + + required_packages = ["bitsandbytes", "accelerate"] + + def __init__(self, quantization_config, **kwargs): + super().__init__(quantization_config, **kwargs) + + if self.quantization_config.llm_int8_skip_modules is not None: + self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules + + def validate_environment(self, *args, **kwargs): + if not (is_accelerate_available() and is_bitsandbytes_available()): + raise ImportError( + "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install accelerate` " + "and the latest version of bitsandbytes: `pip install -i https://pypi.org/simple/ bitsandbytes`" + ) + + if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): + raise ValueError( + "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make" + " sure the weights are in PyTorch format." + ) + + if not torch.cuda.is_available(): + raise RuntimeError("No GPU found. A GPU is needed for quantization.") + + device_map = kwargs.get("device_map", None) + if ( + device_map is not None + and isinstance(device_map, dict) + and not self.quantization_config.llm_int8_enable_fp32_cpu_offload + ): + device_map_without_lm_head = { + key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert + } + if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): + raise ValueError( + """ + Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the + quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules + in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to + `from_pretrained`. Check + https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu + for more details. 
+ """ + ) + + if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.39.0"): + raise ValueError( + "You have a version of `bitsandbytes` that is not compatible with 4bit inference and training" + " make sure you have the latest version of `bitsandbytes` installed" + ) + + def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": + if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"): + from accelerate.utils import CustomDtype + + if target_dtype != torch.int8: + logger.info("target_dtype {target_dtype} is replaced by `CustomDtype.INT4` for 4-bit BnB quantization") + return CustomDtype.INT4 + else: + raise ValueError( + "You are using `device_map='auto'` on a 4bit loaded version of the model. To automatically compute" + " the appropriate device map, you should upgrade your `accelerate` library," + "`pip install --upgrade accelerate` or install it from source to support fp4 auto device map" + "calculation. You may encounter unexpected behavior, or pass your own device map" + ) + + def check_quantized_param( + self, + model: "PreTrainedModel", + param_value: "torch.Tensor", + param_name: str, + state_dict: Dict[str, Any], + **kwargs, + ) -> bool: + import bitsandbytes as bnb + + module, tensor_name = get_module_from_name(model, param_name) + if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Params4bit): + # Add here check for loaded components' dtypes once serialization is implemented + return True + elif isinstance(module, bnb.nn.Linear4bit) and tensor_name == "bias": + # bias could be loaded by regular set_module_tensor_to_device() from accelerate, + # but it would wrongly use uninitialized weight there. + return True + else: + return False + + def create_quantized_param( + self, + model: "PreTrainedModel", + param_value: "torch.Tensor", + param_name: str, + target_device: "torch.device", + state_dict: Dict[str, Any], + unexpected_keys: Optional[List[str]] = None, + ): + """ + combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device() + """ + import bitsandbytes as bnb + + module, tensor_name = get_module_from_name(model, param_name) + + if tensor_name not in module._parameters: + raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") + + old_value = getattr(module, tensor_name) + + if tensor_name == "bias": + if param_value is None: + new_value = old_value.to(target_device) + else: + new_value = param_value.to(target_device) + + new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad) + module._parameters[tensor_name] = new_value + return + + if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit): + raise ValueError("this function only loads `Linear4bit components`") + if ( + old_value.device == torch.device("meta") + and target_device not in ["meta", torch.device("meta")] + and param_value is None + ): + raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.") + + # construct `new_value` for the module._parameters[tensor_name]: + if self.pre_quantized: + # 4bit loading. Collecting components for restoring quantized weight + # This can be expanded to make a universal call for any quantized weight loading + + if not self.is_serializable: + raise ValueError( + "Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. 
" + "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." + ) + + if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and ( + param_name + ".quant_state.bitsandbytes__nf4" not in state_dict + ): + raise ValueError( + f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components." + ) + + quantized_stats = {} + for k, v in state_dict.items(): + if param_name + "." in k: + quantized_stats[k] = v + if unexpected_keys is not None and k in unexpected_keys: + unexpected_keys.remove(k) + + new_value = bnb.nn.Params4bit.from_prequantized( + data=param_value, + quantized_stats=quantized_stats, + requires_grad=False, + device=target_device, + ) + else: + new_value = param_value.to("cpu") + + # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization. + # Since weights are saved in the correct "orientation", we skip transposing when loading. + if issubclass(module.source_cls, Conv1D): + new_value = new_value.T + + kwargs = old_value.__dict__ + new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device) + + module._parameters[tensor_name] = new_value + + # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.adjust_max_memory + def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: + # need more space for buffers that are created during quantization + max_memory = {key: val * 0.90 for key, val in max_memory.items()} + return max_memory + + # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_torch_dtype + def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": + if torch_dtype is None: + # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` + logger.info( + "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to " + "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " + "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" + " torch_dtype=torch.float16 to remove this warning.", + torch_dtype, + ) + torch_dtype = torch.float16 + return torch_dtype + + # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_device_map + def update_device_map(self, device_map): + if device_map is None: + device_map = {"": torch.cuda.current_device()} + logger.info( + "The device_map was not initialized. " + "Setting device_map to {'':torch.cuda.current_device()}. 
" + "If you want to use the model for inference, please set device_map ='auto' " + ) + return device_map + + # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_before_weight_loading + def _process_model_before_weight_loading( + self, + model: "PreTrainedModel", + device_map, + keep_in_fp32_modules: List[str] = [], + **kwargs, + ): + from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear + + load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload + + # We keep some modules such as the lm_head in their original dtype for numerical stability reasons + if self.quantization_config.llm_int8_skip_modules is None: + self.modules_to_not_convert = get_keys_to_not_convert(model) + else: + self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules + + if not isinstance(self.modules_to_not_convert, list): + self.modules_to_not_convert = [self.modules_to_not_convert] + + self.modules_to_not_convert.extend(keep_in_fp32_modules) + + # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` + if isinstance(device_map, dict) and len(device_map.keys()) > 1: + keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] + + if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload: + raise ValueError( + "If you want to offload some keys to `cpu` or `disk`, you need to set " + "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " + " converted to 8-bit but kept in 32-bit." + ) + self.modules_to_not_convert.extend(keys_on_cpu) + + model = replace_with_bnb_linear( + model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config + ) + # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbyter.py to here + + model.config.quantization_config = self.quantization_config + + # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_after_weight_loading with 8bit->4bit + def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): + model.is_loaded_in_4bit = True + model.is_4bit_serializable = self.is_serializable + return model + + @property + def is_serializable(self): + _is_4bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.41.3") + + if not _is_4bit_serializable: + logger.warning( + "You are calling `save_pretrained` to a 4-bit converted model, but your `bitsandbytes` version doesn't support it. " + "If you want to save 4-bit models, make sure to have `bitsandbytes>=0.41.3` installed." + ) + return False + + return True + + @property + def is_trainable(self) -> bool: + return True diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_8bit.py b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_8bit.py new file mode 100644 index 0000000000000000000000000000000000000000..b80e9bd3a1dfa27694bab0132c9b9306ee3f0e1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_8bit.py @@ -0,0 +1,285 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +from packaging import version + +from .base import HfQuantizer + + +if TYPE_CHECKING: + from ..modeling_utils import PreTrainedModel + +from ..utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging +from .quantizers_utils import get_module_from_name + + +if is_torch_available(): + import torch + + from ..pytorch_utils import Conv1D + +logger = logging.get_logger(__name__) + + +class Bnb8BitHfQuantizer(HfQuantizer): + """ + 8-bit quantization from bitsandbytes quantization method: + before loading: converts transformer layers into Linear8bitLt during loading: load 16bit weight and pass to the + layer object after: quantizes individual weights in Linear8bitLt into 8bit at fitst .cuda() call + saving: + from state dict, as usual; saves weights and 'SCB' component + loading: + need to locate SCB component and pass to the Linear8bitLt object + """ + + use_keep_in_fp32_modules = True + requires_parameters_quantization = True + requires_calibration = False + + required_packages = ["bitsandbytes", "accelerate"] + + def __init__(self, quantization_config, **kwargs): + super().__init__(quantization_config, **kwargs) + + if self.quantization_config.llm_int8_skip_modules is not None: + self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules + + def validate_environment(self, *args, **kwargs): + if not (is_accelerate_available() and is_bitsandbytes_available()): + raise ImportError( + "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install accelerate` " + "and the latest version of bitsandbytes: `pip install -i https://pypi.org/simple/ bitsandbytes`" + ) + + if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): + raise ValueError( + "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make" + " sure the weights are in PyTorch format." + ) + + if not torch.cuda.is_available(): + raise RuntimeError("No GPU found. A GPU is needed for quantization.") + + device_map = kwargs.get("device_map", None) + if ( + device_map is not None + and isinstance(device_map, dict) + and not self.quantization_config.llm_int8_enable_fp32_cpu_offload + ): + device_map_without_lm_head = { + key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert + } + if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): + raise ValueError( + """ + Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the + quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules + in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to + `from_pretrained`. Check + https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu + for more details. 
+                    """
+                )
+
+        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.2"):
+            raise ValueError(
+                "You have a version of `bitsandbytes` that is not compatible with 8-bit inference and training."
+                " Make sure you have the latest version of `bitsandbytes` installed."
+            )
+
+    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
+        # need more space for buffers that are created during quantization
+        max_memory = {key: val * 0.90 for key, val in max_memory.items()}
+        return max_memory
+
+    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+        if torch_dtype is None:
+            # We force the `dtype` to be float16, this is a requirement from `bitsandbytes`
+            logger.info(
+                "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
+                "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. "
+                "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers, or pass"
+                " torch_dtype=torch.float16 to remove this warning.",
+                torch_dtype,
+            )
+            torch_dtype = torch.float16
+        return torch_dtype
+
+    def update_device_map(self, device_map):
+        if device_map is None:
+            device_map = {"": torch.cuda.current_device()}
+            logger.info(
+                "The device_map was not initialized. "
+                "Setting device_map to {'':torch.cuda.current_device()}. "
+                "If you want to use the model for inference, please set device_map='auto'."
+            )
+        return device_map
+
+    def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
+        if target_dtype != torch.int8:
+            logger.info(f"target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization")
+        return torch.int8
+
+    def check_quantized_param(
+        self,
+        model: "PreTrainedModel",
+        param_value: "torch.Tensor",
+        param_name: str,
+        state_dict: Dict[str, Any],
+        **kwargs,
+    ) -> bool:
+        import bitsandbytes as bnb
+
+        module, tensor_name = get_module_from_name(model, param_name)
+        if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Int8Params):
+            if self.pre_quantized:
+                if param_name.replace("weight", "SCB") not in state_dict.keys():
+                    raise ValueError("Missing quantization component `SCB`")
+                if param_value.dtype != torch.int8:
+                    raise ValueError(
+                        f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`."
+                    )
+                return True
+            return False
+
+    def create_quantized_param(
+        self,
+        model: "PreTrainedModel",
+        param_value: "torch.Tensor",
+        param_name: str,
+        target_device: "torch.device",
+        state_dict: Dict[str, Any],
+        unexpected_keys: Optional[List[str]] = None,
+    ):
+        """
+        Combines the logic of `_load_state_dict_into_meta_model` and
+        `integrations.bitsandbytes.set_module_quantized_tensor_to_device()`. Needs auxiliary items from the state
+        dict; if they are found, they are removed from `unexpected_keys`.
+        """
+        import bitsandbytes as bnb
+
+        fp16_statistics_key = param_name.replace("weight", "SCB")
+        fp16_weights_format_key = param_name.replace("weight", "weight_format")
+
+        fp16_statistics = state_dict.get(fp16_statistics_key, None)
+        fp16_weights_format = state_dict.get(fp16_weights_format_key, None)
+
+        module, tensor_name = get_module_from_name(model, param_name)
+        if tensor_name not in module._parameters:
+            raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
+
+        old_value = getattr(module, tensor_name)
+
+        if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params):
+            raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.")
+        if (
+            old_value.device == torch.device("meta")
+            and target_device not in ["meta", torch.device("meta")]
+            and param_value is None
+        ):
+            raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put on {target_device}.")
+
+        new_value = param_value.to("cpu")
+        if self.pre_quantized and not self.is_serializable:
+            raise ValueError(
+                "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
+                "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
+            )
+
+        # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the
+        # weight matrix prior to quantization. Since weights are saved in the correct "orientation", we skip
+        # transposing when loading.
+        if issubclass(module.source_cls, Conv1D):
+            if fp16_statistics is None:
+                new_value = new_value.T
+
+        kwargs = old_value.__dict__
+        new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device)
+
+        module._parameters[tensor_name] = new_value
+        if fp16_statistics is not None:
+            setattr(module.weight, "SCB", fp16_statistics.to(target_device))
+            if unexpected_keys is not None:
+                unexpected_keys.remove(fp16_statistics_key)
+
+        # We just need to pop the `weight_format` keys from the state dict to remove unneeded
+        # messages. The correct format is retrieved during the first forward pass.
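+        # Illustrative note: like `SCB` above, `weight_format` is an auxiliary entry saved next to
+        # 8-bit weights; consuming the key here keeps it out of the `unexpected_keys` report.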
+ if fp16_weights_format is not None and unexpected_keys is not None: + unexpected_keys.remove(fp16_weights_format_key) + + def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): + model.is_loaded_in_8bit = True + model.is_8bit_serializable = self.is_serializable + return model + + def _process_model_before_weight_loading( + self, + model: "PreTrainedModel", + device_map, + keep_in_fp32_modules: List[str] = [], + **kwargs, + ): + from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear + + load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload + + # We keep some modules such as the lm_head in their original dtype for numerical stability reasons + if self.quantization_config.llm_int8_skip_modules is None: + self.modules_to_not_convert = get_keys_to_not_convert(model) + else: + self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules + + if not isinstance(self.modules_to_not_convert, list): + self.modules_to_not_convert = [self.modules_to_not_convert] + + self.modules_to_not_convert.extend(keep_in_fp32_modules) + + # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` + if isinstance(device_map, dict) and len(device_map.keys()) > 1: + keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] + + if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload: + raise ValueError( + "If you want to offload some keys to `cpu` or `disk`, you need to set " + "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " + " converted to 8-bit but kept in 32-bit." + ) + self.modules_to_not_convert.extend(keys_on_cpu) + + model = replace_with_bnb_linear( + model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config + ) + # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbyter.py to here + + model.config.quantization_config = self.quantization_config + + @property + def is_serializable(self): + _bnb_supports_8bit_serialization = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse( + "0.37.2" + ) + + if not _bnb_supports_8bit_serialization: + logger.warning( + "You are calling `save_pretrained` to a 8-bit converted model, but your `bitsandbytes` version doesn't support it. " + "If you want to save 8-bit models, make sure to have `bitsandbytes>0.37.2` installed. You will most likely face errors or" + " unexpected behaviours." + ) + return False + + return True + + @property + def is_trainable(self) -> bool: + return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.37.0") diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_gptq.py b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_gptq.py new file mode 100644 index 0000000000000000000000000000000000000000..ffc6f2090a8a7b3d6ac896655fda4f6c0632dea7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_gptq.py @@ -0,0 +1,94 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+from typing import TYPE_CHECKING, Optional
+
+from packaging import version
+
+from .base import HfQuantizer
+
+
+if TYPE_CHECKING:
+    from ..modeling_utils import PreTrainedModel
+
+from ..utils import is_auto_gptq_available, is_optimum_available, is_torch_available, logging
+from ..utils.quantization_config import GPTQConfig, QuantizationConfigMixin
+
+
+if is_torch_available():
+    import torch
+
+logger = logging.get_logger(__name__)
+
+
+class GptqHfQuantizer(HfQuantizer):
+    """
+    Quantizer of the GPTQ method. For GPTQ, the quantizer supports calibration of the model through the
+    `auto_gptq` package. Quantization is done under the hood for users if they load a non-prequantized model.
+    """
+
+    requires_calibration = False
+    required_packages = ["optimum", "auto_gptq"]
+    optimum_quantizer = None
+
+    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
+        super().__init__(quantization_config, **kwargs)
+        from optimum.gptq import GPTQQuantizer
+
+        self.optimum_quantizer = GPTQQuantizer.from_dict(self.quantization_config.to_dict_optimum())
+
+    def validate_environment(self, *args, **kwargs):
+        gptq_supports_cpu = version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2")
+        if not gptq_supports_cpu and not torch.cuda.is_available():
+            raise RuntimeError("A GPU is required to quantize or run a quantized model.")
+        elif not (is_optimum_available() and is_auto_gptq_available()):
+            raise ImportError(
+                "Loading a GPTQ quantized model requires optimum (`pip install optimum`) and the auto-gptq library (`pip install auto-gptq`)"
+            )
+        elif version.parse(importlib.metadata.version("auto_gptq")) < version.parse("0.4.2"):
+            raise ImportError(
+                "You need a version of auto_gptq >= 0.4.2 to use GPTQ: `pip install --upgrade auto-gptq`"
+            )
+
+    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+        if torch_dtype is None:
+            torch_dtype = torch.float16
+        elif torch_dtype != torch.float16:
+            logger.info("We suggest you set `torch_dtype=torch.float16` for better efficiency with GPTQ.")
+        return torch_dtype
+
+    def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
+        if model.__class__.main_input_name != "input_ids":
+            raise RuntimeError("We can only quantize pure text models.")
+
+        if self.pre_quantized:
+            model = self.optimum_quantizer.convert_model(model)
+
+    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
+        if self.pre_quantized:
+            model = self.optimum_quantizer.post_init_model(model)
+        else:
+            if self.quantization_config.tokenizer is None:
+                self.quantization_config.tokenizer = model.name_or_path
+
+            self.optimum_quantizer.quantize_model(model, self.quantization_config.tokenizer)
+            model.config.quantization_config = GPTQConfig.from_dict(self.optimum_quantizer.to_dict())
+
+    @property
+    def is_trainable(self, model: Optional["PreTrainedModel"] = None):
+        return True
+
+    @property
+    def is_serializable(self):
+        return True
diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_quanto.py
b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_quanto.py new file mode 100644 index 0000000000000000000000000000000000000000..e7e2219ab6621715e3275312d547ec2bfbd4b676 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizer_quanto.py @@ -0,0 +1,200 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +from packaging import version + +from .base import HfQuantizer +from .quantizers_utils import get_module_from_name + + +if TYPE_CHECKING: + from ..modeling_utils import PreTrainedModel + +from ..utils import is_accelerate_available, is_quanto_available, is_torch_available, logging +from ..utils.quantization_config import QuantoConfig + + +if is_torch_available(): + import torch + +logger = logging.get_logger(__name__) + + +class QuantoHfQuantizer(HfQuantizer): + """ + Quantizer for the quanto library + """ + + required_packages = ["quanto", "accelerate"] + requires_parameters_quantization = True + requires_calibration = False + + def __init__(self, quantization_config: QuantoConfig, **kwargs): + super().__init__(quantization_config, **kwargs) + self.post_init() + + def post_init(self): + r""" + Safety checker + """ + if self.quantization_config.activations is not None and not self.pre_quantized: + raise ValueError( + "We don't support quantizing the activations with transformers library." + "Use quanto library for more complex use cases such as activations quantization, calibration and quantization aware training." + ) + + def validate_environment(self, *args, **kwargs): + if not is_quanto_available(): + raise ImportError("Loading a quanto quantized model requires quanto library (`pip install quanto`)") + if not is_accelerate_available(): + raise ImportError( + "Loading a quanto quantized model requires accelerate library (`pip install accelerate`)" + ) + + def update_device_map(self, device_map): + if device_map is None: + device_map = {"": "cpu"} + logger.info( + "The device_map was not initialized. " + "Setting device_map to {'':'cpu'}. " + "If you want to use the model for inference, please set device_map ='auto'" + ) + return device_map + + def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": + if torch_dtype is None: + logger.info("You did not specify `torch_dtype` in `from_pretrained`. 
Setting it to `torch.float32`.") + torch_dtype = torch.float32 + return torch_dtype + + def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]: + import quanto + + not_missing_keys = [] + for name, module in model.named_modules(): + if isinstance(module, quanto.QModuleMixin): + for missing in missing_keys: + if ( + (name in missing or name in f"{prefix}.{missing}") + and not missing.endswith(".weight") + and not missing.endswith(".bias") + ): + not_missing_keys.append(missing) + return [k for k in missing_keys if k not in not_missing_keys] + + def check_quantized_param( + self, + model: "PreTrainedModel", + param_value: "torch.Tensor", + param_name: str, + state_dict: Dict[str, Any], + **kwargs, + ) -> bool: + """ + Check if a parameter needs to be quantized. + """ + import quanto + + device_map = kwargs.get("device_map", None) + param_device = kwargs.get("param_device", None) + # we don't quantize the model if the module is going to be offloaded to the cpu + if device_map is not None and param_device is not None: + device_map_values = set(device_map.values()) + if param_device == "cpu" and len(device_map_values) > 1: + if not (device_map_values == {"cpu"} or device_map_values == {"cpu", "disk"}): + return False + + module, tensor_name = get_module_from_name(model, param_name) + # We only quantize the weights and the bias is not quantized. + if isinstance(module, quanto.QModuleMixin) and "weight" in tensor_name: + # if the weights are quantized, don't need to recreate it again with `create_quantized_param` + return not module.frozen + else: + return False + + def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: + max_memory = {key: val * 0.90 for key, val in max_memory.items()} + return max_memory + + def create_quantized_param( + self, + model: "PreTrainedModel", + param_value: "torch.Tensor", + param_name: str, + target_device: "torch.device", + *args, + **kwargs, + ): + """ + Create the quantized parameter by calling .freeze() after setting it to the module. + """ + from accelerate.utils import set_module_tensor_to_device + + set_module_tensor_to_device(model, param_name, target_device, param_value) + module, _ = get_module_from_name(model, param_name) + module.freeze() + module.weight.requires_grad = False + + def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": + if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.27.0"): + from accelerate.utils import CustomDtype + + mapping = { + "int8": torch.int8, + "float8": CustomDtype.FP8, + "int4": CustomDtype.INT4, + "int2": CustomDtype.INT2, + } + target_dtype = mapping[self.quantization_config.weights] + return target_dtype + else: + raise ValueError( + "You are using `device_map='auto'` on a quanto quantized model. To automatically compute" + " the appropriate device map, you should upgrade your `accelerate` library," + "`pip install --upgrade accelerate` or install it from source." 
+ ) + + def _process_model_before_weight_loading( + self, model: "PreTrainedModel", keep_in_fp32_modules: List[str] = [], **kwargs + ): + from ..integrations import get_keys_to_not_convert, replace_with_quanto_layers + + # We keep some modules such as the lm_head in their original dtype for numerical stability reasons + if self.quantization_config.modules_to_not_convert is None: + self.modules_to_not_convert = get_keys_to_not_convert(model) + else: + self.modules_to_not_convert = self.quantization_config.modules_to_not_convert + + if not isinstance(self.modules_to_not_convert, list): + self.modules_to_not_convert = [self.modules_to_not_convert] + + self.modules_to_not_convert.extend(keep_in_fp32_modules) + + model, _ = replace_with_quanto_layers( + model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config + ) + model.config.quantization_config = self.quantization_config + + def _process_model_after_weight_loading(self, model): + return model + + @property + def is_trainable(self, model: Optional["PreTrainedModel"] = None): + return False + + @property + def is_serializable(self): + return False diff --git a/venv/lib/python3.10/site-packages/transformers/quantizers/quantizers_utils.py b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizers_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6ae287bf251b51337b8588b2e0176178316e7e96 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/quantizers/quantizers_utils.py @@ -0,0 +1,26 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Tuple + + +def get_module_from_name(module, tensor_name: str) -> Tuple[Any, str]: + if "." in tensor_name: + splits = tensor_name.split(".") + for split in splits[:-1]: + new_module = getattr(module, split) + if new_module is None: + raise ValueError(f"{module} has no attribute {split}.") + module = new_module + tensor_name = splits[-1] + return module, tensor_name diff --git a/venv/lib/python3.10/site-packages/transformers/utils/__init__.py b/venv/lib/python3.10/site-packages/transformers/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..121c4dc1361e4e21617045e399cbae91e3c659dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/__init__.py @@ -0,0 +1,258 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from huggingface_hub import get_full_repo_name # for backward compatibility +from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY # for backward compatibility +from packaging import version + +from .. import __version__ +from .backbone_utils import BackboneConfigMixin, BackboneMixin +from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD +from .doc import ( + add_code_sample_docstrings, + add_end_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + copy_func, + replace_return_docstrings, +) +from .generic import ( + ContextManagers, + ExplicitEnum, + ModelOutput, + PaddingStrategy, + TensorType, + add_model_info_to_auto_map, + cached_property, + can_return_loss, + expand_dims, + find_labels, + flatten_dict, + infer_framework, + is_jax_tensor, + is_numpy_array, + is_tensor, + is_tf_symbolic_tensor, + is_tf_tensor, + is_torch_device, + is_torch_dtype, + is_torch_tensor, + reshape, + squeeze, + strtobool, + tensor_size, + to_numpy, + to_py_obj, + transpose, + working_or_temp_dir, +) +from .hub import ( + CLOUDFRONT_DISTRIB_PREFIX, + HF_MODULES_CACHE, + HUGGINGFACE_CO_PREFIX, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + PYTORCH_PRETRAINED_BERT_CACHE, + PYTORCH_TRANSFORMERS_CACHE, + S3_BUCKET_PREFIX, + TRANSFORMERS_CACHE, + TRANSFORMERS_DYNAMIC_MODULE_NAME, + EntryNotFoundError, + PushInProgress, + PushToHubMixin, + RepositoryNotFoundError, + RevisionNotFoundError, + cached_file, + default_cache_path, + define_sagemaker_information, + download_url, + extract_commit_hash, + get_cached_models, + get_file_from_repo, + has_file, + http_user_agent, + is_offline_mode, + is_remote_url, + move_cache, + send_example_telemetry, + try_to_load_from_cache, +) +from .import_utils import ( + ACCELERATE_MIN_VERSION, + ENV_VARS_TRUE_AND_AUTO_VALUES, + ENV_VARS_TRUE_VALUES, + TORCH_FX_REQUIRED_VERSION, + USE_JAX, + USE_TF, + USE_TORCH, + XLA_FSDPV2_MIN_VERSION, + DummyObject, + OptionalDependencyNotAvailable, + _LazyModule, + ccl_version, + direct_transformers_import, + get_torch_version, + is_accelerate_available, + is_apex_available, + is_aqlm_available, + is_auto_awq_available, + is_auto_gptq_available, + is_av_available, + is_bitsandbytes_available, + is_bs4_available, + is_coloredlogs_available, + is_cv2_available, + is_cython_available, + is_datasets_available, + is_decord_available, + is_detectron2_available, + is_essentia_available, + is_faiss_available, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + is_flax_available, + is_fsdp_available, + is_ftfy_available, + is_g2p_en_available, + is_galore_torch_available, + is_in_notebook, + is_ipex_available, + is_jieba_available, + is_jinja_available, + is_jumanpp_available, + is_kenlm_available, + is_keras_nlp_available, + is_levenshtein_available, + is_librosa_available, + is_mlx_available, + is_natten_available, + is_ninja_available, + is_nltk_available, + is_onnx_available, + is_openai_available, + is_optimum_available, + is_pandas_available, + is_peft_available, + is_phonemizer_available, + is_pretty_midi_available, + is_protobuf_available, + is_psutil_available, + is_py3nvml_available, + is_pyctcdecode_available, + is_pytesseract_available, + is_pytest_available, + is_pytorch_quantization_available, + is_quanto_available, + is_rjieba_available, + is_sacremoses_available, + is_safetensors_available, + 
is_sagemaker_dp_enabled, + is_sagemaker_mp_enabled, + is_scipy_available, + is_sentencepiece_available, + is_seqio_available, + is_sklearn_available, + is_soundfile_availble, + is_spacy_available, + is_speech_available, + is_sudachi_available, + is_sudachi_projection_available, + is_tensorflow_probability_available, + is_tensorflow_text_available, + is_tf2onnx_available, + is_tf_available, + is_timm_available, + is_tokenizers_available, + is_torch_available, + is_torch_bf16_available, + is_torch_bf16_available_on_device, + is_torch_bf16_cpu_available, + is_torch_bf16_gpu_available, + is_torch_compile_available, + is_torch_cuda_available, + is_torch_fp16_available_on_device, + is_torch_fx_available, + is_torch_fx_proxy, + is_torch_mlu_available, + is_torch_mps_available, + is_torch_neuroncore_available, + is_torch_npu_available, + is_torch_sdpa_available, + is_torch_tensorrt_fx_available, + is_torch_tf32_available, + is_torch_tpu_available, + is_torch_xla_available, + is_torch_xpu_available, + is_torchaudio_available, + is_torchdistx_available, + is_torchdynamo_available, + is_torchdynamo_compiling, + is_torchvision_available, + is_training_run_on_sagemaker, + is_vision_available, + requires_backends, + torch_only_method, +) +from .peft_utils import ( + ADAPTER_CONFIG_NAME, + ADAPTER_SAFE_WEIGHTS_NAME, + ADAPTER_WEIGHTS_NAME, + check_peft_version, + find_adapter_config_file, +) + + +WEIGHTS_NAME = "pytorch_model.bin" +WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" +TF2_WEIGHTS_NAME = "tf_model.h5" +TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json" +TF_WEIGHTS_NAME = "model.ckpt" +FLAX_WEIGHTS_NAME = "flax_model.msgpack" +FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json" +SAFE_WEIGHTS_NAME = "model.safetensors" +SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json" +CONFIG_NAME = "config.json" +FEATURE_EXTRACTOR_NAME = "preprocessor_config.json" +IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME +PROCESSOR_NAME = "processor_config.json" +GENERATION_CONFIG_NAME = "generation_config.json" +MODEL_CARD_NAME = "modelcard.json" + +SENTENCEPIECE_UNDERLINE = "▁" +SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility + +MULTIPLE_CHOICE_DUMMY_INPUTS = [ + [[0, 1, 0, 1], [1, 0, 0, 1]] +] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. +DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] +DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] + + +def check_min_version(min_version): + if version.parse(__version__) < version.parse(min_version): + if "dev" in min_version: + error_message = ( + "This example requires a source install from HuggingFace Transformers (see " + "`https://huggingface.co/docs/transformers/installation#install-from-source`)," + ) + else: + error_message = f"This example requires a minimum version of {min_version}," + error_message += f" but the version found is {__version__}.\n" + raise ImportError( + error_message + + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " + "versions of HuggingFace Transformers." 
+    )
diff --git a/venv/lib/python3.10/site-packages/transformers/utils/backbone_utils.py b/venv/lib/python3.10/site-packages/transformers/utils/backbone_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..14fcfe4a50a2d2dd0082905aed3001e2c2f74280
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/utils/backbone_utils.py
@@ -0,0 +1,350 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Collection of utils to be used by backbones and their components."""
+
+import enum
+import inspect
+from typing import Iterable, List, Optional, Tuple, Union
+
+
+class BackboneType(enum.Enum):
+    TIMM = "timm"
+    TRANSFORMERS = "transformers"
+
+
+def verify_out_features_out_indices(
+    out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]]
+):
+    """
+    Verify that out_indices and out_features are valid for the given stage_names.
+    """
+    if stage_names is None:
+        raise ValueError("stage_names must be set for transformers backbones")
+
+    if out_features is not None:
+        if not isinstance(out_features, list):
+            raise ValueError(f"out_features must be a list, got {type(out_features)}")
+        if any(feat not in stage_names for feat in out_features):
+            raise ValueError(f"out_features must be a subset of stage_names: {stage_names}, got {out_features}")
+        if len(out_features) != len(set(out_features)):
+            raise ValueError(f"out_features must not contain any duplicates, got {out_features}")
+        if out_features != (sorted_feats := [feat for feat in stage_names if feat in out_features]):
+            raise ValueError(
+                f"out_features must be in the same order as stage_names, expected {sorted_feats}, got {out_features}"
+            )
+
+    if out_indices is not None:
+        if not isinstance(out_indices, (list, tuple)):
+            raise ValueError(f"out_indices must be a list or tuple, got {type(out_indices)}")
+        # Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,]
+        positive_indices = tuple(idx % len(stage_names) if idx < 0 else idx for idx in out_indices)
+        if any(idx not in range(len(stage_names)) for idx in positive_indices):
+            raise ValueError(f"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}")
+        if len(positive_indices) != len(set(positive_indices)):
+            msg = f"out_indices must not contain any duplicates, got {out_indices}"
+            msg += f" (equivalent to {positive_indices})" if positive_indices != out_indices else ""
+            raise ValueError(msg)
+        if positive_indices != tuple(sorted(positive_indices)):
+            sorted_negative = tuple(idx for _, idx in sorted(zip(positive_indices, out_indices), key=lambda x: x[0]))
+            raise ValueError(
+                f"out_indices must be in the same order as stage_names, expected {sorted_negative}, got {out_indices}"
+            )
+
+    if out_features is not None and out_indices is not None:
+        if len(out_features) != len(out_indices):
+            raise ValueError("out_features and out_indices should have the same length if both are set")
+        if out_features != [stage_names[idx] for idx in out_indices]:
+            raise ValueError("out_features and out_indices should correspond to the same stages if both are set")
+
+
+def _align_output_features_output_indices(
+    out_features: Optional[List[str]],
+    out_indices: Optional[Union[List[int], Tuple[int]]],
+    stage_names: List[str],
+):
+    """
+    Finds the corresponding `out_features` and `out_indices` for the given `stage_names`.
+
+    The logic is as follows:
+    - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
+      `out_indices`.
+    - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
+      `out_features`.
+    - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
+    - `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.
+
+    Args:
+        out_features (`List[str]`): The names of the features for the backbone to output.
+        out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
+        stage_names (`List[str]`): The names of the stages of the backbone.
+    """
+    if out_indices is None and out_features is None:
+        out_indices = [len(stage_names) - 1]
+        out_features = [stage_names[-1]]
+    elif out_indices is None and out_features is not None:
+        out_indices = [stage_names.index(layer) for layer in out_features]
+    elif out_features is None and out_indices is not None:
+        out_features = [stage_names[idx] for idx in out_indices]
+    return out_features, out_indices
+
+
+def get_aligned_output_features_output_indices(
+    out_features: Optional[List[str]],
+    out_indices: Optional[Union[List[int], Tuple[int]]],
+    stage_names: List[str],
+) -> Tuple[List[str], List[int]]:
+    """
+    Get the `out_features` and `out_indices` so that they are aligned.
+
+    The logic is as follows:
+    - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
+      `out_indices`.
+    - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
+      `out_features`.
+    - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
+    - `out_indices` and `out_features` set: they are verified to be aligned.
+
+    Args:
+        out_features (`List[str]`): The names of the features for the backbone to output.
+        out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
+        stage_names (`List[str]`): The names of the stages of the backbone.
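+
+    Example (an illustrative sketch with made-up stage names for a three-stage backbone):
+
+    ```python
+    >>> # With neither argument given, default to the last stage
+    >>> get_aligned_output_features_output_indices(None, None, ["stem", "stage1", "stage2"])
+    (['stage2'], [2])
+
+    >>> # With only out_indices given, the matching stage names are filled in
+    >>> get_aligned_output_features_output_indices(None, [0, 2], ["stem", "stage1", "stage2"])
+    (['stem', 'stage2'], [0, 2])
+    ```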
+ """ + # First verify that the out_features and out_indices are valid + verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names) + output_features, output_indices = _align_output_features_output_indices( + out_features=out_features, out_indices=out_indices, stage_names=stage_names + ) + # Verify that the aligned out_features and out_indices are valid + verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names) + return output_features, output_indices + + +class BackboneMixin: + backbone_type: Optional[BackboneType] = None + + def _init_timm_backbone(self, config) -> None: + """ + Initialize the backbone model from timm The backbone must already be loaded to self._backbone + """ + if getattr(self, "_backbone", None) is None: + raise ValueError("self._backbone must be set before calling _init_timm_backbone") + + # These will diagree with the defaults for the transformers models e.g. for resnet50 + # the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] + # the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4'] + self.stage_names = [stage["module"] for stage in self._backbone.feature_info.info] + self.num_features = [stage["num_chs"] for stage in self._backbone.feature_info.info] + out_indices = self._backbone.feature_info.out_indices + out_features = self._backbone.feature_info.module_name() + + # We verify the out indices and out features are valid + verify_out_features_out_indices( + out_features=out_features, out_indices=out_indices, stage_names=self.stage_names + ) + self._out_features, self._out_indices = out_features, out_indices + + def _init_transformers_backbone(self, config) -> None: + stage_names = getattr(config, "stage_names") + out_features = getattr(config, "out_features", None) + out_indices = getattr(config, "out_indices", None) + + self.stage_names = stage_names + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=out_indices, stage_names=stage_names + ) + # Number of channels for each stage. This is set in the transformer backbone model init + self.num_features = None + + def _init_backbone(self, config) -> None: + """ + Method to initialize the backbone. This method is called by the constructor of the base class after the + pretrained model weights have been loaded. + """ + self.config = config + + self.use_timm_backbone = getattr(config, "use_timm_backbone", False) + self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS + + if self.backbone_type == BackboneType.TIMM: + self._init_timm_backbone(config) + elif self.backbone_type == BackboneType.TRANSFORMERS: + self._init_transformers_backbone(config) + else: + raise ValueError(f"backbone_type {self.backbone_type} not supported.") + + @property + def out_features(self): + return self._out_features + + @out_features.setter + def out_features(self, out_features: List[str]): + """ + Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. 
+ """ + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=None, stage_names=self.stage_names + ) + + @property + def out_indices(self): + return self._out_indices + + @out_indices.setter + def out_indices(self, out_indices: Union[Tuple[int], List[int]]): + """ + Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. + """ + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=None, out_indices=out_indices, stage_names=self.stage_names + ) + + @property + def out_feature_channels(self): + # the current backbones will output the number of channels for each stage + # even if that stage is not in the out_features list. + return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)} + + @property + def channels(self): + return [self.out_feature_channels[name] for name in self.out_features] + + def forward_with_filtered_kwargs(self, *args, **kwargs): + signature = dict(inspect.signature(self.forward).parameters) + filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature} + return self(*args, **filtered_kwargs) + + def forward( + self, + pixel_values, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + raise NotImplementedError("This method should be implemented by the derived class.") + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to + include the `out_features` and `out_indices` attributes. + """ + output = super().to_dict() + output["out_features"] = output.pop("_out_features") + output["out_indices"] = output.pop("_out_indices") + return output + + +class BackboneConfigMixin: + """ + A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations. + """ + + @property + def out_features(self): + return self._out_features + + @out_features.setter + def out_features(self, out_features: List[str]): + """ + Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. + """ + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=None, stage_names=self.stage_names + ) + + @property + def out_indices(self): + return self._out_indices + + @out_indices.setter + def out_indices(self, out_indices: Union[Tuple[int], List[int]]): + """ + Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. + """ + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=None, out_indices=out_indices, stage_names=self.stage_names + ) + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to + include the `out_features` and `out_indices` attributes. + """ + output = super().to_dict() + output["out_features"] = output.pop("_out_features") + output["out_indices"] = output.pop("_out_indices") + return output + + +def load_backbone(config): + """ + Loads the backbone model from a config object. + + If the config is from the backbone model itself, then we return a backbone model with randomly initialized + weights. 
+
+    If the config is from the parent model of the backbone model itself, then we load the pretrained backbone weights
+    if specified.
+    """
+    from transformers import AutoBackbone, AutoConfig
+
+    backbone_config = getattr(config, "backbone_config", None)
+    use_timm_backbone = getattr(config, "use_timm_backbone", None)
+    use_pretrained_backbone = getattr(config, "use_pretrained_backbone", None)
+    backbone_checkpoint = getattr(config, "backbone", None)
+    backbone_kwargs = getattr(config, "backbone_kwargs", None)
+
+    backbone_kwargs = {} if backbone_kwargs is None else backbone_kwargs
+
+    if backbone_kwargs and backbone_config is not None:
+        raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
+
+    # If there is a backbone_config and a backbone checkpoint, and use_pretrained_backbone=False then the desired
+    # behaviour is ill-defined: do you want to load from the checkpoint's config or the backbone_config?
+    if backbone_config is not None and backbone_checkpoint is not None and use_pretrained_backbone is not None:
+        raise ValueError("Cannot specify both config.backbone_config and config.backbone")
+
+    # If any of the following are set, then the config passed in is from a model which contains a backbone.
+    if (
+        backbone_config is None
+        and use_timm_backbone is None
+        and backbone_checkpoint is None
+    ):
+        return AutoBackbone.from_config(config=config, **backbone_kwargs)
+
+    # config from the parent model that has a backbone
+    if use_timm_backbone:
+        if backbone_checkpoint is None:
+            raise ValueError("config.backbone must be set if use_timm_backbone is True")
+        # Because of how timm backbones were originally added to models, we need to pass in use_pretrained_backbone
+        # to determine whether to load the pretrained weights.
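+        # e.g. a DETR-style parent config with use_timm_backbone=True and backbone="resnet50" lands in this
+        # branch; use_pretrained_backbone then controls whether the timm weights are downloaded or left random.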
+ backbone = AutoBackbone.from_pretrained( + backbone_checkpoint, + use_timm_backbone=use_timm_backbone, + use_pretrained_backbone=use_pretrained_backbone, + **backbone_kwargs, + ) + elif use_pretrained_backbone: + if backbone_checkpoint is None: + raise ValueError("config.backbone must be set if use_pretrained_backbone is True") + backbone = AutoBackbone.from_pretrained(backbone_checkpoint, **backbone_kwargs) + else: + if backbone_config is None and backbone_checkpoint is None: + raise ValueError("Either config.backbone_config or config.backbone must be set") + if backbone_config is None: + backbone_config = AutoConfig.from_pretrained(backbone_checkpoint, **backbone_kwargs) + backbone = AutoBackbone.from_config(config=backbone_config) + return backbone diff --git a/venv/lib/python3.10/site-packages/transformers/utils/constants.py b/venv/lib/python3.10/site-packages/transformers/utils/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..fefd1b4601da04e073ff2880099ccaf87d0b1666 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/constants.py @@ -0,0 +1,6 @@ +IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406] +IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225] +IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5] +IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5] +OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073] +OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711] diff --git a/venv/lib/python3.10/site-packages/transformers/utils/doc.py b/venv/lib/python3.10/site-packages/transformers/utils/doc.py new file mode 100644 index 0000000000000000000000000000000000000000..23679f31a3e2ec231e2c4c64ecdf9db5a5eb1c97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/doc.py @@ -0,0 +1,1190 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Doc utilities: Utilities related to documentation +""" + +import functools +import re +import types + + +def add_start_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") + return fn + + return docstring_decorator + + +def add_start_docstrings_to_model_forward(*docstr): + def docstring_decorator(fn): + docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") + class_name = f"[`{fn.__qualname__.split('.')[0]}`]" + intro = f" The {class_name} forward method, overrides the `__call__` special method." + note = r""" + + + + Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`] + instance afterwards instead of this since the former takes care of running the pre and post processing steps while + the latter silently ignores them. 
+ + +""" + + fn.__doc__ = intro + note + docstring + return fn + + return docstring_decorator + + +def add_end_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr) + return fn + + return docstring_decorator + + +PT_RETURN_INTRODUCTION = r""" + Returns: + [`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of + `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various + elements depending on the configuration ([`{config_class}`]) and inputs. + +""" + + +TF_RETURN_INTRODUCTION = r""" + Returns: + [`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if + `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the + configuration ([`{config_class}`]) and inputs. + +""" + + +def _get_indent(t): + """Returns the indentation in the first line of t""" + search = re.search(r"^(\s*)\S", t) + return "" if search is None else search.groups()[0] + + +def _convert_output_args_doc(output_args_doc): + """Convert output_args_doc to display properly.""" + # Split output_arg_doc in blocks argument/description + indent = _get_indent(output_args_doc) + blocks = [] + current_block = "" + for line in output_args_doc.split("\n"): + # If the indent is the same as the beginning, the line is the name of new arg. + if _get_indent(line) == indent: + if len(current_block) > 0: + blocks.append(current_block[:-1]) + current_block = f"{line}\n" + else: + # Otherwise it's part of the description of the current arg. + # We need to remove 2 spaces to the indentation. + current_block += f"{line[2:]}\n" + blocks.append(current_block[:-1]) + + # Format each block for proper rendering + for i in range(len(blocks)): + blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i]) + blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i]) + + return "\n".join(blocks) + + +def _prepare_output_docstrings(output_type, config_class, min_indent=None): + """ + Prepares the return part of the docstring using `output_type`. + """ + output_docstring = output_type.__doc__ + + # Remove the head of the docstring to keep the list of args only + lines = output_docstring.split("\n") + i = 0 + while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: + i += 1 + if i < len(lines): + params_docstring = "\n".join(lines[(i + 1) :]) + params_docstring = _convert_output_args_doc(params_docstring) + else: + raise ValueError( + f"No `Args` or `Parameters` section is found in the docstring of `{output_type.__name__}`. Make sure it has " + "docstring and contain either `Args` or `Parameters`." 
+    )
+
+    # Add the return introduction
+    full_output_type = f"{output_type.__module__}.{output_type.__name__}"
+    intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
+    intro = intro.format(full_output_type=full_output_type, config_class=config_class)
+    result = intro + params_docstring
+
+    # Apply minimum indent if necessary
+    if min_indent is not None:
+        lines = result.split("\n")
+        # Find the indent of the first nonempty line
+        i = 0
+        while len(lines[i]) == 0:
+            i += 1
+        indent = len(_get_indent(lines[i]))
+        # If too small, add indentation to all nonempty lines
+        if indent < min_indent:
+            to_add = " " * (min_indent - indent)
+            lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines]
+            result = "\n".join(lines)
+
+    return result
+
+
+FAKE_MODEL_DISCLAIMER = """
+    <Tip warning={true}>
+
+    This example uses a random model as the real ones are all very big. To get proper results, you should use
+    {real_checkpoint} instead of {fake_checkpoint}. If you get out-of-memory when loading that checkpoint, you can try
+    adding `device_map="auto"` in the `from_pretrained` call.
+
+    </Tip>
+"""
+
+
+PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import torch
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> inputs = tokenizer(
+    ...     "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt"
+    ... )
+
+    >>> with torch.no_grad():
+    ...     logits = model(**inputs).logits
+
+    >>> predicted_token_class_ids = logits.argmax(-1)
+
+    >>> # Note that tokens are classified rather than input words, which means that
+    >>> # there might be more predicted token classes than words.
+    >>> # Multiple token classes might account for the same word
+    >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]]
+    >>> predicted_tokens_classes
+    {expected_output}
+
+    >>> labels = predicted_token_class_ids
+    >>> loss = model(**inputs, labels=labels).loss
+    >>> round(loss.item(), 2)
+    {expected_loss}
+    ```
+"""
+
+PT_QUESTION_ANSWERING_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import torch
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
+
+    >>> inputs = tokenizer(question, text, return_tensors="pt")
+    >>> with torch.no_grad():
+    ...
outputs = model(**inputs) + + >>> answer_start_index = outputs.start_logits.argmax() + >>> answer_end_index = outputs.end_logits.argmax() + + >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] + >>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True) + {expected_output} + + >>> # target is "nice puppet" + >>> target_start_index = torch.tensor([{qa_target_start_index}]) + >>> target_end_index = torch.tensor([{qa_target_end_index}]) + + >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) + >>> loss = outputs.loss + >>> round(loss.item(), 2) + {expected_loss} + ``` +""" + +PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" + Example of single-label classification: + + ```python + >>> import torch + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + + >>> predicted_class_id = logits.argmax().item() + >>> model.config.id2label[predicted_class_id] + {expected_output} + + >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` + >>> num_labels = len(model.config.id2label) + >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) + + >>> labels = torch.tensor([1]) + >>> loss = model(**inputs, labels=labels).loss + >>> round(loss.item(), 2) + {expected_loss} + ``` + + Example of multi-label classification: + + ```python + >>> import torch + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + + >>> predicted_class_ids = torch.arange(0, logits.shape[-1])[torch.sigmoid(logits).squeeze(dim=0) > 0.5] + + >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` + >>> num_labels = len(model.config.id2label) + >>> model = {model_class}.from_pretrained( + ... "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification" + ... ) + + >>> labels = torch.sum( + ... torch.nn.functional.one_hot(predicted_class_ids[None, :].clone(), num_classes=num_labels), dim=1 + ... ).to(torch.float) + >>> loss = model(**inputs, labels=labels).loss + ``` +""" + +PT_MASKED_LM_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") + + >>> with torch.no_grad(): + ... 
logits = model(**inputs).logits + + >>> # retrieve index of {mask} + >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] + + >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) + >>> tokenizer.decode(predicted_token_id) + {expected_output} + + >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] + >>> # mask labels of non-{mask} tokens + >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) + + >>> outputs = model(**inputs, labels=labels) + >>> round(outputs.loss.item(), 2) + {expected_loss} + ``` +""" + +PT_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + ``` +""" + +PT_MULTIPLE_CHOICE_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> choice0 = "It is eaten with a fork and a knife." + >>> choice1 = "It is eaten while held in the hand." + >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 + + >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) + >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1 + + >>> # the linear classifier still needs to be trained + >>> loss = outputs.loss + >>> logits = outputs.logits + ``` +""" + +PT_CAUSAL_LM_SAMPLE = r""" + Example: + + ```python + >>> import torch + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs, labels=inputs["input_ids"]) + >>> loss = outputs.loss + >>> logits = outputs.logits + ``` +""" + +PT_SPEECH_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoProcessor, {model_class} + >>> import torch + >>> from datasets import load_dataset + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = AutoProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") + >>> with torch.no_grad(): + ... 
outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + {expected_output} + ``` +""" + +PT_SPEECH_CTC_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoProcessor, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = AutoProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + >>> predicted_ids = torch.argmax(logits, dim=-1) + + >>> # transcribe speech + >>> transcription = processor.batch_decode(predicted_ids) + >>> transcription[0] + {expected_output} + + >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids + + >>> # compute loss + >>> loss = model(**inputs).loss + >>> round(loss.item(), 2) + {expected_loss} + ``` +""" + +PT_SPEECH_SEQ_CLASS_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoFeatureExtractor, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") + + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + + >>> predicted_class_ids = torch.argmax(logits, dim=-1).item() + >>> predicted_label = model.config.id2label[predicted_class_ids] + >>> predicted_label + {expected_output} + + >>> # compute loss - target_label is e.g. "down" + >>> target_label = model.config.id2label[0] + >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) + >>> loss = model(**inputs).loss + >>> round(loss.item(), 2) + {expected_loss} + ``` +""" + + +PT_SPEECH_FRAME_CLASS_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoFeatureExtractor, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate) + >>> with torch.no_grad(): + ... 
logits = model(**inputs).logits + + >>> probabilities = torch.sigmoid(logits[0]) + >>> # labels is a one-hot array of shape (num_frames, num_speakers) + >>> labels = (probabilities > 0.5).long() + >>> labels[0].tolist() + {expected_output} + ``` +""" + + +PT_SPEECH_XVECTOR_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoFeatureExtractor, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = feature_extractor( + ... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True + ... ) + >>> with torch.no_grad(): + ... embeddings = model(**inputs).embeddings + + >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() + + >>> # the resulting embeddings can be used for cosine similarity-based retrieval + >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1) + >>> similarity = cosine_sim(embeddings[0], embeddings[1]) + >>> threshold = 0.7 # the optimal threshold is dataset-dependent + >>> if similarity < threshold: + ... print("Speakers are not the same!") + >>> round(similarity.item(), 2) + {expected_output} + ``` +""" + +PT_VISION_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoImageProcessor, {model_class} + >>> import torch + >>> from datasets import load_dataset + + >>> dataset = load_dataset("huggingface/cats-image") + >>> image = dataset["test"]["image"][0] + + >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = image_processor(image, return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + {expected_output} + ``` +""" + +PT_VISION_SEQ_CLASS_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoImageProcessor, {model_class} + >>> import torch + >>> from datasets import load_dataset + + >>> dataset = load_dataset("huggingface/cats-image") + >>> image = dataset["test"]["image"][0] + + >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = image_processor(image, return_tensors="pt") + + >>> with torch.no_grad(): + ... 
logits = model(**inputs).logits
+
+    >>> # model predicts one of the 1000 ImageNet classes
+    >>> predicted_label = logits.argmax(-1).item()
+    >>> print(model.config.id2label[predicted_label])
+    {expected_output}
+    ```
+"""
+
+
+PT_SAMPLE_DOCSTRINGS = {
+    "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE,
+    "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE,
+    "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE,
+    "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE,
+    "MaskedLM": PT_MASKED_LM_SAMPLE,
+    "LMHead": PT_CAUSAL_LM_SAMPLE,
+    "BaseModel": PT_BASE_MODEL_SAMPLE,
+    "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE,
+    "CTC": PT_SPEECH_CTC_SAMPLE,
+    "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE,
+    "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE,
+    "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE,
+    "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE,
+    "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE,
+}
+
+
+TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import tensorflow as tf
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> inputs = tokenizer(
+    ...     "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf"
+    ... )
+
+    >>> logits = model(**inputs).logits
+    >>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1)
+
+    >>> # Note that tokens are classified rather than input words, which means that
+    >>> # there might be more predicted token classes than words.
+    >>> # Multiple token classes might account for the same word
+    >>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
+    >>> predicted_tokens_classes
+    {expected_output}
+    ```
+
+    ```python
+    >>> labels = predicted_token_class_ids
+    >>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss)
+    >>> round(float(loss), 2)
+    {expected_loss}
+    ```
+"""
+
+TF_QUESTION_ANSWERING_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import tensorflow as tf
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
+
+    >>> inputs = tokenizer(question, text, return_tensors="tf")
+    >>> outputs = model(**inputs)
+
+    >>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
+    >>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
+
+    >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
+    >>> tokenizer.decode(predict_answer_tokens)
+    {expected_output}
+    ```
+
+    ```python
+    >>> # target is "nice puppet"
+    >>> target_start_index = tf.constant([{qa_target_start_index}])
+    >>> target_end_index = tf.constant([{qa_target_end_index}])
+
+    >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
+    >>> loss = tf.math.reduce_mean(outputs.loss)
+    >>> round(float(loss), 2)
+    {expected_loss}
+    ```
+"""
+
+TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+    >>> import tensorflow as tf
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+
+    >>> logits
= model(**inputs).logits + + >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0]) + >>> model.config.id2label[predicted_class_id] + {expected_output} + ``` + + ```python + >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` + >>> num_labels = len(model.config.id2label) + >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) + + >>> labels = tf.constant(1) + >>> loss = model(**inputs, labels=labels).loss + >>> round(float(loss), 2) + {expected_loss} + ``` +""" + +TF_MASKED_LM_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf") + >>> logits = model(**inputs).logits + + >>> # retrieve index of {mask} + >>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0]) + >>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index) + + >>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1) + >>> tokenizer.decode(predicted_token_id) + {expected_output} + ``` + + ```python + >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] + >>> # mask labels of non-{mask} tokens + >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) + + >>> outputs = model(**inputs, labels=labels) + >>> round(float(outputs.loss), 2) + {expected_loss} + ``` +""" + +TF_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") + >>> outputs = model(inputs) + + >>> last_hidden_states = outputs.last_hidden_state + ``` +""" + +TF_MULTIPLE_CHOICE_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> choice0 = "It is eaten with a fork and a knife." + >>> choice1 = "It is eaten while held in the hand." 
+ + >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True) + >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} + >>> outputs = model(inputs) # batch size is 1 + + >>> # the linear classifier still needs to be trained + >>> logits = outputs.logits + ``` +""" + +TF_CAUSAL_LM_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") + >>> outputs = model(inputs) + >>> logits = outputs.logits + ``` +""" + +TF_SPEECH_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoProcessor, {model_class} + >>> from datasets import load_dataset + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = AutoProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + {expected_output} + ``` +""" + +TF_SPEECH_CTC_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoProcessor, {model_class} + >>> from datasets import load_dataset + >>> import tensorflow as tf + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> dataset = dataset.sort("id") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = AutoProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") + >>> logits = model(**inputs).logits + >>> predicted_ids = tf.math.argmax(logits, axis=-1) + + >>> # transcribe speech + >>> transcription = processor.batch_decode(predicted_ids) + >>> transcription[0] + {expected_output} + ``` + + ```python + >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids + + >>> # compute loss + >>> loss = model(**inputs).loss + >>> round(float(loss), 2) + {expected_loss} + ``` +""" + +TF_VISION_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoImageProcessor, {model_class} + >>> from datasets import load_dataset + + >>> dataset = load_dataset("huggingface/cats-image") + >>> image = dataset["test"]["image"][0] + + >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = image_processor(image, return_tensors="tf") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + {expected_output} + ``` +""" + +TF_VISION_SEQ_CLASS_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoImageProcessor, {model_class} + >>> import tensorflow as tf + >>> from datasets import load_dataset + + >>> dataset = load_dataset("huggingface/cats-image") + >>> image = dataset["test"]["image"][0] 
+ + >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = image_processor(image, return_tensors="tf") + >>> logits = model(**inputs).logits + + >>> # model predicts one of the 1000 ImageNet classes + >>> predicted_label = int(tf.math.argmax(logits, axis=-1)) + >>> print(model.config.id2label[predicted_label]) + {expected_output} + ``` +""" + +TF_SAMPLE_DOCSTRINGS = { + "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, + "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, + "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, + "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, + "MaskedLM": TF_MASKED_LM_SAMPLE, + "LMHead": TF_CAUSAL_LM_SAMPLE, + "BaseModel": TF_BASE_MODEL_SAMPLE, + "SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE, + "CTC": TF_SPEECH_CTC_SAMPLE, + "VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE, + "ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE, +} + + +FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") + + >>> outputs = model(**inputs) + >>> logits = outputs.logits + ``` +""" + +FLAX_QUESTION_ANSWERING_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" + >>> inputs = tokenizer(question, text, return_tensors="jax") + + >>> outputs = model(**inputs) + >>> start_scores = outputs.start_logits + >>> end_scores = outputs.end_logits + ``` +""" + +FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") + + >>> outputs = model(**inputs) + >>> logits = outputs.logits + ``` +""" + +FLAX_MASKED_LM_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax") + + >>> outputs = model(**inputs) + >>> logits = outputs.logits + ``` +""" + +FLAX_BASE_MODEL_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + ``` +""" + +FLAX_MULTIPLE_CHOICE_SAMPLE = r""" + Example: + + ```python + >>> from transformers import AutoTokenizer, {model_class} + + >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") + >>> model = {model_class}.from_pretrained("{checkpoint}") + + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> choice0 = "It is eaten with a fork and a knife." 
+    >>> choice1 = "It is eaten while held in the hand."
+
+    >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True)
+    >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}})
+
+    >>> logits = outputs.logits
+    ```
+"""
+
+FLAX_CAUSAL_LM_SAMPLE = r"""
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, {model_class}
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
+    >>> model = {model_class}.from_pretrained("{checkpoint}")
+
+    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
+    >>> outputs = model(**inputs)
+
+    >>> # retrieve logits for next token
+    >>> next_token_logits = outputs.logits[:, -1]
+    ```
+"""
+
+FLAX_SAMPLE_DOCSTRINGS = {
+    "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE,
+    "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE,
+    "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE,
+    "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE,
+    "MaskedLM": FLAX_MASKED_LM_SAMPLE,
+    "BaseModel": FLAX_BASE_MODEL_SAMPLE,
+    "LMHead": FLAX_CAUSAL_LM_SAMPLE,
+}
+
+
+def filter_outputs_from_example(docstring, **kwargs):
+    """
+    Removes the lines testing an output with the doctest syntax in a code sample when it's set to `None`.
+    """
+    for key, value in kwargs.items():
+        if value is not None:
+            continue
+
+        doc_key = "{" + key + "}"
+        docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring)
+
+    return docstring
+
+
+def add_code_sample_docstrings(
+    *docstr,
+    processor_class=None,
+    checkpoint=None,
+    output_type=None,
+    config_class=None,
+    mask="[MASK]",
+    qa_target_start_index=14,
+    qa_target_end_index=15,
+    model_cls=None,
+    modality=None,
+    expected_output=None,
+    expected_loss=None,
+    real_checkpoint=None,
+    revision=None,
+):
+    def docstring_decorator(fn):
+        # model_class defaults to function's class if not specified otherwise
+        model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls
+
+        if model_class[:2] == "TF":
+            sample_docstrings = TF_SAMPLE_DOCSTRINGS
+        elif model_class[:4] == "Flax":
+            sample_docstrings = FLAX_SAMPLE_DOCSTRINGS
+        else:
+            sample_docstrings = PT_SAMPLE_DOCSTRINGS
+
+        # putting all kwargs for docstrings in a dict to be used
+        # with the `.format(**doc_kwargs)`. Note that the string might
+        # be formatted with non-existing keys, which is fine.
+        doc_kwargs = {
+            "model_class": model_class,
+            "processor_class": processor_class,
+            "checkpoint": checkpoint,
+            "mask": mask,
+            "qa_target_start_index": qa_target_start_index,
+            "qa_target_end_index": qa_target_end_index,
+            "expected_output": expected_output,
+            "expected_loss": expected_loss,
+            "real_checkpoint": real_checkpoint,
+            "fake_checkpoint": checkpoint,
+            "true": "{true}",  # For syntax that conflicts with formatting.
+        }
+
+        if ("SequenceClassification" in model_class or "AudioClassification" in model_class) and modality == "audio":
+            code_sample = sample_docstrings["AudioClassification"]
+        elif "SequenceClassification" in model_class:
+            code_sample = sample_docstrings["SequenceClassification"]
+        elif "QuestionAnswering" in model_class:
+            code_sample = sample_docstrings["QuestionAnswering"]
+        elif "TokenClassification" in model_class:
+            code_sample = sample_docstrings["TokenClassification"]
+        elif "MultipleChoice" in model_class:
+            code_sample = sample_docstrings["MultipleChoice"]
+        elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]:
+            code_sample = sample_docstrings["MaskedLM"]
+        elif "LMHead" in model_class or "CausalLM" in model_class:
+            code_sample = sample_docstrings["LMHead"]
+        elif "CTC" in model_class:
+            code_sample = sample_docstrings["CTC"]
+        elif "AudioFrameClassification" in model_class:
+            code_sample = sample_docstrings["AudioFrameClassification"]
+        elif "XVector" in model_class and modality == "audio":
+            code_sample = sample_docstrings["AudioXVector"]
+        elif "Model" in model_class and modality == "audio":
+            code_sample = sample_docstrings["SpeechBaseModel"]
+        elif "Model" in model_class and modality == "vision":
+            code_sample = sample_docstrings["VisionBaseModel"]
+        elif "Model" in model_class or "Encoder" in model_class:
+            code_sample = sample_docstrings["BaseModel"]
+        elif "ImageClassification" in model_class:
+            code_sample = sample_docstrings["ImageClassification"]
+        else:
+            raise ValueError(f"Docstring can't be built for model {model_class}")
+
+        code_sample = filter_outputs_from_example(
+            code_sample, expected_output=expected_output, expected_loss=expected_loss
+        )
+        if real_checkpoint is not None:
+            code_sample = FAKE_MODEL_DISCLAIMER + code_sample
+        func_doc = (fn.__doc__ or "") + "".join(docstr)
+        output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class)
+        built_doc = code_sample.format(**doc_kwargs)
+        if revision is not None:
+            if re.match(r"^refs/pr/\d+", revision):
+                raise ValueError(
+                    f"The provided revision '{revision}' is incorrect.
It should point to" + " a pull request reference on the hub like 'refs/pr/6'" + ) + built_doc = built_doc.replace( + f'from_pretrained("{checkpoint}")', f'from_pretrained("{checkpoint}", revision="{revision}")' + ) + fn.__doc__ = func_doc + output_doc + built_doc + return fn + + return docstring_decorator + + +def replace_return_docstrings(output_type=None, config_class=None): + def docstring_decorator(fn): + func_doc = fn.__doc__ + lines = func_doc.split("\n") + i = 0 + while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None: + i += 1 + if i < len(lines): + indent = len(_get_indent(lines[i])) + lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent) + func_doc = "\n".join(lines) + else: + raise ValueError( + f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, " + f"current docstring is:\n{func_doc}" + ) + fn.__doc__ = func_doc + return fn + + return docstring_decorator + + +def copy_func(f): + """Returns a copy of a function f.""" + # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard) + g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__) + g = functools.update_wrapper(g, f) + g.__kwdefaults__ = f.__kwdefaults__ + return g diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_detectron2_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_detectron2_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..22ec32fe30a1b9abbc5d810510d97d5c81082561 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_detectron2_objects.py @@ -0,0 +1,11 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import requires_backends + + +class LayoutLMv2Model: + def __init__(self, *args, **kwargs): + requires_backends(self, ["detectron2"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["detectron2"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..e6d75a6ec22e90427c972a753a24afd1a780758f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py @@ -0,0 +1,23 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
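+#
+# Importing one of these placeholder classes succeeds even when the optional backends are
+# missing; instantiating one calls `requires_backends`, which raises an ImportError with
+# installation instructions for the backends listed in `_backends`.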
+from ..utils import DummyObject, requires_backends + + +class Pop2PianoFeatureExtractor(metaclass=DummyObject): + _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) + + +class Pop2PianoTokenizer(metaclass=DummyObject): + _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) + + +class Pop2PianoProcessor(metaclass=DummyObject): + _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_music_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_music_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..89052be47c1d32bac5cbd6fceab183fc1d75d3bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_music_objects.py @@ -0,0 +1,16 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class Pop2PianoFeatureExtractor(metaclass=DummyObject): + _backends = ["music"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["music"]) + + +class Pop2PianoTokenizer(metaclass=DummyObject): + _backends = ["music"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["music"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_pt_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_pt_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..f724d7dd6c41d501c0430fd0c850d7af25bd1005 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_pt_objects.py @@ -0,0 +1,10018 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
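Every stub in the ten-thousand-line module that follows uses the same two-piece pattern: a _backends list naming the required extras, and requires_backends, which raises an ImportError pointing at the missing backend. A simplified sketch of that machinery (the real versions live in transformers/utils/import_utils.py and format friendlier install instructions; the find_spec check and SomeTorchModel are stand-ins):

    import importlib.util

    def requires_backends(obj, backends):
        # Simplified: raise if any required backend is not importable.
        name = getattr(obj, "__name__", type(obj).__name__)
        missing = [b for b in backends if importlib.util.find_spec(b) is None]
        if missing:
            raise ImportError(f"{name} requires the {missing} backend(s) to be installed.")

    class DummyObject(type):
        # Metaclass: any public attribute access on a dummy class
        # (e.g. from_pretrained) re-raises the backend error.
        def __getattribute__(cls, key):
            if key.startswith("_"):
                return super().__getattribute__(key)
            requires_backends(cls, cls._backends)

    class SomeTorchModel(metaclass=DummyObject):  # the shape of every stub below
        _backends = ["torch"]

        def __init__(self, *args, **kwargs):
            requires_backends(self, ["torch"])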
+from ..utils import DummyObject, requires_backends + + +class PyTorchBenchmark(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PyTorchBenchmarkArguments(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Cache(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DynamicCache(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SinkCache(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class StaticCache(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GlueDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GlueDataTrainingArguments(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LineByLineTextDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LineByLineWithRefDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LineByLineWithSOPTextDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SquadDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SquadDataTrainingArguments(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TextDataset(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TextDatasetForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlternatingCodebooksLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeamScorer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeamSearchScorer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConstrainedBeamSearchScorer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Constraint(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConstraintListState(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
DisjunctiveConstraint(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EncoderNoRepeatNGramLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EncoderRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EpsilonLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EtaLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ExponentialDecayLengthPenalty(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ForceTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GenerationMixin(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class HammingDiversityLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class InfNanRemoveLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LogitNormalization(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LogitsProcessorList(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class LogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MaxLengthCriteria(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MaxTimeCriteria(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MinLengthLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MinNewTokensLengthLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NoBadWordsLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NoRepeatNGramLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + 
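The practical effect of these generation stubs: in an environment without torch, `from transformers import ...` still succeeds because the name resolves to the dummy, and the ImportError surfaces only on first use. A hypothetical torch-less session:

    # Hypothetical session without torch installed; LogitsProcessorList here
    # resolves to the dummy class defined in this module.
    from transformers import LogitsProcessorList

    try:
        processors = LogitsProcessorList()
    except ImportError as err:
        print(err)  # message names the missing "torch" backend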
+class PhrasalConstraint(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PrefixConstrainedLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RepetitionPenaltyLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SequenceBiasLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class StoppingCriteria(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class StoppingCriteriaList(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SuppressTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TemperatureLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TopKLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TopPLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TypicalLogitsWarper(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UnbatchedClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperTimeStampLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AlbertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertModel(metaclass=DummyObject): + _backends = ["torch"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlbertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_albert(*args, **kwargs): + requires_backends(load_tf_weights_in_albert, ["torch"]) + + +ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AlignModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlignPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlignTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AlignVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AltCLIPModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AltCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AltCLIPTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AltCLIPVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ASTForAudioClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ASTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ASTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_AUDIO_XVECTOR_MAPPING = None + + +MODEL_FOR_BACKBONE_MAPPING = None + + +MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = None + + +MODEL_FOR_CAUSAL_LM_MAPPING = None + + +MODEL_FOR_CTC_MAPPING = None + + +MODEL_FOR_DEPTH_ESTIMATION_MAPPING = None + + +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None + + +MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_IMAGE_MAPPING = None + + +MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = None + + +MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = None + + +MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = None + + +MODEL_FOR_KEYPOINT_DETECTION_MAPPING = None + + +MODEL_FOR_MASK_GENERATION_MAPPING = None + + +MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None + + +MODEL_FOR_MASKED_LM_MAPPING = None + + +MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None + + +MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None + + +MODEL_FOR_OBJECT_DETECTION_MAPPING = None + + +MODEL_FOR_PRETRAINING_MAPPING = None + + +MODEL_FOR_QUESTION_ANSWERING_MAPPING = None + + +MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None + + +MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None + + +MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None + + +MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None + + 
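These MODEL_FOR_*_MAPPING placeholders (the list continues below) are None only in the torch-less case; with torch installed, the same names are the lazy architecture mappings that the AutoModel factories consult. Illustrative usage (the checkpoint name is an example, not something this diff pins):

    from transformers import AutoModelForSequenceClassification

    # The Auto factory looks the checkpoint's config class up in
    # MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING and instantiates the match.
    model = AutoModelForSequenceClassification.from_pretrained(
        "distilbert-base-uncased-finetuned-sst-2-english"
    )
    print(type(model).__name__)  # DistilBertForSequenceClassification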
+MODEL_FOR_TEXT_ENCODING_MAPPING = None + + +MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = None + + +MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = None + + +MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = None + + +MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = None + + +MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_VISION_2_SEQ_MAPPING = None + + +MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = None + + +MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None + + +MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = None + + +MODEL_MAPPING = None + + +MODEL_WITH_LM_HEAD_MAPPING = None + + +class AutoBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForAudioClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForAudioXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForDepthEstimation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForDocumentQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForImageSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForImageToImage(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForInstanceSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForKeypointDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForMaskGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + 
def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForSeq2SeqLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForSpeechSeq2Seq(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTableQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTextEncoding(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTextToSpectrogram(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTextToWaveform(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForUniversalSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForVision2Seq(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForVisualQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForZeroShotImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelForZeroShotObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoModelWithLMHead(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AutoformerForPrediction(metaclass=DummyObject): + _backends = ["torch"] + 
+ def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AutoformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BARK_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BarkCausalModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkCoarseModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkFineModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BarkSemanticModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BART_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BartForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BartPretrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PretrainedBartModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BeitBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeitForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeitForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeitForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BeitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
BeitPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_bert(*args, **kwargs): + requires_backends(load_tf_weights_in_bert, ["torch"]) + + +class BertGenerationDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertGenerationEncoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BertGenerationPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_bert_generation(*args, **kwargs): + requires_backends(load_tf_weights_in_bert_generation, ["torch"]) + + +BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BigBirdForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
BigBirdForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_big_bird(*args, **kwargs): + requires_backends(load_tf_weights_in_big_bird, ["torch"]) + + +BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BigBirdPegasusForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BigBirdPegasusPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BioGptForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BioGptForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BioGptForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BioGptModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BioGptPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BitBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BitForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BitPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class 
BlenderbotForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BlenderbotSmallForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotSmallForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotSmallModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlenderbotSmallPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BlipForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipForImageTextRetrieval(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BlipVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Blip2ForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Blip2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Blip2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Blip2QFormerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Blip2VisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BloomForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
BloomForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BloomForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BloomForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BloomModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BloomPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BridgeTowerForContrastiveLearning(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BridgeTowerForImageAndTextRetrieval(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BridgeTowerForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BridgeTowerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BridgeTowerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +BROS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class BrosForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosSpadeEEForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class BrosSpadeELForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CamembertForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + 
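Downstream code that wants to avoid tripping these stubs can test for the backend first; is_torch_available is the public helper transformers itself uses to choose between the real modules and this dummy file:

    from transformers.utils import is_torch_available

    if is_torch_available():
        from transformers import CamembertForSequenceClassification
        # safe to build and run the real model here
    else:
        print("torch missing: transformers exposes only the dummy stubs above")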
+class CamembertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CamembertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CanineForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CanineModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CaninePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_canine(*args, **kwargs): + requires_backends(load_tf_weights_in_canine, ["torch"]) + + +CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ChineseCLIPModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ChineseCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ChineseCLIPTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ChineseCLIPVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ClapAudioModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapAudioModelWithProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapFeatureExtractor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClapTextModelWithProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["torch"]) + + +CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CLIPForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPTextModelWithProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPVisionModelWithProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CLIPSegForImageSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPSegModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPSegPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPSegTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPSegVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ClvpDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpEncoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpModelForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ClvpPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CodeGenForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CodeGenModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CodeGenPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
CohereForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CohereModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CoherePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ConditionalDetrForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConditionalDetrForSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConditionalDetrModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConditionalDetrPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ConvBertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_convbert(*args, **kwargs): + requires_backends(load_tf_weights_in_convbert, ["torch"]) + + +CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ConvNextBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ConvNextV2Backbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
ConvNextV2ForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextV2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ConvNextV2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CpmAntForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CpmAntModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CpmAntPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CTRLForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CTRLLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CTRLModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CTRLPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CvtForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CvtModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CvtPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Data2VecAudioForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecAudioPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForMaskedLM(metaclass=DummyObject): 
+ _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecTextPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecVisionForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecVisionForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Data2VecVisionPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DbrxForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DbrxModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DbrxPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class DebertaForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class DebertaV2ForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DebertaV2ForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + 
[Stanza repeats for: DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification,
DebertaV2ForTokenClassification, DebertaV2Model, DebertaV2PreTrainedModel.]

+DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DecisionTransformerGPT2Model, DecisionTransformerGPT2PreTrainedModel,
DecisionTransformerModel, DecisionTransformerPreTrainedModel.]

+DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DeformableDetrForObjectDetection, DeformableDetrModel,
DeformableDetrPreTrainedModel.]

+DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DeiTForImageClassification, DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel.]

+MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel, MMBTForClassification,
MMBTModel, ModalEmbeddings, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification,
OpenLlamaModel, OpenLlamaPreTrainedModel.]

+RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: RetriBertModel, RetriBertPreTrainedModel.]

+TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel.]

+TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: AdaptiveEmbedding, TransfoXLForSequenceClassification,
TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel.]

+def load_tf_weights_in_transfo_xl(*args, **kwargs):
+    requires_backends(load_tf_weights_in_transfo_xl, ["torch"])

+VAN_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: VanForImageClassification, VanModel, VanPreTrainedModel.]

+DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DepthAnythingForDepthEstimation.]
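At run time, the effect of a placeholder such as TransfoXLModel above is that the import succeeds but any use fails. A small usage sketch, assuming torch is not installed and that these dummies are therefore what transformers exports (the exact error text depends on the installed version):

# In an environment without torch, the name still imports cleanly...
from transformers import TransfoXLModel

try:
    TransfoXLModel()  # ...but __init__ runs requires_backends(self, ["torch"])
except ImportError as err:
    print(f"expected failure: {err}")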
[Stanza repeats for: DepthAnythingPreTrainedModel.]

+DETA_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DetaForObjectDetection, DetaModel, DetaPreTrainedModel.]

+DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DetrForObjectDetection, DetrForSegmentation, DetrModel,
DetrPreTrainedModel.]

+DINAT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DinatBackbone, DinatForImageClassification, DinatModel,
DinatPreTrainedModel.]

+DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: Dinov2Backbone, Dinov2ForImageClassification, Dinov2Model,
Dinov2PreTrainedModel.]

+DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DistilBertForMaskedLM, DistilBertForMultipleChoice,
DistilBertForQuestionAnswering, DistilBertForSequenceClassification,
DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel.]

+DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DonutSwinModel, DonutSwinPreTrainedModel.]

+DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DPRContextEncoder, DPRPretrainedContextEncoder, DPRPreTrainedModel,
DPRPretrainedQuestionEncoder, DPRPretrainedReader, DPRQuestionEncoder, DPRReader.]

+DPT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel,
DPTPreTrainedModel.]

+EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel,
EfficientFormerPreTrainedModel.]

+EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
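The *_PRETRAINED_MODEL_ARCHIVE_LIST constants get the same no-torch treatment as the classes: in a torch-enabled install they are lists of checkpoint identifiers, while here they are bound to None so that importing them never fails. Downstream code therefore has to treat None as "no checkpoints known"; a hedged sketch using a hypothetical helper:

# known_checkpoints is a hypothetical helper, not a transformers API; it
# normalizes the dummy value None to an empty list of checkpoint names.
from transformers.utils import dummy_pt_objects


def known_checkpoints(archive_list):
    return list(archive_list) if archive_list is not None else []


# The dummy module itself always exists, so this runs with or without torch:
print(known_checkpoints(dummy_pt_objects.DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST))  # []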
[Stanza repeats for: EfficientNetForImageClassification, EfficientNetModel,
EfficientNetPreTrainedModel.]

+ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice,
ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification,
ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel.]

+def load_tf_weights_in_electra(*args, **kwargs):
+    requires_backends(load_tf_weights_in_electra, ["torch"])

+ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: EncodecModel, EncodecPreTrainedModel, EncoderDecoderModel.]

+ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice,
ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering,
ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel,
ErniePreTrainedModel.]

+ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: ErnieMForInformationExtraction, ErnieMForMultipleChoice,
ErnieMForQuestionAnswering, ErnieMForSequenceClassification,
ErnieMForTokenClassification, ErnieMModel, ErnieMPreTrainedModel.]

+ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: EsmFoldPreTrainedModel, EsmForMaskedLM, EsmForProteinFolding,
EsmForSequenceClassification, EsmForTokenClassification, EsmModel, EsmPreTrainedModel.]

+FALCON_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: FalconForCausalLM, FalconForQuestionAnswering,
FalconForSequenceClassification, FalconForTokenClassification, FalconModel,
FalconPreTrainedModel.]

+FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: FastSpeech2ConformerHifiGan, FastSpeech2ConformerModel,
FastSpeech2ConformerPreTrainedModel, FastSpeech2ConformerWithHifiGan.]

+FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: FlaubertForMultipleChoice, FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification,
FlaubertForTokenClassification, FlaubertModel, FlaubertPreTrainedModel,
FlaubertWithLMHeadModel.]

+FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: FlavaForPreTraining, FlavaImageCodebook, FlavaImageModel, FlavaModel,
FlavaMultimodalModel, FlavaPreTrainedModel, FlavaTextModel.]

+FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction,
FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification,
FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel.]

+FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: FocalNetBackbone, FocalNetForImageClassification,
FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel,
FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel.]

+FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice,
FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification,
FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel.]

+def load_tf_weights_in_funnel(*args, **kwargs):
+    requires_backends(load_tf_weights_in_funnel, ["torch"])

[Stanza repeats for: FuyuForCausalLM, FuyuPreTrainedModel, GemmaForCausalLM,
GemmaForSequenceClassification, GemmaModel, GemmaPreTrainedModel.]

+GIT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel.]

+GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GLPNForDepthEstimation, GLPNModel, GLPNPreTrainedModel.]

+GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GPT2DoubleHeadsModel, GPT2ForQuestionAnswering,
GPT2ForSequenceClassification, GPT2ForTokenClassification, GPT2LMHeadModel, GPT2Model,
GPT2PreTrainedModel.]

+def load_tf_weights_in_gpt2(*args, **kwargs):
+    requires_backends(load_tf_weights_in_gpt2, ["torch"])

+GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel.]

+GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GPTNeoForCausalLM, GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel,
GPTNeoPreTrainedModel.]

+def load_tf_weights_in_gpt_neo(*args, **kwargs):
+    requires_backends(load_tf_weights_in_gpt_neo, ["torch"])

+GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification.]
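Functions such as load_tf_weights_in_gpt2 above are handled like the classes, minus the metaclass: the dummy body just calls requires_backends on the function object itself. Since every placeholder in this file is mechanically identical, the whole file can be generated; the transformers source tree does this with a script (utils/check_dummies.py). The factories below are a hypothetical illustration of that idea, reusing the DummyObject and requires_backends sketched earlier, and are not the actual generator:

# Hypothetical factories for the two placeholder shapes in this file.
def make_dummy_function(fn_name, backends=("torch",)):
    def _dummy(*args, **kwargs):
        requires_backends(_dummy, list(backends))
    _dummy.__name__ = fn_name
    return _dummy


def make_dummy_class(cls_name, backends=("torch",)):
    def __init__(self, *args, **kwargs):
        requires_backends(self, list(backends))
    # Calling the metaclass directly builds a class equivalent to the stanzas above.
    return DummyObject(cls_name, (), {"_backends": list(backends), "__init__": __init__})


load_tf_weights_in_gpt2 = make_dummy_function("load_tf_weights_in_gpt2")
GPT2Model = make_dummy_class("GPT2Model")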
[Stanza repeats for: GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel,
GPTNeoXPreTrainedModel.]

+GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel.]

+GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GPTJForCausalLM, GPTJForQuestionAnswering,
GPTJForSequenceClassification, GPTJModel, GPTJPreTrainedModel.]

+GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GPTSanJapaneseForConditionalGeneration, GPTSanJapaneseModel,
GPTSanJapanesePreTrainedModel.]

+GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GraphormerForGraphClassification, GraphormerModel,
GraphormerPreTrainedModel.]

+GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GroundingDinoForObjectDetection, GroundingDinoModel,
GroundingDinoPreTrainedModel.]

+GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel,
GroupViTVisionModel.]

+HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: HubertForCTC, HubertForSequenceClassification, HubertModel,
HubertPreTrainedModel.]

+IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering,
IBertForSequenceClassification, IBertForTokenClassification, IBertModel,
IBertPreTrainedModel.]

+IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: IdeficsForVisionText2Text, IdeficsModel, IdeficsPreTrainedModel,
IdeficsProcessor.]

+IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: Idefics2ForConditionalGeneration, Idefics2Model,
Idefics2PreTrainedModel, Idefics2Processor.]

+IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: ImageGPTForCausalImageModeling, ImageGPTForImageClassification,
ImageGPTModel, ImageGPTPreTrainedModel.]

+def load_tf_weights_in_imagegpt(*args, **kwargs):
+    requires_backends(load_tf_weights_in_imagegpt, ["torch"])

+INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: InformerForPrediction, InformerModel, InformerPreTrainedModel.]

+INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel,
InstructBlipQFormerModel, InstructBlipVisionModel, JambaForCausalLM,
JambaForSequenceClassification, JambaModel, JambaPreTrainedModel.]

+JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE.]

+KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: Kosmos2ForConditionalGeneration, Kosmos2Model,
Kosmos2PreTrainedModel.]

+LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LayoutLMForMaskedLM, LayoutLMForQuestionAnswering,
LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel,
LayoutLMPreTrainedModel.]

+LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LayoutLMv2ForQuestionAnswering, LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification, LayoutLMv2Model, LayoutLMv2PreTrainedModel.]

+LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification,
LayoutLMv3ForTokenClassification, LayoutLMv3Model, LayoutLMv3PreTrainedModel.]

+LED_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LEDForConditionalGeneration, LEDForQuestionAnswering,
LEDForSequenceClassification, LEDModel, LEDPreTrainedModel.]

+LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LevitForImageClassification, LevitForImageClassificationWithTeacher,
LevitModel, LevitPreTrainedModel.]

+LILT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LiltForQuestionAnswering, LiltForSequenceClassification,
LiltForTokenClassification, LiltModel, LiltPreTrainedModel, LlamaForCausalLM,
LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaModel,
LlamaPreTrainedModel.]

+LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LlavaForConditionalGeneration.]
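Rather than catching the ImportError a dummy raises, callers can ask up front whether the real torch-backed classes are present. is_torch_available is a public transformers utility; the gating pattern itself is the only thing being illustrated here:

from transformers.utils import is_torch_available

if is_torch_available():
    # Real implementation: safe to construct and use.
    from transformers import LlamaForCausalLM
else:
    # Only the dummy from this file would be importable; avoid touching it.
    print("torch is not installed; LlamaForCausalLM is a placeholder that raises on use")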
[Stanza repeats for: LlavaPreTrainedModel.]

+LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LlavaNextForConditionalGeneration, LlavaNextPreTrainedModel.]

+LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LongformerForMaskedLM, LongformerForMultipleChoice,
LongformerForQuestionAnswering, LongformerForSequenceClassification,
LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel,
LongformerSelfAttention.]

+LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model,
LongT5PreTrainedModel.]

+LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: LukeForEntityClassification, LukeForEntityPairClassification,
LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice,
LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification,
LukeModel, LukePreTrainedModel, LxmertEncoder, LxmertForPreTraining,
LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel,
LxmertVisualFeatureEncoder, LxmertXLayer.]

+M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: M2M100ForConditionalGeneration, M2M100Model, M2M100PreTrainedModel.]

+MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: MambaForCausalLM, MambaModel, MambaPreTrainedModel,
MarianForCausalLM, MarianModel, MarianMTModel.]

+MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: MarkupLMForQuestionAnswering, MarkupLMForSequenceClassification,
MarkupLMForTokenClassification, MarkupLMModel, MarkupLMPreTrainedModel.]

+MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: Mask2FormerForUniversalSegmentation, Mask2FormerModel,
Mask2FormerPreTrainedModel.]

+MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: MaskFormerForInstanceSegmentation, MaskFormerModel,
MaskFormerPreTrainedModel, MaskFormerSwinBackbone, MBartForCausalLM,
MBartForConditionalGeneration, MBartForQuestionAnswering,
MBartForSequenceClassification, MBartModel, MBartPreTrainedModel.]

+MEGA_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice,
MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification,
MegaModel, MegaPreTrainedModel.]

+MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: MegatronBertForCausalLM, MegatronBertForMaskedLM,
MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining, MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification, MegatronBertForTokenClassification,
MegatronBertModel, MegatronBertPreTrainedModel.]

+MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST = None

[Stanza repeats for: MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel,
MistralForCausalLM, MistralForSequenceClassification, MistralModel,
MistralPreTrainedModel.]

+class
MixtralForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MixtralForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MixtralModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MixtralPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MobileBertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileBertForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileBertForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileBertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileBertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_mobilebert(*args, **kwargs): + requires_backends(load_tf_weights_in_mobilebert, ["torch"]) + + +MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MobileNetV1ForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileNetV1Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileNetV1PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_mobilenet_v1(*args, **kwargs): + requires_backends(load_tf_weights_in_mobilenet_v1, ["torch"]) + + +MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MobileNetV2ForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileNetV2ForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileNetV2Model(metaclass=DummyObject): + 
_backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileNetV2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_mobilenet_v2(*args, **kwargs): + requires_backends(load_tf_weights_in_mobilenet_v2, ["torch"]) + + +MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MobileViTForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileViTForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileViTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileViTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MobileViTV2ForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileViTV2ForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileViTV2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MobileViTV2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MPNetForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MPNetForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MPNetForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MPNetForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MPNetForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MPNetLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MPNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MPNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MptForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MptForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MptForSequenceClassification(metaclass=DummyObject): + _backends 
= ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MptForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MptModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MptPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MRA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MraForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MraForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MraForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MraForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MraForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MraModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MraPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MT5EncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MT5ForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MT5ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MT5ForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MT5Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MT5PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MusicgenForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MusicgenForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MusicgenModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MusicgenPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
MusicgenProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MusicgenMelodyForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MusicgenMelodyForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MusicgenMelodyModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MusicgenMelodyPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +MVP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MvpForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MvpForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MvpForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MvpForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MvpModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MvpPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +NAT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class NatBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NatForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NatModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NatPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class NezhaForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NezhaForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NezhaForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NezhaForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NezhaForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NezhaForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
NezhaForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NezhaModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NezhaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class NllbMoeForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NllbMoeModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NllbMoePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NllbMoeSparseMLP(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NllbMoeTop2Router(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class NystromformerForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NystromformerForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NystromformerForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NystromformerForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NystromformerForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NystromformerLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NystromformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class NystromformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OlmoForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OlmoModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OlmoPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class OneFormerForUniversalSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OneFormerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
OneFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class OpenAIGPTDoubleHeadsModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OpenAIGPTForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OpenAIGPTLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OpenAIGPTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OpenAIGPTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_openai_gpt(*args, **kwargs): + requires_backends(load_tf_weights_in_openai_gpt, ["torch"]) + + +OPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class OPTForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OPTForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OPTForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OPTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OPTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Owlv2ForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Owlv2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Owlv2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Owlv2TextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Owlv2VisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class OwlViTForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OwlViTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OwlViTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OwlViTTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class OwlViTVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["torch"]) + + +PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PatchTSMixerForPrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSMixerForPretraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSMixerForRegression(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSMixerForTimeSeriesClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSMixerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSMixerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PatchTSTForClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTForPrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTForPretraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTForRegression(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PatchTSTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PegasusForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PegasusForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PegasusModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PegasusPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PegasusXForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PegasusXModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PegasusXPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PerceiverForImageClassificationConvProcessing(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
PerceiverForImageClassificationFourier(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PerceiverForImageClassificationLearned(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PerceiverForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PerceiverForMultimodalAutoencoding(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PerceiverForOpticalFlow(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PerceiverForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PerceiverLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PerceiverModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PerceiverPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PersimmonForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PersimmonForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PersimmonModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PersimmonPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +PHI_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PhiForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PhiForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PhiForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PhiModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PhiPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Pix2StructForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Pix2StructPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Pix2StructTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Pix2StructVisionModel(metaclass=DummyObject): + 
_backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +PLBART_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PLBartForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PLBartForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PLBartForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PLBartModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PLBartPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PoolFormerForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PoolFormerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PoolFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Pop2PianoForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Pop2PianoPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ProphetNetDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ProphetNetEncoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ProphetNetForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ProphetNetForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ProphetNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ProphetNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +PVT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PvtForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PvtModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PvtPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PvtV2Backbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + 
+class PvtV2ForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PvtV2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PvtV2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class QDQBertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class QDQBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class QDQBertForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class QDQBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class QDQBertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class QDQBertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class QDQBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class QDQBertLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class QDQBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class QDQBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_qdqbert(*args, **kwargs): + requires_backends(load_tf_weights_in_qdqbert, ["torch"]) + + +class Qwen2ForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Qwen2ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Qwen2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Qwen2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Qwen2MoeForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Qwen2MoeForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Qwen2MoeModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Qwen2MoePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RagModel(metaclass=DummyObject): + 
_backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RagPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RagSequenceForGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RagTokenForGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +REALM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class RealmEmbedder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RealmForOpenQA(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RealmKnowledgeAugEncoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RealmPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RealmReader(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RealmRetriever(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RealmScorer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_realm(*args, **kwargs): + requires_backends(load_tf_weights_in_realm, ["torch"]) + + +class RecurrentGemmaForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RecurrentGemmaModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RecurrentGemmaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ReformerAttention(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ReformerForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ReformerForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ReformerForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ReformerLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ReformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ReformerModelWithLMHead(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ReformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class RegNetForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RegNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RegNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class RemBertForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RemBertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RemBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RemBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RemBertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RemBertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RemBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RemBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RemBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_rembert(*args, **kwargs): + requires_backends(load_tf_weights_in_rembert, ["torch"]) + + +RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ResNetBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ResNetForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ResNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ResNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class RobertaForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, 
["torch"]) + + +class RobertaForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class RobertaPreLayerNormForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaPreLayerNormForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaPreLayerNormForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaPreLayerNormModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class RoCBertForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoCBertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoCBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoCBertForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoCBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoCBertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoCBertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoCBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoCBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["torch"]) + + +class RoCBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_roc_bert(*args, **kwargs): + requires_backends(load_tf_weights_in_roc_bert, ["torch"]) + + +ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class RoFormerForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoFormerForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoFormerForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoFormerForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoFormerForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoFormerForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoFormerLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoFormerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RoFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_roformer(*args, **kwargs): + requires_backends(load_tf_weights_in_roformer, ["torch"]) + + +RWKV_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class RwkvForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RwkvModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class RwkvPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SamModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SamPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SeamlessM4TCodeHifiGan(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TForSpeechToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TForSpeechToText(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TForTextToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TForTextToText(metaclass=DummyObject): + _backends 
= ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4THifiGan(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TTextToUnitForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4TTextToUnitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SeamlessM4Tv2ForSpeechToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2ForSpeechToText(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2ForTextToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2ForTextToText(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SeamlessM4Tv2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SegformerDecodeHead(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegformerForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegformerForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegformerLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SegGptForImageSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegGptModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegGptPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class 
SEWForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SEWDForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWDForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWDModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SEWDPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SiglipForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SiglipModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SiglipPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SiglipTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SiglipVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechEncoderDecoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Speech2TextForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Speech2TextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Speech2TextPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Speech2Text2ForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Speech2Text2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SpeechT5ForSpeechToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechT5ForSpeechToText(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
SpeechT5ForTextToSpeech(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechT5HifiGan(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechT5Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SpeechT5PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SplinterForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SplinterForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SplinterLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SplinterModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SplinterPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SqueezeBertForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertModule(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SqueezeBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class StableLmForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class StableLmForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class StableLmModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class StableLmPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Starcoder2ForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Starcoder2ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Starcoder2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Starcoder2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SuperPointForKeypointDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SuperPointPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SwiftFormerForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwiftFormerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwiftFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SwinBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwinForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwinForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwinModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwinPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Swin2SRForImageSuperResolution(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swin2SRModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swin2SRPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Swinv2Backbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swinv2ForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swinv2ForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Swinv2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
Swinv2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class SwitchTransformersEncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersSparseMLP(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SwitchTransformersTop1Router(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +T5_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class T5EncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5ForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5ForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class T5PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_t5(*args, **kwargs): + requires_backends(load_tf_weights_in_t5, ["torch"]) + + +TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TableTransformerForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TableTransformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TableTransformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TapasForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TapasForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TapasForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, 
["torch"]) + + +class TapasModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TapasPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_tapas(*args, **kwargs): + requires_backends(load_tf_weights_in_tapas, ["torch"]) + + +TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TimeSeriesTransformerForPrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimeSeriesTransformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimeSeriesTransformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TimesformerForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimesformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimesformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimmBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TrOCRForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TrOCRPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TVLT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TvltForAudioVisualClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvltForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvltModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvltPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +TVP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TvpForVideoGrounding(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvpModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TvpPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +UDOP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class UdopEncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UdopForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["torch"]) + + +class UdopModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UdopPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5EncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5ForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5ForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UMT5PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class UniSpeechForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class UniSpeechSatForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UniSpeechSatPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + 
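Every entry in this autogenerated file repeats one shape: a class with metaclass DummyObject, a `_backends` list, and an `__init__` that immediately calls `requires_backends`. The effect is that `import transformers` succeeds even without torch, and the ImportError is deferred until a torch-backed name is actually used. Below is a self-contained sketch of that mechanism; the real `DummyObject` and `requires_backends` live in `transformers.utils` (import_utils) and differ in detail, so the helper bodies here are illustrative assumptions, not the library's exact code.

import importlib.util


def requires_backends(obj, backends):
    # Raise an ImportError naming any backend that cannot be imported;
    # do nothing when all required backends are present.
    name = getattr(obj, "__name__", obj.__class__.__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires {', '.join(missing)}; try `pip install {' '.join(missing)}`.")


class DummyObject(type):
    # Metaclass: any public attribute access on the class itself
    # (e.g. UniSpeechModel.from_pretrained) re-runs the backend check.
    # In the real library this module is only imported when the backend
    # is missing, so the check always raises here.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class UniSpeechModel(metaclass=DummyObject):  # same shape as every entry above
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

With torch absent, both `UniSpeechModel()` and `UniSpeechModel.from_pretrained(...)` raise an ImportError that names torch rather than an opaque AttributeError. The explicit `__init__` is not redundant: calling a class goes through the metaclass's `__call__` slot and bypasses its `__getattribute__`, so instantiation needs its own `requires_backends` call.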
+UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class UnivNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UperNetForSemanticSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class UperNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VideoMAEForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VideoMAEForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VideoMAEModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VideoMAEPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViltForImageAndTextRetrieval(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltForImagesAndTextClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViltPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VipLlavaForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VipLlavaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisionEncoderDecoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisionTextDualEncoderModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VisualBertForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertForRegionToPhraseAlignment(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertForVisualReasoning(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VisualBertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViTForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTForMaskedImageModeling(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViTHybridForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTHybridModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTHybridPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViTMAEForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMAELayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMAEModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMAEPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViTMSNForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMSNModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMSNPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VITDET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VitDetBackbone(metaclass=DummyObject): + 
_backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VitDetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VitDetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VitMatteForImageMatting(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VitMattePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VITS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VitsModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VitsPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VivitForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VivitModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VivitPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Wav2Vec2ForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2Model(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2PreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Wav2Vec2BertForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2BertForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2BertForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + 
+class Wav2Vec2BertForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2BertModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2BertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class Wav2Vec2ConformerForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Wav2Vec2ConformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class WavLMForAudioFrameClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMForCTC(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMForXVector(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WavLMPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class WhisperForAudioClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["torch"]) + + +XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XCLIPModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XCLIPTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XCLIPVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XGLMForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XGLMModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XGLMPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLMForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMWithLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLMProphetNetDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetEncoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMProphetNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + 
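These placeholder files only matter because of an import-time switch in the package root: when the backend check fails, transformers/__init__.py exports the dummy module in place of the real modeling code, so every public name stays importable. The selection below is a hedged sketch of that behavior using one class from this file; the real wiring goes through `is_torch_available()` and `OptionalDependencyNotAvailable` rather than the `find_spec` probe used here.

import importlib.util

# Bind WavLMModel from the real modeling module when torch is importable,
# otherwise from the placeholder defined earlier in this file.
if importlib.util.find_spec("torch") is not None:
    from transformers.models.wavlm import WavLMModel
else:
    from transformers.utils.dummy_pt_objects import WavLMModel

Because both branches bind the same name, downstream code can write `from transformers import WavLMModel` unconditionally; in a torch-less environment the failure surfaces only at the first call or attribute access on the placeholder.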
+XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLMRobertaForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLMRobertaXLForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLMRobertaXLPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XLNetForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XLNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_xlnet(*args, **kwargs): + requires_backends(load_tf_weights_in_xlnet, ["torch"]) + + +XMOD_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XmodForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XmodPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class YolosForObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YolosModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YolosPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class YosoForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["torch"]) + + +class YosoPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Adafactor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class AdamW(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def get_constant_schedule(*args, **kwargs): + requires_backends(get_constant_schedule, ["torch"]) + + +def get_constant_schedule_with_warmup(*args, **kwargs): + requires_backends(get_constant_schedule_with_warmup, ["torch"]) + + +def get_cosine_schedule_with_warmup(*args, **kwargs): + requires_backends(get_cosine_schedule_with_warmup, ["torch"]) + + +def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs): + requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"]) + + +def get_inverse_sqrt_schedule(*args, **kwargs): + requires_backends(get_inverse_sqrt_schedule, ["torch"]) + + +def get_linear_schedule_with_warmup(*args, **kwargs): + requires_backends(get_linear_schedule_with_warmup, ["torch"]) + + +def get_polynomial_decay_schedule_with_warmup(*args, **kwargs): + requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"]) + + +def get_scheduler(*args, **kwargs): + requires_backends(get_scheduler, ["torch"]) + + +class Conv1D(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def apply_chunking_to_forward(*args, **kwargs): + requires_backends(apply_chunking_to_forward, ["torch"]) + + +def prune_layer(*args, **kwargs): + requires_backends(prune_layer, ["torch"]) + + +class Trainer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def torch_distributed_zero_first(*args, **kwargs): + requires_backends(torch_distributed_zero_first, ["torch"]) + + +class Seq2SeqTrainer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..38775330a81d91030f000e58c0e6035bba1c0f31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py @@ -0,0 +1,9 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +SLOW_TO_FAST_CONVERTERS = None + + +def convert_slow_tokenizer(*args, **kwargs): + requires_backends(convert_slow_tokenizer, ["sentencepiece", "tokenizers"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_sentencepiece_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_sentencepiece_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..33ee907a741f18718692a7fc02aa0bcc03f39585 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_sentencepiece_objects.py @@ -0,0 +1,254 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class AlbertTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class BarthezTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class BartphoTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class BertGenerationTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class BigBirdTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class CamembertTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class CodeLlamaTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class CpmTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class DebertaV2Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class ErnieMTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class FNetTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class GemmaTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class GPTSw3Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class LayoutXLMTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class LlamaTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class M2M100Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MarianTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MBart50Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MBartTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MLukeTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class MT5Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class NllbTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class PegasusTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class PLBartTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class ReformerTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class RemBertTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class SeamlessM4TTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class SiglipTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class Speech2TextTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class SpeechT5Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class T5Tokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class UdopTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class XGLMTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class XLMProphetNetTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class XLMRobertaTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) + + +class XLNetTokenizer(metaclass=DummyObject): + _backends = ["sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["sentencepiece"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_tensorflow_text_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_tensorflow_text_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..70c7ad5cbf4077609e36592566e461c1a1ded28a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_tensorflow_text_objects.py @@ -0,0 +1,9 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class TFBertTokenizer(metaclass=DummyObject): + _backends = ["tensorflow_text"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tensorflow_text"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_tf_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_tf_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..5441883b85a4635107bde8cd2413c2175077c5df --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_tf_objects.py @@ -0,0 +1,2991 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class TensorFlowBenchmarkArguments(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TensorFlowBenchmark(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFForcedBOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFForcedEOSTokenLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFForceTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGenerationMixin(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLogitsProcessorList(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLogitsWarper(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMinLengthLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFNoBadWordsLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFNoRepeatNGramLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSuppressTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTemperatureLogitsWarper(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTopKLogitsWarper(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTopPLogitsWarper(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["tf"]) + + +class KerasMetricCallback(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class PushToHubCallback(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSequenceSummary(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSharedEmbeddings(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +def shape_list(*args, **kwargs): + requires_backends(shape_list, ["tf"]) + + +TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFAlbertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAlbertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None + + +TF_MODEL_FOR_CAUSAL_LM_MAPPING = None + + +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None + + +TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None + + +TF_MODEL_FOR_MASK_GENERATION_MAPPING = None + + +TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None + + +TF_MODEL_FOR_MASKED_LM_MAPPING = None + + +TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None + + +TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None + + +TF_MODEL_FOR_PRETRAINING_MAPPING = None + + +TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None + + +TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None + + +TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None + + +TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None + + +TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None + + +TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None + + +TF_MODEL_FOR_TEXT_ENCODING_MAPPING = None + + +TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None + + +TF_MODEL_FOR_VISION_2_SEQ_MAPPING = None + + +TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None + + +TF_MODEL_MAPPING = None + + +TF_MODEL_WITH_LM_HEAD_MAPPING = None + + +class TFAutoModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["tf"]) + + +class TFAutoModelForAudioClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForDocumentQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForMaskedImageModeling(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForMaskGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForNextSentencePrediction(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForSeq2SeqLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForSpeechSeq2Seq(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForTableQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForTextEncoding(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForVision2Seq(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelForZeroShotImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFAutoModelWithLMHead(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBartForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, 
**kwargs): + requires_backends(self, ["tf"]) + + +class TFBartForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBartModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBartPretrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFBertEmbeddings(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForNextSentencePrediction(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotSmallModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlenderbotSmallPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFBlipForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["tf"]) + + +class TFBlipForImageTextRetrieval(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipTextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFBlipVisionModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFCamembertForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCamembertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFCLIPModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCLIPTextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCLIPVisionModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFConvBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextV2ForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextV2Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFConvNextV2PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFCTRLForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCTRLLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCTRLModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCTRLPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFCvtForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCvtModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFCvtPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFData2VecVisionForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFData2VecVisionForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFData2VecVisionModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFData2VecVisionPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDebertaForMaskedLM(metaclass=DummyObject): + _backends = 
["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDebertaV2ForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2ForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2ForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2ForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2ForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDebertaV2PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDeiTForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDeiTForImageClassificationWithTeacher(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDeiTForMaskedImageModeling(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDeiTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDeiTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFAdaptiveEmbedding(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["tf"]) + + +class TFTransfoXLModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDistilBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDistilBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFDPRContextEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRPretrainedContextEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRPretrainedQuestionEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRPretrainedReader(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRQuestionEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFDPRReader(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFEfficientFormerForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEfficientFormerModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEfficientFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + 
+TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFElectraForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFElectraPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEncoderDecoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFEsmForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEsmForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEsmForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEsmModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFEsmPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFFlaubertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFlaubertWithLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFFunnelBaseModel(metaclass=DummyObject): + 
_backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFFunnelPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFGPT2DoubleHeadsModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2ForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2LMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2MainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPT2PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGPTJPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFGroupViTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGroupViTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGroupViTTextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["tf"]) + + +class TFGroupViTVisionModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFHubertForCTC(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFHubertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFHubertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFLayoutLMForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFLayoutLMv3ForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3ForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3ForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLEDForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLEDModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLEDPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFLongformerForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFLongformerForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLongformerSelfAttention(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFLxmertForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLxmertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLxmertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLxmertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLxmertVisualFeatureEncoder(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMarianModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMarianMTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMarianPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMBartForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMBartModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMBartPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFMobileBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForNextSentencePrediction(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFMobileBertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFMobileViTForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileViTForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileViTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileViTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFMPNetForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMPNetPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMT5EncoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMT5Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFOpenAIGPTDoubleHeadsModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOpenAIGPTForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, 
**kwargs): + requires_backends(self, ["tf"]) + + +class TFOpenAIGPTLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOpenAIGPTMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOpenAIGPTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOpenAIGPTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOPTForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOPTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFOPTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFPegasusForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFPegasusModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFPegasusPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRagModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRagPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRagSequenceForGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRagTokenForGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRegNetForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRegNetModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRegNetPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRemBertForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFRemBertForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRemBertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFResNetForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFResNetModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFResNetPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRobertaForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRobertaPreLayerNormForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFRoFormerForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFRoFormerPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFSamModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSamPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFSegformerDecodeHead(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSegformerForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSegformerForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSegformerModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSegformerPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFSpeech2TextForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFSpeech2TextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSpeech2TextPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFSwinForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSwinForMaskedImageModeling(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSwinModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFSwinPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFT5EncoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFT5ForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFT5Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFT5PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFTapasForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTapasForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTapasForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTapasModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTapasPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFVisionEncoderDecoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFVisionTextDualEncoderModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTMAEForPreTraining(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFViTMAEModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class 
TFViTMAEPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFWav2Vec2ForCTC(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWav2Vec2ForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWav2Vec2Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWav2Vec2PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFWhisperForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWhisperModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWhisperPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFXGLMForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXGLMModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXGLMPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFXLMForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMWithLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFXLMRobertaForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaForMaskedLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaForMultipleChoice(metaclass=DummyObject): + 
_backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLMRobertaPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFXLNetForMultipleChoice(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetForQuestionAnsweringSimple(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXLNetPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class AdamWeightDecay(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class GradientAccumulator(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class WarmUp(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +def create_optimizer(*args, **kwargs): + requires_backends(create_optimizer, ["tf"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_tokenizers_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_tokenizers_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..0b7ddf119d79a13004bc5841e6bb72be9c8813c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_tokenizers_objects.py @@ -0,0 +1,457 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class AlbertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BartTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BarthezTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BigBirdTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BlenderbotTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BlenderbotSmallTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class BloomTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CamembertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CLIPTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CodeLlamaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CodeGenTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CohereTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class ConvBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class CpmTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DebertaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DebertaV2TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RetriBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DistilBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DPRContextEncoderTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class DPRQuestionEncoderTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, 
**kwargs): + requires_backends(self, ["tokenizers"]) + + +class DPRReaderTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class ElectraTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class FNetTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class FunnelTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class GemmaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class GPT2TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class GPTNeoXTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class GPTNeoXJapaneseTokenizer(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class HerbertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LayoutLMTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LayoutLMv2TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LayoutLMv3TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LayoutXLMTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LEDTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LlamaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LongformerTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class LxmertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MarkupLMTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MBartTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MBart50TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MobileBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["tokenizers"]) + + +class MPNetTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MT5TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class MvpTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class NllbTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class NougatTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class OpenAIGPTTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class PegasusTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class Qwen2TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RealmTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class ReformerTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RemBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RobertaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class RoFormerTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class SeamlessM4TTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class SplinterTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class SqueezeBertTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class T5TokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class UdopTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class WhisperTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class XGLMTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class XLMRobertaTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) 
+ + +class XLNetTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + +class PreTrainedTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_torchaudio_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_torchaudio_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..58b01f06a8ab5e2ea0711586a8ef5726ba5cc256 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_torchaudio_objects.py @@ -0,0 +1,16 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class MusicgenMelodyFeatureExtractor(metaclass=DummyObject): + _backends = ["torchaudio"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torchaudio"]) + + +class MusicgenMelodyProcessor(metaclass=DummyObject): + _backends = ["torchaudio"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torchaudio"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/dummy_vision_objects.py b/venv/lib/python3.10/site-packages/transformers/utils/dummy_vision_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..7510f91dfcd5d322648051d8f579a0f4d038288e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/dummy_vision_objects.py @@ -0,0 +1,618 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class ImageProcessingMixin(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ImageFeatureExtractionMixin(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BeitFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BeitImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BitImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BlipImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class BridgeTowerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ChineseCLIPFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ChineseCLIPImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class CLIPFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class CLIPImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class 
ConditionalDetrFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ConditionalDetrImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ConvNextFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ConvNextImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DeformableDetrFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DeformableDetrImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DeiTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DeiTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DetaImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DetrFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DetrImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DonutFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DonutImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DPTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class DPTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class EfficientFormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class EfficientNetImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FlavaFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FlavaImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FlavaProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FuyuImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class FuyuProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class 
GLPNFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class GLPNImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class GroundingDinoImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class IdeficsImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class Idefics2ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ImageGPTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ImageGPTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LayoutLMv2FeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LayoutLMv2ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LayoutLMv3FeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LayoutLMv3ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LevitFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LevitImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class LlavaNextImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class Mask2FormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MaskFormerFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MaskFormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileNetV1FeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileNetV1ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileNetV2FeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileNetV2ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class MobileViTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["vision"]) + + +class MobileViTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class NougatImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class OneFormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class Owlv2ImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class OwlViTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class OwlViTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PerceiverFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PerceiverImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class Pix2StructImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PoolFormerFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PoolFormerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class PvtImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class SamImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class SegformerFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class SegformerImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class SegGptImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class SiglipImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class SuperPointImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class Swin2SRImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class TvltImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class TvpImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class VideoMAEFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + 
requires_backends(self, ["vision"]) + + +class VideoMAEImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViltFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViltImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViltProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViTFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViTImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class ViTHybridImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class VitMatteImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class VivitImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class YolosFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class YolosImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/fx.py b/venv/lib/python3.10/site-packages/transformers/utils/fx.py new file mode 100644 index 0000000000000000000000000000000000000000..ab4f823c2fc8aa3ac1fcfe1f59e295a25460b323 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/fx.py @@ -0,0 +1,1321 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import builtins +import collections +import functools +import inspect +import math +import operator +import os +import random +import warnings +from typing import Any, Callable, Dict, List, Optional, Type, Union + +import torch +from torch import nn +from torch.fx import Graph, GraphModule, Proxy, Tracer +from torch.fx._compatibility import compatibility +from torch.fx.proxy import ParameterProxy + +from .. 
import PretrainedConfig, PreTrainedModel, logging +from ..models.auto import get_values +from ..models.auto.modeling_auto import ( + MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_BACKBONE_MAPPING_NAMES, + MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, + MODEL_FOR_CTC_MAPPING_NAMES, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, + MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_IMAGE_MAPPING_NAMES, + MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, + MODEL_FOR_MASKED_LM_MAPPING_NAMES, + MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, + MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, + MODEL_FOR_PRETRAINING_MAPPING_NAMES, + MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, + MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, + MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, + MODEL_MAPPING_NAMES, +) +from ..pytorch_utils import is_torch_greater_or_equal_than_2_0 +from ..utils import ( + ENV_VARS_TRUE_VALUES, + TORCH_FX_REQUIRED_VERSION, + get_torch_version, + is_peft_available, + is_torch_fx_available, +) + + +if is_peft_available(): + from peft import PeftModel + + +logger = logging.get_logger(__name__) +_IS_IN_DEBUG_MODE = os.environ.get("FX_DEBUG_MODE", "").upper() in ENV_VARS_TRUE_VALUES + + +def _generate_supported_model_class_names( + model_name: Type[PretrainedConfig], + supported_tasks: Optional[Union[str, List[str]]] = None, +) -> List[str]: + task_mapping = { + "default": MODEL_MAPPING_NAMES, + "pretraining": MODEL_FOR_PRETRAINING_MAPPING_NAMES, + "next-sentence-prediction": MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, + "masked-lm": MODEL_FOR_MASKED_LM_MAPPING_NAMES, + "causal-lm": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, + "seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, + "speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, + "multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, + "document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, + "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, + "sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, + "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, + "masked-image-modeling": MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, + "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, + "zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, + "ctc": MODEL_FOR_CTC_MAPPING_NAMES, + "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, + "semantic-segmentation": MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, + "backbone": MODEL_FOR_BACKBONE_MAPPING_NAMES, + "image-feature-extraction": MODEL_FOR_IMAGE_MAPPING_NAMES, + } + + if supported_tasks is None: + supported_tasks = task_mapping.keys() + if isinstance(supported_tasks, str): + supported_tasks = [supported_tasks] + + model_class_names = [] + for task in supported_tasks: + class_name = task_mapping[task].get(model_name, None) + if class_name: + model_class_names.append(class_name) + + return model_class_names + + +_REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS = [ + "altclip", + "albert", + "bart", + "bert", + "blenderbot", + "blenderbot-small", + "bloom", + "clip", + "convnext", + "deberta", + "deberta-v2", + "dinov2", + "distilbert", + "donut-swin", + "electra", + "gpt2", + "gpt_neo", + "gptj", + "hubert", + "layoutlm", + "llama", 
+ "cohere", + "lxmert", + "m2m_100", + "marian", + "mbart", + "megatron-bert", + "mistral", + "mixtral", + "mobilebert", + "mt5", + "nezha", + "opt", + "pegasus", + "plbart", + "qwen2", + "qwen2_moe", + "resnet", + "roberta", + "segformer", + "speech_to_text", + "speech_to_text_2", + "swin", + "t5", + "trocr", + "vit", + "xglm", + "wav2vec2", + # "xlnet", +] + +_FX_SUPPORTED_MODELS_WITH_KV_CACHE = ["llama", "opt"] + +_REGULAR_SUPPORTED_MODELS = [] +for item in _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS: + if isinstance(item, dict): + _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(**item)) + else: + _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(item)) + +_SPECIAL_SUPPORTED_MODELS = [ + "CLIPTextModel", + "CLIPTextModelWithProjection", + "CLIPVisionModel", + "CLIPVisionModelWithProjection", + "AltCLIPTextModel", + "AltCLIPVisionModel", + "GitVisionModel", + "GPT2DoubleHeadsModel", + "Speech2Text2Decoder", + "TrOCRDecoder", + "PeftModelForCausalLM", + "PeftModelForSeq2SeqLM", + # TODO: add support for them as it should be quite easy to do so (small blocking issues). + # XLNetForQuestionAnswering, +] +_SUPPORTED_MODELS = tuple(sorted(set(_REGULAR_SUPPORTED_MODELS + _SPECIAL_SUPPORTED_MODELS))) + + +def torch_nn_embedding(self, input): + return torch.empty(*input.shape, self.weight.shape[-1], device="meta", dtype=self.weight.dtype) + + +def torch_nn_functional_embedding( + input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False +): + return torch.empty(*input.shape, weight.shape[-1], device="meta", dtype=weight.dtype) + + +def torch_nn_layernorm(self, input): + return input + + +def torch_nn_groupnorm(self, input): + return input + + +def torch_nn_linear(self, input): + return torch.empty(input.shape[:-1] + (self.out_features,), device="meta") + + +def torch_relu(x): + return x + + +def torch_nn_relu(self, x): + return x + + +def torch_nn_functional_relu(x, inplace=False): + if not inplace: + raise ValueError("Don't support in-place functional.relu for MetaTensor analysis") + return x + + +def torch_where(condition, x, y): + # torch.where returns the broadcasted tensor of condition, x, and y, + # so hack it by using addition + return condition.to(device="meta") + x.to(device="meta") + y.to(device="meta") + + +def torch_abs(input, *, out=None): + if out is not None: + raise ValueError("Don't support in-place abs for MetaTensor analysis") + return input + + +def torch_arange(*args, **kwargs): + n = len(args) + step = 1 + if n == 1: + start = 0 + end = args[0] + elif n == 2: + start, end = args + else: + start, end, step = args + if isinstance(start, float): + start = int(start) + if isinstance(end, float): + start = int(end) + if isinstance(step, float): + step = int(step) + step = kwargs.get("step", step) + dtype = kwargs.get("dtype") + return torch.empty((end - start) // step, dtype=dtype, device="meta") + + +def torch_full(*args, **kwargs): + args = list(args) + # We set the fill value to 1 as its value is not important as long as it's not a tensor on the `meta` device. 
+    if len(args) > 1:
+        args[1] = 1
+    else:
+        kwargs["fill_value"] = 1
+    kwargs_without_device = dict(kwargs)
+    kwargs_without_device.pop("device", None)
+    return torch.full(*args, **kwargs_without_device, device="meta")
+
+
+def torch_cat(tensors, dim=None, axis=None, *, out=None):
+    if dim is None and axis is None:
+        dim = 0
+    if dim is None and axis is not None:
+        dim = axis
+    if dim < 0:
+        dim = tensors[0].dim() + dim
+    shapes = [t.shape for t in tensors]
+    shape = list(shapes[0])
+    concatenated_dim = sum(shape[dim] for shape in shapes)
+    final_shape = shape[:dim] + [concatenated_dim] + shape[dim + 1 :]
+    return torch.empty(final_shape, device="meta")
+
+
+def torch_stack(tensors, dim=None, axis=None, *, out=None):
+    if dim is None and axis is None:
+        dim = 0
+    if dim is None and axis is not None:
+        dim = axis
+    if dim < 0:
+        dim = tensors[0].dim() + 1 + dim
+    shape = list(tensors[0].shape)
+    shape.insert(dim, len(tensors))
+    return torch.empty(shape, device="meta")
+
+
+def torch_add(input, other, *, alpha=1, out=None):
+    if not isinstance(input, torch.Tensor):
+        return torch.empty_like(other, device="meta")
+    if not isinstance(other, torch.Tensor):
+        return torch.empty_like(input, device="meta")
+    max_length = max(input.dim(), other.dim())
+    input_shape = list(input.shape) + [1] * (max_length - input.dim())
+    other_shape = list(other.shape) + [1] * (max_length - other.dim())
+    shape = []
+    for i in range(max_length):
+        shape.append(max(input_shape[i], other_shape[i]))
+    return torch.empty(shape, device="meta")
+
+
+def torch_mul(input, other, *, out=None):
+    return torch_add(input, other, out=out)
+
+
+def torch_tensor_mul(self, other):
+    return torch_mul(self, other)
+
+
+def torch_matmul(input, other, *, out=None):
+    d1 = input.dim()
+    d2 = other.dim()
+    shape = None
+    if d1 == 1 and d2 == 1:
+        shape = None
+    elif d1 == 2 and d2 == 2:
+        shape = (input.size(0), other.size(1))
+    elif d1 == 1 and d2 == 2:
+        shape = (other.size(1),)
+    elif d1 == 2 and d2 == 1:
+        shape = (input.size(0),)
+    else:
+        max_length = max(input.dim(), other.dim())
+        shape1 = list(input.shape)
+        shape2 = list(other.shape)
+        if d1 == 1:
+            shape1 = [1] + shape1
+        if d2 == 1:
+            shape2.append(1)
+        shape1 = [-1] * (max_length - d1) + list(input.shape)
+        shape2 = [-1] * (max_length - d2) + list(other.shape)
+        shape = []
+        for i in range(max_length):
+            shape.append(max(shape1[i], shape2[i]))
+        shape[-2] = shape1[-2]
+        shape[-1] = shape2[-1]
+        if d1 == 1:
+            shape.pop(-2)
+        if d2 == 1:
+            shape.pop(-1)
+    if shape is None:
+        return torch.tensor(0.0, device="meta")
+    return torch.empty(*shape, device="meta")
+
+
+def torch_bmm(input, mat2, *, out=None):
+    if out is not None:
+        raise ValueError("Don't support in-place bmm for MetaTensor analysis")
+    batch_size, n, m = input.shape
+    _, _, p = mat2.shape
+    return torch.empty(batch_size, n, p, device="meta")
+
+
+def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None):
+    if out is not None:
+        raise ValueError("Don't support in-place baddbmm for MetaTensor analysis")
+    return torch_bmm(batch1, batch2)
+
+
+def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None):
+    return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out)
+
+
+def torch_einsum(equation, *operands):
+    # TODO: infer shape without performing the computation, this might be quite hard.
+    concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands)
+    return torch.einsum(equation, *concrete_operands).to("meta")
+
+
+def torch_tensor_repeat(self, *sizes):
+    shape = list(self.shape)
+    for i, x in enumerate(sizes):
+        shape[i] *= x
+    return torch.empty(shape, device="meta")
+
+
+def torch_repeat_interleave(*args, dim=None, output_size=None):
+    num_args = len(args)
+    if num_args == 1:
+        shape = [output_size if output_size is not None else args[0].sum()]
+    else:
+        shape = list(args[0].shape)
+        if dim is None:
+            if num_args > 2:
+                dim = args[2]
+            else:
+                shape = [sum(shape)]
+                dim = 0
+        repeats = args[1]
+        if isinstance(repeats, int) or torch.numel(repeats) == 1:
+            shape[dim] *= int(repeats)
+        else:
+            shape[dim] = output_size if output_size is not None else repeats.sum()
+    return torch.empty(*shape, device="meta")
+
+
+def torch_index_select(input, dim, index, *, out=None):
+    shape = list(input.shape)
+    shape[dim] = len(index)
+    return torch.empty(*shape, device="meta")
+
+
+def torch_tensor_index_select(self, dim, index):
+    return torch_index_select(self, dim, index)
+
+
+def torch_gather(input, dim, index, *, sparse_grad=False, out=None):
+    shape = list(input.shape)
+    shape[dim] = index.shape[dim]
+    return torch.empty(*shape, device="meta")
+
+
+def torch_tensor_gather(self, dim, index):
+    return torch_gather(self, dim, index)
+
+
+def torch_roll(input, shifts, dims=None):
+    return input
+
+
+def torch_flip(input, dims):
+    return input
+
+
+def torch_tensor_flip(self, dims):
+    return self
+
+
+def torch_nn_conv1d(self, input):
+    l_in = input.shape[-1]
+    shape = None
+    padding = self.padding
+    if padding == "valid":
+        padding = (0, 0)
+    if padding == "same":
+        shape = list(input.shape)
+    if shape is None:
+        shape = list(input.shape)
+        l_out = math.floor(
+            (l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
+        )
+        shape[-1] = l_out
+    shape[-2] = self.out_channels
+    return torch.empty(shape, device="meta")
+
+
+def torch_nn_conv2d(self, input):
+    h_in, w_in = input.shape[-2:]
+    shape = None
+    padding = self.padding
+    if padding == "valid":
+        padding = (0, 0)
+    if padding == "same":
+        shape = list(input.shape)
+    if shape is None:
+        shape = list(input.shape)
+        h_out = math.floor(
+            (h_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
+        )
+        w_out = math.floor(
+            (w_in + 2 * padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1
+        )
+        shape[-2:] = [h_out, w_out]
+    shape[-3] = self.out_channels
+    return torch.empty(shape, device="meta")
+
+
+def torch_squeeze(input, dim=None):
+    shape = list(input.shape)
+    if dim is not None:
+        if dim < 0:
+            dim = input.dim() + dim
+        if shape[dim] == 1:
+            shape.pop(dim)
+    else:
+        new_shape = []
+        for dim_value in shape:
+            if dim_value == 1:
+                continue
+            new_shape.append(dim_value)
+        shape = new_shape
+    return torch.empty(shape, device="meta")
+
+
+def torch_tensor_squeeze(self, dim=None):
+    return torch_squeeze(self, dim)
+
+
+def torch_unsqueeze(input, dim):
+    shape = list(input.shape)
+    if dim < 0:
+        dim = input.dim() + 1 + dim
+    shape.insert(dim, 1)
+    return torch.empty(shape, device="meta")
+
+
+def torch_tensor_unsqueeze(self, dim):
+    return torch_unsqueeze(self, dim)
+
+
+def torch_unique_consecutive(input, **kwargs):
+    output = torch.unique_consecutive(torch.zeros_like(input, device="cpu"), **kwargs)
+    if isinstance(output, torch.Tensor):
+        return output.to("meta")
+    else:
+        return tuple(map(lambda x: x.to("meta"), output))
+
+
+def torch_nn_functional_one_hot(tensor, num_classes=-1):
+    if num_classes < 0:
+        raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis")
+    shape = list(tensor.shape) + [num_classes]
+    return torch.empty(shape, device="meta")
+
+
+def torch_nn_functional_scaled_dot_product_attention(
+    query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None
+):
+    target_length = query.shape[-2]
+    head_dim = value.shape[-1]
+    return torch.empty((*query.shape[:-2], target_length, head_dim), device="meta")
+
+
+def torch_nn_mseloss(self, input, target):
+    if self.reduction == "none":
+        shape = target.shape
+    else:
+        shape = (1,)
+    return torch.empty(shape, device="meta")
+
+
+def torch_nn_crossentropyloss(self, input, target):
+    if self.reduction == "none":
+        shape = target.shape
+    else:
+        shape = (1,)
+    return torch.empty(shape, device="meta")
+
+
+def torch_nn_bcewithlogitsloss(self, input, target):
+    if self.reduction == "none":
+        shape = target.shape
+    else:
+        shape = (1,)
+    return torch.empty(shape, device="meta")
+
+
+def operator_getitem(a, b):
+    def to_concrete(t):
+        if isinstance(t, torch.Tensor):
+            concrete = torch.ones_like(t, device="cpu")
+            if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]:
+                concrete = concrete.to(torch.int64)
+            return concrete
+        return t
+
+    if isinstance(a, torch.Tensor):
+        # TODO: infer shape without performing the computation.
+        if isinstance(b, tuple):
+            b = tuple(map(to_concrete, b))
+        else:
+            b = to_concrete(b)
+        return operator.getitem(torch.empty_like(a, device="cpu"), b).to("meta")
+    return operator.getitem(a, b)
+
+
+_MANUAL_META_OVERRIDES: Dict[Callable, Callable] = {
+    torch.nn.Embedding: torch_nn_embedding,
+    torch.nn.functional.embedding: torch_nn_functional_embedding,
+    torch.nn.LayerNorm: torch_nn_layernorm,
+    torch.nn.GroupNorm: torch_nn_groupnorm,
+    torch.nn.Linear: torch_nn_linear,
+    torch.relu: torch_relu,
+    torch.nn.functional.relu: torch_nn_functional_relu,
+    torch.nn.ReLU: torch_nn_relu,
+    torch.where: torch_where,
+    torch.abs: torch_abs,
+    torch.arange: torch_arange,
+    torch.full: torch_full,
+    torch.cat: torch_cat,
+    torch.stack: torch_stack,
+    torch.add: torch_add,
+    torch.mul: torch_mul,
+    torch.Tensor.mul: torch_tensor_mul,
+    torch.matmul: torch_matmul,
+    torch.bmm: torch_bmm,
+    torch.baddbmm: torch_baddbmm,
+    torch.Tensor.baddbmm: torch_tensor_baddbmm,
+    torch.einsum: torch_einsum,
+    torch.Tensor.repeat: torch_tensor_repeat,
+    torch.repeat_interleave: torch_repeat_interleave,
+    torch.roll: torch_roll,
+    torch.flip: torch_flip,
+    torch.Tensor.flip: torch_tensor_flip,
+    torch.index_select: torch_index_select,
+    torch.Tensor.index_select: torch_tensor_index_select,
+    torch.gather: torch_gather,
+    torch.Tensor.gather: torch_tensor_gather,
+    torch.nn.Conv1d: torch_nn_conv1d,
+    torch.nn.Conv2d: torch_nn_conv2d,
+    torch.squeeze: torch_squeeze,
+    torch.Tensor.squeeze: torch_tensor_squeeze,
+    torch.unsqueeze: torch_unsqueeze,
+    torch.Tensor.unsqueeze: torch_tensor_unsqueeze,
+    torch.unique_consecutive: torch_unique_consecutive,
+    torch.nn.functional.one_hot: torch_nn_functional_one_hot,
+    torch.nn.MSELoss: torch_nn_mseloss,
+    torch.nn.CrossEntropyLoss: torch_nn_crossentropyloss,
+    torch.nn.BCEWithLogitsLoss: torch_nn_bcewithlogitsloss,
+    operator.getitem: operator_getitem,
+}
+
+if is_torch_greater_or_equal_than_2_0:
+    _MANUAL_META_OVERRIDES[
+        torch.nn.functional.scaled_dot_product_attention
+    ] = torch_nn_functional_scaled_dot_product_attention
+
+
+class HFProxy(Proxy): + """ + Proxy that uses metadata to handle data-dependent control-flow. + """ + + def install_metadata(self, metadata): + self._metadata = metadata + + @property + def shape(self): + return self.tracer.create_proxy("call_method", "size", (self,), {}) + + @property + def device(self): + # Hack so we can track when devices are used. During meta-tensor propagation, + # replace these values with a constant 'meta' + return MetaDeviceAttribute(self, "device") + + def __len__(self): + if hasattr(self, "_metadata") and self._metadata is not None: + return len(self._metadata) + return super().__len__() + + def __bool__(self): + if hasattr(self, "_metadata") and self._metadata is not None: + return self._metadata + return super().__bool__() + + def __getattr__(self, k): + if k == "_metadata": + return self.__getattribute__(k) + # note: not added to the graph yet, if this is a method call + # we peephole optimize to the method invocation + return HFAttribute(self, k) + + def __setitem__(self, indices, values): + return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {}) + + def __contains__(self, key): + if hasattr(self, "_metadata") and self._metadata is not None: + return key in self._metadata + return super().__contains__(key) + + +class HFAttribute(HFProxy): + def __init__(self, root, attr: str): + self.root = root + self.attr = attr + self.tracer = root.tracer + self._node = None + + if hasattr(self.root, "_metadata"): + self.install_metadata(getattr(self.root._metadata, attr)) + + @property + def node(self): + # the node for attributes is added lazily, since most will just be method calls + # which do not rely on the getitem call + if self._node is None: + self._node = self.tracer.create_proxy("call_function", builtins.getattr, (self.root, self.attr), {}).node + return self._node + + def __call__(self, *args, **kwargs): + return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs) + + +class MetaDeviceAttribute(HFAttribute): + pass + + +def _proxies_to_metas(v): + """Returns the underlying metadata for HFProxies, and behaves like the identity for the others.""" + if isinstance(v, MetaDeviceAttribute): + return "meta" + if isinstance(v, torch.fx.Proxy): + if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")): + raise RuntimeError(f"No metadata was found for {v}") + return v._metadata + return v + + +def _gen_constructor_wrapper(target): + @functools.wraps(target) + def wrapper(*args, **kwargs): + proxy = None + + def check_has_proxy(v): + if isinstance(v, Proxy): + nonlocal proxy + proxy = v + + torch.fx.node.map_aggregate(args, check_has_proxy) + torch.fx.node.map_aggregate(kwargs, check_has_proxy) + + if proxy is not None: + return proxy.tracer.create_proxy("call_function", target, args, kwargs) + else: + return target(*args, **kwargs) + + return wrapper, target + + +def _generate_random_int(low: int = 10, high: int = 20, forbidden_values: Optional[List[int]] = None): + if forbidden_values is None: + forbidden_values = [] + value = random.randint(low, high) + while value in forbidden_values: + value = random.randint(low, high) + return value + + +class HFTracer(Tracer): + """ + Tracer that is able to symbolically trace models from the library. To do that, it uses the HFProxy instead of the + regular PyTorch torch.fx.Proxy. 
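+
+    A small usage sketch (mirroring how `symbolic_trace` in this module drives this
+    tracer; the `concrete_args` value here is only illustrative):
+
+        tracer = HFTracer()
+        graph = tracer.trace(model, concrete_args={"labels": None})
+        traced = torch.fx.GraphModule(model, graph)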
+ """ + + # Feature flag for proxying accesses to buffer values + proxy_buffer_attributes: bool = True + allow_insert_stateless_mods: bool = True + _TORCH_METHODS_TO_PATCH = [ + "arange", + "zeros", + "ones", + "full", + "full_like", + "eye", + "empty", + "tensor", + "clamp", + "finfo", + "tril", + ] + supported_archs = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) + + def __init__(self, autowrap_modules=(math,), autowrap_functions=()): + super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions) + + if not is_torch_fx_available(): + raise ImportError( + f"Found an incompatible version of torch. Found version {get_torch_version()}, but only version " + f"{TORCH_FX_REQUIRED_VERSION} is supported." + ) + + def _generate_dummy_input( + self, model: PreTrainedModel, input_name: str, shape: List[int], input_names: List[str] + ) -> Dict[str, torch.Tensor]: + """Generates dummy input for model inference recording.""" + # Retrieving the model class, either from the "class_for_deserialization" attribute if the model was restored + # from pickle, or from the "__class__" attribute in the general case. + model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__ + device = model.device + inputs_dict = {} + + # when tracing a model with KV cache, we simply need to unsure that the KV cache length is larger than one to + # rightfully pass certain controlflows (Example: https://github.com/huggingface/transformers/blob/5c8d941d66734811d2ef6f57f15b44f7fb7a98c4/src/transformers/modeling_attn_mask_utils.py#L162). + # After tracing, the model can then still be used with arbitrary lengths different than the one used during tracing. + kv_cache_length = 5 + + if input_name in ["labels", "start_positions", "end_positions"]: + batch_size = shape[0] + if model_class_name in [ + *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), + *get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES), + *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), + *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), + *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), + ]: + inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) + elif model_class_name in [ + *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), + *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), + "XLNetForQuestionAnswering", + ]: + inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) + inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) + elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): + if not hasattr(model.config, "problem_type") or model.config.problem_type is None: + raise ValueError( + "Could not retrieve the problem type for the sequence classification task, please set " + 'model.config.problem_type to one of the following values: "regression", ' + '"single_label_classification", or "multi_label_classification".' 
+ ) + + if model.config.problem_type == "regression": + labels_shape = (batch_size, model.config.num_labels) + labels_dtype = torch.float32 + elif model.config.problem_type == "single_label_classification": + labels_shape = (batch_size,) + labels_dtype = torch.long + elif model.config.problem_type == "multi_label_classification": + labels_shape = (batch_size, model.config.num_labels) + labels_dtype = torch.float32 + else: + raise ValueError( + 'Expected model.config.problem_type to be either: "regression", "single_label_classification"' + f', or "multi_label_classification", but "{model.config.problem_type}" was provided.' + ) + inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device) + + elif model_class_name in [ + *get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES), + *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), + *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), + *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), + *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), + *get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES), + "GPT2DoubleHeadsModel", + "PeftModelForCausalLM", + "PeftModelForSeq2SeqLM", + ]: + inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device) + elif model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]: + inputs_dict["labels"] = torch.zeros(shape, dtype=torch.float32, device=device) + else: + raise NotImplementedError( + f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet." + ) + elif "pixel_values" in input_name: + batch_size = shape[0] + image_size = getattr(model.config, "image_size", None) + if image_size is None: + if hasattr(model.config, "vision_config"): + image_size = model.config.vision_config.image_size + elif hasattr(model.config, "encoder"): + image_size = model.config.encoder.image_size + else: + image_size = (_generate_random_int(), _generate_random_int()) + + # If no num_channels is in the config, use some arbitrary value. + num_channels = getattr(model.config, "num_channels", 3) + if not isinstance(image_size, collections.abc.Iterable): + image_size = (image_size, image_size) + height, width = image_size + inputs_dict[input_name] = torch.zeros( + batch_size, num_channels, height, width, dtype=torch.float32, device=device + ) + elif "bbox" in input_name: + inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device) + elif "input_features" in input_name: + inputs_dict[input_name] = torch.zeros( + *shape, model.config.input_feat_per_channel, dtype=torch.float, device=device + ) + elif "visual_feats" in input_name: + inputs_dict[input_name] = torch.zeros( + shape + + [ + model.config.visual_feat_dim, + ], + dtype=torch.float, + device=device, + ) + elif "visual_pos" in input_name: + inputs_dict[input_name] = torch.zeros( + shape + + [ + model.config.visual_pos_dim, + ], + dtype=torch.float, + device=device, + ) + elif "inputs" in input_name: + inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device) + elif "input_values" in input_name: + batch_size, _ = shape + # Generating big sequence length for audio inputs. 
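+            # (The exact length is irrelevant for shape propagation; any positive value
+            # would produce an equally valid dummy `input_values` tensor.)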
+ seq_length = _generate_random_int(low=10000, high=20000) + inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device) + elif "mask" in input_name: + if "past_key_values" in input_names: + mask_shape = [shape[0], shape[1] + kv_cache_length] + else: + mask_shape = shape + + inputs_dict[input_name] = torch.zeros(mask_shape, dtype=torch.long, device=device) + elif "ids" in input_name: + inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device) + elif "past_key_values" in input_name: + if model.config.model_type not in _FX_SUPPORTED_MODELS_WITH_KV_CACHE: + raise NotImplementedError( + f"Symbolic trace with past_key_values input is not supported yet for the model {model.config.model_type}. Please open an issue or a PR in Transformers repository if you would like to see the support added." + ) + num_heads = model.config.num_attention_heads + head_dim = model.config.hidden_size // model.config.num_attention_heads + + cache_shape = (shape[0], num_heads, kv_cache_length, head_dim) + pkv = tuple( + ( + torch.rand(cache_shape, dtype=torch.float, device=device), + torch.rand(cache_shape, dtype=torch.float, device=device), + ) + for i in range(model.config.num_hidden_layers) + ) + inputs_dict[input_name] = pkv + else: + shape_with_hidden_size = shape + [model.config.hidden_size] + inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device) + + return inputs_dict + + def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None): + rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn) + + if kind == "placeholder" and target in self.meta_args: + rv.install_metadata(self.meta_args[target]) + return rv + + if target in self.orig_fns: + # NOTE: tensor constructors in PyTorch define the `device` argument as + # *kwargs-only*. That is why this works. If you add methods to + # _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only, + # this will break and you will likely see issues where we cannot infer + # the size of the output. 
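The kwargs-only `device` argument noted above is what makes wholesale constructor patching safe; a minimal, self-contained sketch of the idea behind `_gen_constructor_wrapper` (the `wrap_constructor` name here is hypothetical):

```python
import torch

def wrap_constructor(orig):
    # Redirect any explicit `device=...` onto the meta device so that tracing
    # only propagates shapes and dtypes, never real storage.
    def wrapper(*args, **kwargs):
        if "device" in kwargs:
            kwargs["device"] = "meta"
        return orig(*args, **kwargs)
    return wrapper

zeros = wrap_constructor(torch.zeros)
print(zeros(2, 3, device="cpu").device)  # meta
```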
+ if "device" in kwargs: + kwargs["device"] = "meta" + + try: + args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas) + kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas) + + if kind == "call_function": + meta_target = _MANUAL_META_OVERRIDES.get(target, target) + meta_out = meta_target(*args_metas, **kwargs_metas) + if isinstance(meta_out, torch.Tensor): + meta_out = meta_out.to(device="meta") + elif kind == "call_method": + method = getattr(args_metas[0].__class__, target) + meta_target = _MANUAL_META_OVERRIDES.get(method, method) + meta_out = meta_target(*args_metas, **kwargs_metas) + elif kind == "call_module": + if not hasattr(self, "orig_forward"): + raise AttributeError(f"{self} does not have an attribute called orig_forward") + self._disable_module_getattr = True + try: + mod = self.root.get_submodule(target) + mod_type = type(mod) + if mod_type in _MANUAL_META_OVERRIDES: + meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas) + else: + meta_out = self.orig_forward(*args_metas, **kwargs_metas) + finally: + self._disable_module_getattr = False + elif kind == "get_attr": + self._disable_module_getattr = True + try: + attr_itr = self.root + atoms = target.split(".") + for atom in atoms: + attr_itr = getattr(attr_itr, atom) + if isinstance(attr_itr, torch.Tensor): + meta_out = attr_itr.to(device="meta") + else: + meta_out = attr_itr + finally: + self._disable_module_getattr = False + else: + return rv + + if not isinstance(rv, Proxy): + raise ValueError("Don't support composite output yet") + rv.install_metadata(meta_out) + except Exception as e: + if _IS_IN_DEBUG_MODE: + warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}") + + return rv + + # Replaced by .getattr from PyTorch 1.13 + def _module_getattr(self, attr, attr_val, parameter_proxy_cache): + if getattr(self, "_disable_module_getattr", False): + return attr_val + else: + + def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache): + for n, p in collection_to_search: + if attr_val is p: + if n not in parameter_proxy_cache: + kwargs = {} + if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters: + kwargs["proxy_factory_fn"] = ( + None + if not self.param_shapes_constant + else lambda node: ParameterProxy(self, node, n, attr_val) + ) + val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type] + parameter_proxy_cache[n] = val_proxy + return parameter_proxy_cache[n] + return None + + if isinstance(attr_val, torch.nn.Parameter): + maybe_parameter_proxy = maybe_get_proxy_for_attr( + attr_val, self.root.named_parameters(), parameter_proxy_cache + ) + if maybe_parameter_proxy is not None: + return maybe_parameter_proxy + + if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor): + maybe_buffer_proxy = maybe_get_proxy_for_attr( + attr_val, self.root.named_buffers(), parameter_proxy_cache + ) + if maybe_buffer_proxy is not None: + return maybe_buffer_proxy + + return attr_val + + # Needed for PyTorch 1.13+ + def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]): + return self._module_getattr(attr, attr_val, parameter_proxy_cache) + + def call_module(self, m, forward, args, kwargs): + self.orig_forward = forward + return super().call_module(m, forward, args, kwargs) + + def proxy(self, node): + return HFProxy(node, self) + + def trace( + self, + root: Union[torch.nn.Module, Callable[..., Any]], + concrete_args: Optional[Dict[str, Any]] = None, + 
dummy_inputs: Optional[Dict[str, Any]] = None,
+        complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True,
+    ) -> Graph:
+        """
+        Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a
+        `torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from
+        the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a
+        `torch.nn.Module` instance to use as the root and add embedded constants to it.
+
+        Args:
+            root (`torch.nn.Module` or `Callable`):
+                Either a `torch.nn.Module` or a function to be traced through. If root is not a
+                [`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail.
+            concrete_args (`Dict[str, Any]`, *optional*):
+                Concrete arguments that should not be treated as Proxies.
+            dummy_inputs (`Dict[str, Any]`, *optional*):
+                The dummy inputs needed to handle data-dependent control flow if `root` is not a
+                [`~transformers.PreTrainedModel`]. It can also be used when `root` is a
+                [`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset of, or all, the model
+                inputs.
+            complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`):
+                If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in
+                `dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing.
+
+        Returns:
+            `torch.fx.Graph`:
+                An FX `torch.fx.Graph` representing the semantics of the passed-in `root`.
+
+        """
+        sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root)
+
+        if concrete_args is None:
+            concrete_args = {}
+
+        if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs:
+            for param in sig.parameters.values():
+                if param.name in dummy_inputs:
+                    continue
+                if param.default is inspect.Parameter.empty:
+                    raise ValueError(f"You need to specify a default value for the parameter {param.name}.")
+            concrete_args.update(
+                {
+                    p.name: p.default
+                    for p in sig.parameters.values()
+                    if (p.name not in dummy_inputs and p.name not in concrete_args)
+                }
+            )
+
+        input_names = sig.parameters.keys() - concrete_args.keys()
+
+        # Creating a random input shape to generate dummy inputs.
+        batch_size = _generate_random_int()
+        sequence_length = _generate_random_int()
+        shape = [batch_size, sequence_length]
+
+        if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
+            num_choices = _generate_random_int(low=2, high=5)
+            shape.insert(1, num_choices)
+
+        inputs = dict(dummy_inputs) if dummy_inputs is not None else {}
+        for input_name in input_names:
+            if input_name in inputs:
+                continue
+            # We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to
+            # be able to use HFTracer._generate_dummy_input.
+            if isinstance(root, self.supported_archs) or type(root).__qualname__.startswith(
+                ("_deserialize_graph_module", "_CodeOnlyModule")
+            ):
+                inputs.update(self._generate_dummy_input(root, input_name, shape, input_names=input_names))
+            else:
+                raise RuntimeError(
+                    f"Could not generate input named {input_name} because root is not a"
+                    " transformers.PreTrainedModel."
+                )
+
+        concrete_metas = {
+            input_name: input_.to("meta") if isinstance(input_, torch.Tensor) else input_
+            for input_name, input_ in inputs.items()
+        }
+        for param in sig.parameters.values():
+            if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names:
+                concrete_metas[f"**{param.name}"] = {}
+        self.meta_args = concrete_metas
+        self.patched_torch_methods = {
+            target: _gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH
+        }
+        self.orig_fns = set()
+
+        for name, (wrapper, orig) in self.patched_torch_methods.items():
+            setattr(torch, name, wrapper)
+            self.orig_fns.add(orig)
+
+        try:
+            self.graph = super().trace(root, concrete_args=concrete_args)
+        finally:
+            for name, (_, orig) in self.patched_torch_methods.items():
+                setattr(torch, name, orig)
+
+        # This is necessary because concrete args are added as input to the traced module since
+        # https://github.com/pytorch/pytorch/pull/55888.
+        for node in self.graph.nodes:
+            if node.op == "placeholder":
+                # Removing default values for inputs as the forward pass will fail with them.
+                if node.target in input_names:
+                    node.args = ()
+                    # Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor].
+                    # It cannot infer the attributes and methods the input should have, and fails.
+                    node.type = torch.Tensor
+                # It is a concrete arg so it is not used and should be removed.
+                else:
+                    to_visit = [node]
+                    to_delete = collections.OrderedDict()
+                    while to_visit:
+                        n = to_visit.pop(0)
+                        to_delete[n] = None
+                        to_visit += list(n.users.keys())
+
+                    for user in reversed(to_delete.keys()):
+                        self.graph.erase_node(user)
+
+            # TODO: solve GraphModule creation.
+            # Without this, the "Tuple" return type annotation causes code execution to fail.
+            if node.op == "output":
+                node.type = None
+
+        return self.graph
+
+    def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool:
+        """
+        Whether the module was instantiated with Proxies. If that is the case, such a module cannot be a leaf module
+        because its attributes are input-dependent.
+        """
+        return any(isinstance(attr, Proxy) for attr in mod.__dict__.values())
+
+    def _insert_module_as_submodule(self, mod: nn.Module) -> str:
+        """
+        Helper method which tries to insert a module that was not declared as a submodule.
+        """
+        # If one of the module attributes is a Proxy, it means that its instantiation is input-dependent.
+        # It is not possible to insert such modules; those should be traced through.
+        if self._stateless_mod_instanciation_depends_on_proxies(mod):
+            return ""
+        idx = 0
+        mod_name = mod.__class__.__name__.lower()
+        path = f"{mod_name}_{idx}"
+        already_inserted = False
+        while hasattr(self.root, path):
+            if getattr(self.root, path) is mod:
+                already_inserted = True
+                break
+            path = f"{mod_name}_{idx}"
+            idx += 1
+
+        # No need to add multiple instances of the same module.
+        if not already_inserted:
+            self.root.add_module(path, mod)
+        return path
+
+    def path_of_module(self, mod: nn.Module) -> str:
+        """
+        Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root`
+        has a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return
+        the string "foo.bar".
+
+        Args:
+            mod (`nn.Module`): The `Module` to retrieve the qualified name for.
+ """ + try: + return super().path_of_module(mod) + except NameError as e: + if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0: + path = self._insert_module_as_submodule(mod) + return path + raise e + + def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool: + return (not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module( + m, module_qualified_name + ) + + @compatibility(is_backward_compatible=True) + def keys(self, obj: "Proxy") -> Any: + """Called when a proxy object is has the keys() method called. + This is what happens when ** is called on a proxy. This should return an iterator if ** is supposed to work in + your custom tracer. + """ + attribute = HFAttribute(obj, "keys")() + if obj.node.target == "**kwargs": + return attribute._metadata + return attribute + + +def get_concrete_args(model: nn.Module, input_names: List[str]): + sig = inspect.signature(model.forward) + + if not (set(input_names) <= set(sig.parameters.keys())): + formatted_input_names = input_names[0] if len(input_names) == 1 else ", ".join(input_names) + formatted_allowed_input_names = ", ".join(sig.parameters.keys()) + raise ValueError( + f"The model does not have input(s) named: {formatted_input_names}, expected a subset of the following:" + f" {formatted_allowed_input_names}" + ) + + return {p.name: p.default for p in sig.parameters.values() if p.name not in input_names} + + +def is_model_supported(model: PreTrainedModel): + return model.__class__.__name__ in _SUPPORTED_MODELS + + +def check_if_model_is_supported(model: PreTrainedModel): + if not is_model_supported(model): + supported_model_names = ", ".join(_SUPPORTED_MODELS) + raise NotImplementedError( + f"Model {model.__class__.__name__} is not supported yet, supported models: {supported_model_names}" + ) + + +def symbolic_trace( + model: PreTrainedModel, + input_names: Optional[List[str]] = None, + disable_check: bool = False, + tracer_cls: Type[HFTracer] = HFTracer, +) -> GraphModule: + """ + Performs symbolic tracing on the model. + + Args: + model ([`PretrainedModel`]): + The model to trace. + input_names (`List[str]`, *optional*): + The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead. + disable_check (`bool`, *optional*, defaults to `False`): + If `True`, no check is done before trying to trace the model, this is mostly usesul for debugging purposes. + tracer_cls (`Type[HFTracer]`, *optional*, defaults to `HFTracer`): + The tracer class to use for instantiating the tracer. If unset, `HFTracer` is used instead. + + Returns: + `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model. + + Example: + + ```python + from transformers.utils.fx import symbolic_trace + + traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"]) + ``` + """ + if input_names is None: + input_names = model.dummy_inputs.keys() + + input_names = list(input_names) + concrete_args = get_concrete_args(model, input_names) + + if not disable_check: + check_if_model_is_supported(model) + + # Tracing. + tracer = tracer_cls() + traced_graph = tracer.trace(model, concrete_args=concrete_args) + traced = torch.fx.GraphModule(model, traced_graph) + + traced.config = model.config + # The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus + # _generate_dummy_input, where the model class is needed. 
+ traced.class_for_deserialization = model.__class__ + traced.device = model.device + + return traced diff --git a/venv/lib/python3.10/site-packages/transformers/utils/generic.py b/venv/lib/python3.10/site-packages/transformers/utils/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..d5762337b50ac8778975dcd8dbd5ca106fb380c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/generic.py @@ -0,0 +1,735 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Generic utilities +""" + +import inspect +import tempfile +from collections import OrderedDict, UserDict +from collections.abc import MutableMapping +from contextlib import ExitStack, contextmanager +from dataclasses import fields, is_dataclass +from enum import Enum +from functools import partial +from typing import Any, ContextManager, Iterable, List, Tuple + +import numpy as np +from packaging import version + +from .import_utils import ( + get_torch_version, + is_flax_available, + is_mlx_available, + is_tf_available, + is_torch_available, + is_torch_fx_proxy, +) + + +if is_flax_available(): + import jax.numpy as jnp + + +class cached_property(property): + """ + Descriptor that mimics @property but caches output in member variable. + + From tensorflow_datasets + + Built-in in functools from Python 3.8. + """ + + def __get__(self, obj, objtype=None): + # See docs.python.org/3/howto/descriptor.html#properties + if obj is None: + return self + if self.fget is None: + raise AttributeError("unreadable attribute") + attr = "__cached_" + self.fget.__name__ + cached = getattr(obj, attr, None) + if cached is None: + cached = self.fget(obj) + setattr(obj, attr, cached) + return cached + + +# vendored from distutils.util +def strtobool(val): + """Convert a string representation of truth to true (1) or false (0). + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. + Raises ValueError if 'val' is anything else. + """ + val = val.lower() + if val in {"y", "yes", "t", "true", "on", "1"}: + return 1 + if val in {"n", "no", "f", "false", "off", "0"}: + return 0 + raise ValueError(f"invalid truth value {val!r}") + + +def infer_framework_from_repr(x): + """ + Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the + frameworks in a smart order, without the need to import the frameworks). + """ + representation = str(type(x)) + if representation.startswith(" + + You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple + before. + + + """ + + def __init_subclass__(cls) -> None: + """Register subclasses as pytree nodes. + + This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with + `static_graph=True` with modules that output `ModelOutput` subclasses. 
+ """ + if is_torch_available(): + if version.parse(get_torch_version()) >= version.parse("2.2"): + _torch_pytree.register_pytree_node( + cls, + _model_output_flatten, + partial(_model_output_unflatten, output_type=cls), + serialized_type_name=f"{cls.__module__}.{cls.__name__}", + ) + else: + _torch_pytree._register_pytree_node( + cls, + _model_output_flatten, + partial(_model_output_unflatten, output_type=cls), + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Subclasses of ModelOutput must use the @dataclass decorator + # This check is done in __init__ because the @dataclass decorator operates after __init_subclass__ + # issubclass() would return True for issubclass(ModelOutput, ModelOutput) when False is needed + # Just need to check that the current class is not ModelOutput + is_modeloutput_subclass = self.__class__ != ModelOutput + + if is_modeloutput_subclass and not is_dataclass(self): + raise TypeError( + f"{self.__module__}.{self.__class__.__name__} is not a dataclasss." + " This is a subclass of ModelOutput and so must use the @dataclass decorator." + ) + + def __post_init__(self): + """Check the ModelOutput dataclass. + + Only occurs if @dataclass decorator has been used. + """ + class_fields = fields(self) + + # Safety and consistency checks + if not len(class_fields): + raise ValueError(f"{self.__class__.__name__} has no fields.") + if not all(field.default is None for field in class_fields[1:]): + raise ValueError(f"{self.__class__.__name__} should not have more than one required field.") + + first_field = getattr(self, class_fields[0].name) + other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) + + if other_fields_are_none and not is_tensor(first_field): + if isinstance(first_field, dict): + iterator = first_field.items() + first_field_iterator = True + else: + try: + iterator = iter(first_field) + first_field_iterator = True + except TypeError: + first_field_iterator = False + + # if we provided an iterator as first field and the iterator is a (key, value) iterator + # set the associated fields + if first_field_iterator: + for idx, element in enumerate(iterator): + if ( + not isinstance(element, (list, tuple)) + or not len(element) == 2 + or not isinstance(element[0], str) + ): + if idx == 0: + # If we do not have an iterator of key/values, set it as attribute + self[class_fields[0].name] = first_field + else: + # If we have a mixed iterator, raise an error + raise ValueError( + f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." 
+                            )
+                        break
+                    setattr(self, element[0], element[1])
+                    if element[1] is not None:
+                        self[element[0]] = element[1]
+        elif first_field is not None:
+            self[class_fields[0].name] = first_field
+        else:
+            for field in class_fields:
+                v = getattr(self, field.name)
+                if v is not None:
+                    self[field.name] = v
+
+    def __delitem__(self, *args, **kwargs):
+        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
+
+    def setdefault(self, *args, **kwargs):
+        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
+
+    def pop(self, *args, **kwargs):
+        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
+
+    def update(self, *args, **kwargs):
+        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
+
+    def __getitem__(self, k):
+        if isinstance(k, str):
+            inner_dict = dict(self.items())
+            return inner_dict[k]
+        else:
+            return self.to_tuple()[k]
+
+    def __setattr__(self, name, value):
+        if name in self.keys() and value is not None:
+            # Don't call self.__setitem__ to avoid recursion errors
+            super().__setitem__(name, value)
+        super().__setattr__(name, value)
+
+    def __setitem__(self, key, value):
+        # Will raise a KeyError if needed
+        super().__setitem__(key, value)
+        # Don't call self.__setattr__ to avoid recursion errors
+        super().__setattr__(key, value)
+
+    def __reduce__(self):
+        if not is_dataclass(self):
+            return super().__reduce__()
+        callable, _args, *remaining = super().__reduce__()
+        args = tuple(getattr(self, field.name) for field in fields(self))
+        return callable, args, *remaining
+
+    def to_tuple(self) -> Tuple[Any]:
+        """
+        Convert self to a tuple containing all the attributes/keys that are not `None`.
+        """
+        return tuple(self[k] for k in self.keys())
+
+
+if is_torch_available():
+    import torch.utils._pytree as _torch_pytree
+
+    def _model_output_flatten(output: ModelOutput) -> Tuple[List[Any], "_torch_pytree.Context"]:
+        return list(output.values()), list(output.keys())
+
+    def _model_output_unflatten(
+        values: Iterable[Any],
+        context: "_torch_pytree.Context",
+        output_type=None,
+    ) -> ModelOutput:
+        return output_type(**dict(zip(context, values)))
+
+    if version.parse(get_torch_version()) >= version.parse("2.2"):
+        _torch_pytree.register_pytree_node(
+            ModelOutput,
+            _model_output_flatten,
+            partial(_model_output_unflatten, output_type=ModelOutput),
+            serialized_type_name=f"{ModelOutput.__module__}.{ModelOutput.__name__}",
+        )
+    else:
+        _torch_pytree._register_pytree_node(
+            ModelOutput,
+            _model_output_flatten,
+            partial(_model_output_unflatten, output_type=ModelOutput),
+        )
+
+
+class ExplicitEnum(str, Enum):
+    """
+    Enum with a more explicit error message for missing values.
+    """
+
+    @classmethod
+    def _missing_(cls, value):
+        raise ValueError(
+            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
+        )
+
+
+class PaddingStrategy(ExplicitEnum):
+    """
+    Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
+    an IDE.
+    """
+
+    LONGEST = "longest"
+    MAX_LENGTH = "max_length"
+    DO_NOT_PAD = "do_not_pad"
+
+
+class TensorType(ExplicitEnum):
+    """
+    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
+    tab-completion in an IDE.
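The dict/tuple hybrid behavior implemented above can be summarized with a short sketch (`ToyOutput` is hypothetical):

```python
from dataclasses import dataclass
from typing import Optional

import torch
from transformers.utils import ModelOutput


@dataclass
class ToyOutput(ModelOutput):
    # Every field after the first must default to None (checked in __post_init__).
    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None


out = ToyOutput(logits=torch.ones(2, 3))
assert out["logits"] is out.logits       # dict-style and attribute access agree
assert out[0] is out.logits              # integer indexing skips fields that are None
assert out.to_tuple()[0] is out.logits   # loss is None, so it is dropped from the tuple
```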
+ """ + + PYTORCH = "pt" + TENSORFLOW = "tf" + NUMPY = "np" + JAX = "jax" + MLX = "mlx" + + +class ContextManagers: + """ + Wrapper for `contextlib.ExitStack` which enters a collection of context managers. Adaptation of `ContextManagers` + in the `fastcore` library. + """ + + def __init__(self, context_managers: List[ContextManager]): + self.context_managers = context_managers + self.stack = ExitStack() + + def __enter__(self): + for context_manager in self.context_managers: + self.stack.enter_context(context_manager) + + def __exit__(self, *args, **kwargs): + self.stack.__exit__(*args, **kwargs) + + +def can_return_loss(model_class): + """ + Check if a given model can return loss. + + Args: + model_class (`type`): The class of the model. + """ + framework = infer_framework(model_class) + if framework == "tf": + signature = inspect.signature(model_class.call) # TensorFlow models + elif framework == "pt": + signature = inspect.signature(model_class.forward) # PyTorch models + else: + signature = inspect.signature(model_class.__call__) # Flax models + + for p in signature.parameters: + if p == "return_loss" and signature.parameters[p].default is True: + return True + + return False + + +def find_labels(model_class): + """ + Find the labels used by a given model. + + Args: + model_class (`type`): The class of the model. + """ + model_name = model_class.__name__ + framework = infer_framework(model_class) + if framework == "tf": + signature = inspect.signature(model_class.call) # TensorFlow models + elif framework == "pt": + signature = inspect.signature(model_class.forward) # PyTorch models + else: + signature = inspect.signature(model_class.__call__) # Flax models + + if "QuestionAnswering" in model_name: + return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] + else: + return [p for p in signature.parameters if "label" in p] + + +def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."): + """Flatten a nested dict into a single level dict.""" + + def _flatten_dict(d, parent_key="", delimiter="."): + for k, v in d.items(): + key = str(parent_key) + delimiter + str(k) if parent_key else k + if v and isinstance(v, MutableMapping): + yield from flatten_dict(v, key, delimiter=delimiter).items() + else: + yield key, v + + return dict(_flatten_dict(d, parent_key, delimiter)) + + +@contextmanager +def working_or_temp_dir(working_dir, use_temp_dir: bool = False): + if use_temp_dir: + with tempfile.TemporaryDirectory() as tmp_dir: + yield tmp_dir + else: + yield working_dir + + +def transpose(array, axes=None): + """ + Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy + arrays. + """ + if is_numpy_array(array): + return np.transpose(array, axes=axes) + elif is_torch_tensor(array): + return array.T if axes is None else array.permute(*axes) + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.transpose(array, perm=axes) + elif is_jax_tensor(array): + return jnp.transpose(array, axes=axes) + else: + raise ValueError(f"Type not supported for transpose: {type(array)}.") + + +def reshape(array, newshape): + """ + Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy + arrays. 
+ """ + if is_numpy_array(array): + return np.reshape(array, newshape) + elif is_torch_tensor(array): + return array.reshape(*newshape) + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.reshape(array, newshape) + elif is_jax_tensor(array): + return jnp.reshape(array, newshape) + else: + raise ValueError(f"Type not supported for reshape: {type(array)}.") + + +def squeeze(array, axis=None): + """ + Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy + arrays. + """ + if is_numpy_array(array): + return np.squeeze(array, axis=axis) + elif is_torch_tensor(array): + return array.squeeze() if axis is None else array.squeeze(dim=axis) + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.squeeze(array, axis=axis) + elif is_jax_tensor(array): + return jnp.squeeze(array, axis=axis) + else: + raise ValueError(f"Type not supported for squeeze: {type(array)}.") + + +def expand_dims(array, axis): + """ + Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy + arrays. + """ + if is_numpy_array(array): + return np.expand_dims(array, axis) + elif is_torch_tensor(array): + return array.unsqueeze(dim=axis) + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.expand_dims(array, axis=axis) + elif is_jax_tensor(array): + return jnp.expand_dims(array, axis=axis) + else: + raise ValueError(f"Type not supported for expand_dims: {type(array)}.") + + +def tensor_size(array): + """ + Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. + """ + if is_numpy_array(array): + return np.size(array) + elif is_torch_tensor(array): + return array.numel() + elif is_tf_tensor(array): + import tensorflow as tf + + return tf.size(array) + elif is_jax_tensor(array): + return array.size + else: + raise ValueError(f"Type not supported for tensor_size: {type(array)}.") + + +def add_model_info_to_auto_map(auto_map, repo_id): + """ + Adds the information of the repo_id to a given auto map. + """ + for key, value in auto_map.items(): + if isinstance(value, (tuple, list)): + auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value] + elif value is not None and "--" not in value: + auto_map[key] = f"{repo_id}--{value}" + + return auto_map + + +def infer_framework(model_class): + """ + Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant + classes are imported or available. + """ + for base_class in inspect.getmro(model_class): + module = base_class.__module__ + name = base_class.__name__ + if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel": + return "tf" + elif module.startswith("torch") or name == "PreTrainedModel": + return "pt" + elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel": + return "flax" + else: + raise TypeError(f"Could not infer framework from class {model_class}.") diff --git a/venv/lib/python3.10/site-packages/transformers/utils/hp_naming.py b/venv/lib/python3.10/site-packages/transformers/utils/hp_naming.py new file mode 100644 index 0000000000000000000000000000000000000000..f7c5cb5259f8452b09cc910aee1fec7f1ba438c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/hp_naming.py @@ -0,0 +1,162 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import re + + +class TrialShortNamer: + PREFIX = "hp" + DEFAULTS = {} + NAMING_INFO = None + + @classmethod + def set_defaults(cls, prefix, defaults): + cls.PREFIX = prefix + cls.DEFAULTS = defaults + cls.build_naming_info() + + @staticmethod + def shortname_for_word(info, word): + if len(word) == 0: + return "" + short_word = None + if any(char.isdigit() for char in word): + raise Exception(f"Parameters should not contain numbers: '{word}' contains a number") + if word in info["short_word"]: + return info["short_word"][word] + for prefix_len in range(1, len(word) + 1): + prefix = word[:prefix_len] + if prefix in info["reverse_short_word"]: + continue + else: + short_word = prefix + break + + if short_word is None: + # Paranoid fallback + def int_to_alphabetic(integer): + s = "" + while integer != 0: + s = chr(ord("A") + integer % 10) + s + integer //= 10 + return s + + i = 0 + while True: + sword = word + "#" + int_to_alphabetic(i) + if sword in info["reverse_short_word"]: + continue + else: + short_word = sword + break + + info["short_word"][word] = short_word + info["reverse_short_word"][short_word] = word + return short_word + + @staticmethod + def shortname_for_key(info, param_name): + words = param_name.split("_") + + shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words] + + # We try to create a separatorless short name, but if there is a collision we have to fallback + # to a separated short name + separators = ["", "_"] + + for separator in separators: + shortname = separator.join(shortname_parts) + if shortname not in info["reverse_short_param"]: + info["short_param"][param_name] = shortname + info["reverse_short_param"][shortname] = param_name + return shortname + + return param_name + + @staticmethod + def add_new_param_name(info, param_name): + short_name = TrialShortNamer.shortname_for_key(info, param_name) + info["short_param"][param_name] = short_name + info["reverse_short_param"][short_name] = param_name + + @classmethod + def build_naming_info(cls): + if cls.NAMING_INFO is not None: + return + + info = { + "short_word": {}, + "reverse_short_word": {}, + "short_param": {}, + "reverse_short_param": {}, + } + + field_keys = list(cls.DEFAULTS.keys()) + + for k in field_keys: + cls.add_new_param_name(info, k) + + cls.NAMING_INFO = info + + @classmethod + def shortname(cls, params): + cls.build_naming_info() + assert cls.PREFIX is not None + name = [copy.copy(cls.PREFIX)] + + for k, v in params.items(): + if k not in cls.DEFAULTS: + raise Exception(f"You should provide a default value for the param name {k} with value {v}") + if v == cls.DEFAULTS[k]: + # The default value is not added to the name + continue + + key = cls.NAMING_INFO["short_param"][k] + + if isinstance(v, bool): + v = 1 if v else 0 + + sep = "" if isinstance(v, (int, float)) else "-" + e = f"{key}{sep}{v}" + name.append(e) + + return "_".join(name) + + @classmethod + def parse_repr(cls, repr): + repr = repr[len(cls.PREFIX) 
+ 1 :]
+        if repr == "":
+            values = []
+        else:
+            values = repr.split("_")
+
+        parameters = {}
+
+        for value in values:
+            if "-" in value:
+                p_k, p_v = value.split("-")
+            else:
+                p_k = re.sub("[0-9.]", "", value)
+                p_v = float(re.sub("[^0-9.]", "", value))
+
+            key = cls.NAMING_INFO["reverse_short_param"][p_k]
+
+            parameters[key] = p_v
+
+        for k in cls.DEFAULTS:
+            if k not in parameters:
+                parameters[k] = cls.DEFAULTS[k]
+
+        return parameters
diff --git a/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py b/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8c45aeac33f16b4ae7622e696f0284de9bb8abd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py
@@ -0,0 +1,1541 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Import utilities: Utilities related to imports and our lazy inits.
+"""
+
+import importlib.metadata
+import importlib.util
+import json
+import os
+import shutil
+import subprocess
+import sys
+import warnings
+from collections import OrderedDict
+from functools import lru_cache
+from itertools import chain
+from types import ModuleType
+from typing import Any, Tuple, Union
+
+from packaging import version
+
+from . import logging
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+# TODO: This doesn't work for all packages (`bs4`, `faiss`, etc.). Talk to Sylvain to see how to handle it better.
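The mismatch that this TODO refers to is between import names and distribution names; a quick illustration (assumes beautifulsoup4 is installed):

```python
import importlib.metadata
import importlib.util

importlib.util.find_spec("bs4") is not None   # True – "bs4" is the import name
importlib.metadata.version("beautifulsoup4")  # e.g. "4.12.3" – the distribution name
# importlib.metadata.version("bs4") would typically raise PackageNotFoundError.
```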
+def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]: + # Check if the package spec exists and grab its version to avoid importing a local directory + package_exists = importlib.util.find_spec(pkg_name) is not None + package_version = "N/A" + if package_exists: + try: + # Primary method to get the package version + package_version = importlib.metadata.version(pkg_name) + except importlib.metadata.PackageNotFoundError: + # Fallback method: Only for "torch" and versions containing "dev" + if pkg_name == "torch": + try: + package = importlib.import_module(pkg_name) + temp_version = getattr(package, "__version__", "N/A") + # Check if the version contains "dev" + if "dev" in temp_version: + package_version = temp_version + package_exists = True + else: + package_exists = False + except ImportError: + # If the package can't be imported, it's not available + package_exists = False + else: + # For packages other than "torch", don't attempt the fallback and set as not available + package_exists = False + logger.debug(f"Detected {pkg_name} version: {package_version}") + if return_version: + return package_exists, package_version + else: + return package_exists + + +ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} +ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) + +USE_TF = os.environ.get("USE_TF", "AUTO").upper() +USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() +USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() + +# Try to run a native pytorch job in an environment with TorchXLA installed by setting this value to 0. +USE_TORCH_XLA = os.environ.get("USE_TORCH_XLA", "1").upper() + +FORCE_TF_AVAILABLE = os.environ.get("FORCE_TF_AVAILABLE", "AUTO").upper() + +# `transformers` requires `torch>=1.11` but this variable is exposed publicly, and we can't simply remove it. +# This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs. +TORCH_FX_REQUIRED_VERSION = version.parse("1.10") + +ACCELERATE_MIN_VERSION = "0.21.0" +FSDP_MIN_VERSION = "1.12.0" +XLA_FSDPV2_MIN_VERSION = "2.2.0" + + +_accelerate_available, _accelerate_version = _is_package_available("accelerate", return_version=True) +_apex_available = _is_package_available("apex") +_aqlm_available = _is_package_available("aqlm") +_av_available = importlib.util.find_spec("av") is not None +_bitsandbytes_available = _is_package_available("bitsandbytes") +_galore_torch_available = _is_package_available("galore_torch") +# `importlib.metadata.version` doesn't work with `bs4` but `beautifulsoup4`. For `importlib.util.find_spec`, reversed. +_bs4_available = importlib.util.find_spec("bs4") is not None +_coloredlogs_available = _is_package_available("coloredlogs") +# `importlib.metadata.util` doesn't work with `opencv-python-headless`. +_cv2_available = importlib.util.find_spec("cv2") is not None +_datasets_available = _is_package_available("datasets") +_decord_available = importlib.util.find_spec("decord") is not None +_detectron2_available = _is_package_available("detectron2") +# We need to check both `faiss` and `faiss-cpu`. 
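A related reason the helper above checks distribution metadata rather than trusting `find_spec` alone: a same-named local directory on `sys.path` satisfies `find_spec` but has no installed metadata. A hedged sketch of the distinction (helper names are hypothetical):

```python
import importlib.metadata
import importlib.util

def naive_available(pkg: str) -> bool:
    # Can be fooled by a local directory named `pkg` on sys.path.
    return importlib.util.find_spec(pkg) is not None

def robust_available(pkg: str) -> bool:
    # Requires actual installed metadata, like _is_package_available above.
    try:
        importlib.metadata.version(pkg)
        return True
    except importlib.metadata.PackageNotFoundError:
        return False
```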
+_faiss_available = importlib.util.find_spec("faiss") is not None +try: + _faiss_version = importlib.metadata.version("faiss") + logger.debug(f"Successfully imported faiss version {_faiss_version}") +except importlib.metadata.PackageNotFoundError: + try: + _faiss_version = importlib.metadata.version("faiss-cpu") + logger.debug(f"Successfully imported faiss version {_faiss_version}") + except importlib.metadata.PackageNotFoundError: + _faiss_available = False +_ftfy_available = _is_package_available("ftfy") +_g2p_en_available = _is_package_available("g2p_en") +_ipex_available, _ipex_version = _is_package_available("intel_extension_for_pytorch", return_version=True) +_jieba_available = _is_package_available("jieba") +_jinja_available = _is_package_available("jinja2") +_kenlm_available = _is_package_available("kenlm") +_keras_nlp_available = _is_package_available("keras_nlp") +_levenshtein_available = _is_package_available("Levenshtein") +_librosa_available = _is_package_available("librosa") +_natten_available = _is_package_available("natten") +_nltk_available = _is_package_available("nltk") +_onnx_available = _is_package_available("onnx") +_openai_available = _is_package_available("openai") +_optimum_available = _is_package_available("optimum") +_auto_gptq_available = _is_package_available("auto_gptq") +# `importlib.metadata.version` doesn't work with `awq` +_auto_awq_available = importlib.util.find_spec("awq") is not None +_quanto_available = _is_package_available("quanto") +_pandas_available = _is_package_available("pandas") +_peft_available = _is_package_available("peft") +_phonemizer_available = _is_package_available("phonemizer") +_psutil_available = _is_package_available("psutil") +_py3nvml_available = _is_package_available("py3nvml") +_pyctcdecode_available = _is_package_available("pyctcdecode") +_pytesseract_available = _is_package_available("pytesseract") +_pytest_available = _is_package_available("pytest") +_pytorch_quantization_available = _is_package_available("pytorch_quantization") +_rjieba_available = _is_package_available("rjieba") +_sacremoses_available = _is_package_available("sacremoses") +_safetensors_available = _is_package_available("safetensors") +_scipy_available = _is_package_available("scipy") +_sentencepiece_available = _is_package_available("sentencepiece") +_is_seqio_available = _is_package_available("seqio") +_sklearn_available = importlib.util.find_spec("sklearn") is not None +if _sklearn_available: + try: + importlib.metadata.version("scikit-learn") + except importlib.metadata.PackageNotFoundError: + _sklearn_available = False +_smdistributed_available = importlib.util.find_spec("smdistributed") is not None +_soundfile_available = _is_package_available("soundfile") +_spacy_available = _is_package_available("spacy") +_sudachipy_available, _sudachipy_version = _is_package_available("sudachipy", return_version=True) +_tensorflow_probability_available = _is_package_available("tensorflow_probability") +_tensorflow_text_available = _is_package_available("tensorflow_text") +_tf2onnx_available = _is_package_available("tf2onnx") +_timm_available = _is_package_available("timm") +_tokenizers_available = _is_package_available("tokenizers") +_torchaudio_available = _is_package_available("torchaudio") +_torchdistx_available = _is_package_available("torchdistx") +_torchvision_available = _is_package_available("torchvision") +_mlx_available = _is_package_available("mlx") + + +_torch_version = "N/A" +_torch_available = False +if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and 
USE_TF not in ENV_VARS_TRUE_VALUES: + _torch_available, _torch_version = _is_package_available("torch", return_version=True) +else: + logger.info("Disabling PyTorch because USE_TF is set") + _torch_available = False + + +_tf_version = "N/A" +_tf_available = False +if FORCE_TF_AVAILABLE in ENV_VARS_TRUE_VALUES: + _tf_available = True +else: + if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: + # Note: _is_package_available("tensorflow") fails for tensorflow-cpu. Please test any changes to the line below + # with tensorflow-cpu to make sure it still works! + _tf_available = importlib.util.find_spec("tensorflow") is not None + if _tf_available: + candidates = ( + "tensorflow", + "tensorflow-cpu", + "tensorflow-gpu", + "tf-nightly", + "tf-nightly-cpu", + "tf-nightly-gpu", + "tf-nightly-rocm", + "intel-tensorflow", + "intel-tensorflow-avx512", + "tensorflow-rocm", + "tensorflow-macos", + "tensorflow-aarch64", + ) + _tf_version = None + # For the metadata, we have to look for both tensorflow and tensorflow-cpu + for pkg in candidates: + try: + _tf_version = importlib.metadata.version(pkg) + break + except importlib.metadata.PackageNotFoundError: + pass + _tf_available = _tf_version is not None + if _tf_available: + if version.parse(_tf_version) < version.parse("2"): + logger.info( + f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum." + ) + _tf_available = False + else: + logger.info("Disabling Tensorflow because USE_TORCH is set") + + +_essentia_available = importlib.util.find_spec("essentia") is not None +try: + _essentia_version = importlib.metadata.version("essentia") + logger.debug(f"Successfully imported essentia version {_essentia_version}") +except importlib.metadata.PackageNotFoundError: + _essentia_version = False + + +_pretty_midi_available = importlib.util.find_spec("pretty_midi") is not None +try: + _pretty_midi_version = importlib.metadata.version("pretty_midi") + logger.debug(f"Successfully imported pretty_midi version {_pretty_midi_version}") +except importlib.metadata.PackageNotFoundError: + _pretty_midi_available = False + + +ccl_version = "N/A" +_is_ccl_available = ( + importlib.util.find_spec("torch_ccl") is not None + or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None +) +try: + ccl_version = importlib.metadata.version("oneccl_bind_pt") + logger.debug(f"Detected oneccl_bind_pt version {ccl_version}") +except importlib.metadata.PackageNotFoundError: + _is_ccl_available = False + + +_flax_available = False +if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: + _flax_available, _flax_version = _is_package_available("flax", return_version=True) + if _flax_available: + _jax_available, _jax_version = _is_package_available("jax", return_version=True) + if _jax_available: + logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.") + else: + _flax_available = _jax_available = False + _jax_version = _flax_version = "N/A" + + +_torch_fx_available = False +if _torch_available: + torch_version = version.parse(_torch_version) + _torch_fx_available = (torch_version.major, torch_version.minor) >= ( + TORCH_FX_REQUIRED_VERSION.major, + TORCH_FX_REQUIRED_VERSION.minor, + ) + + +_torch_xla_available = False +if USE_TORCH_XLA in ENV_VARS_TRUE_VALUES: + _torch_xla_available, _torch_xla_version = _is_package_available("torch_xla", return_version=True) + if _torch_xla_available: + logger.info(f"Torch XLA version {_torch_xla_version} available.") + + +def is_kenlm_available(): + return 
_kenlm_available + + +def is_cv2_available(): + return _cv2_available + + +def is_torch_available(): + return _torch_available + + +def get_torch_version(): + return _torch_version + + +def is_torch_sdpa_available(): + if not is_torch_available(): + return False + elif _torch_version == "N/A": + return False + + # NOTE: We require torch>=2.1 (and not torch>=2.0) to use SDPA in Transformers for two reasons: + # - Allow the global use of the `scale` argument introduced in https://github.com/pytorch/pytorch/pull/95259 + # - Memory-efficient attention supports arbitrary attention_mask: https://github.com/pytorch/pytorch/pull/104310 + # NOTE: We require torch>=2.1.1 to avoid a numerical issue in SDPA with non-contiguous inputs: https://github.com/pytorch/pytorch/issues/112577 + return version.parse(_torch_version) >= version.parse("2.1.1") + + +def is_torchvision_available(): + return _torchvision_available + + +def is_galore_torch_available(): + return _galore_torch_available + + +def is_pyctcdecode_available(): + return _pyctcdecode_available + + +def is_librosa_available(): + return _librosa_available + + +def is_essentia_available(): + return _essentia_available + + +def is_pretty_midi_available(): + return _pretty_midi_available + + +def is_torch_cuda_available(): + if is_torch_available(): + import torch + + return torch.cuda.is_available() + else: + return False + + +def is_mamba_ssm_available(): + if is_torch_available(): + import torch + + if not torch.cuda.is_available(): + return False + else: + return _is_package_available("mamba_ssm") + return False + + +def is_causal_conv1d_available(): + if is_torch_available(): + import torch + + if not torch.cuda.is_available(): + return False + return _is_package_available("causal_conv1d") + return False + + +def is_torch_mps_available(): + if is_torch_available(): + import torch + + if hasattr(torch.backends, "mps"): + return torch.backends.mps.is_available() + return False + + +def is_torch_bf16_gpu_available(): + if not is_torch_available(): + return False + + import torch + + return torch.cuda.is_available() and torch.cuda.is_bf16_supported() + + +def is_torch_bf16_cpu_available(): + if not is_torch_available(): + return False + + import torch + + try: + # multiple levels of AttributeError depending on the pytorch version so do them all in one check + _ = torch.cpu.amp.autocast + except AttributeError: + return False + + return True + + +def is_torch_bf16_available(): + # the original bf16 check was for gpu only, but later a cpu/bf16 combo has emerged so this util + # has become ambiguous and therefore deprecated + warnings.warn( + "The util is_torch_bf16_available is deprecated, please use is_torch_bf16_gpu_available " + "or is_torch_bf16_cpu_available instead according to whether it's used with cpu or gpu", + FutureWarning, + ) + return is_torch_bf16_gpu_available() + + +@lru_cache() +def is_torch_fp16_available_on_device(device): + if not is_torch_available(): + return False + + import torch + + try: + x = torch.zeros(2, 2, dtype=torch.float16).to(device) + _ = x @ x + + # At this moment, let's be strict of the check: check if `LayerNorm` is also supported on device, because many + # models use this layer. 
+ batch, sentence_length, embedding_dim = 3, 4, 5 + embedding = torch.randn(batch, sentence_length, embedding_dim, dtype=torch.float16, device=device) + layer_norm = torch.nn.LayerNorm(embedding_dim, dtype=torch.float16, device=device) + _ = layer_norm(embedding) + + except: # noqa: E722 + # TODO: more precise exception matching, if possible. + # most backends should return `RuntimeError` however this is not guaranteed. + return False + + return True + + +@lru_cache() +def is_torch_bf16_available_on_device(device): + if not is_torch_available(): + return False + + import torch + + if device == "cuda": + return is_torch_bf16_gpu_available() + + try: + x = torch.zeros(2, 2, dtype=torch.bfloat16).to(device) + _ = x @ x + except: # noqa: E722 + # TODO: more precise exception matching, if possible. + # most backends should return `RuntimeError` however this is not guaranteed. + return False + + return True + + +def is_torch_tf32_available(): + if not is_torch_available(): + return False + + import torch + + if not torch.cuda.is_available() or torch.version.cuda is None: + return False + if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: + return False + if int(torch.version.cuda.split(".")[0]) < 11: + return False + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"): + return False + + return True + + +def is_torch_fx_available(): + return _torch_fx_available + + +def is_peft_available(): + return _peft_available + + +def is_bs4_available(): + return _bs4_available + + +def is_tf_available(): + return _tf_available + + +def is_coloredlogs_available(): + return _coloredlogs_available + + +def is_tf2onnx_available(): + return _tf2onnx_available + + +def is_onnx_available(): + return _onnx_available + + +def is_openai_available(): + return _openai_available + + +def is_flax_available(): + return _flax_available + + +def is_ftfy_available(): + return _ftfy_available + + +def is_g2p_en_available(): + return _g2p_en_available + + +@lru_cache() +def is_torch_tpu_available(check_device=True): + "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" + warnings.warn( + "`is_torch_tpu_available` is deprecated and will be removed in 4.41.0. " + "Please use the `is_torch_xla_available` instead.", + FutureWarning, + ) + + if not _torch_available: + return False + if importlib.util.find_spec("torch_xla") is not None: + if check_device: + # We need to check if `xla_device` can be found, will raise a RuntimeError if not + try: + import torch_xla.core.xla_model as xm + + _ = xm.xla_device() + return True + except RuntimeError: + return False + return True + return False + + +@lru_cache +def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False): + """ + Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set + the USE_TORCH_XLA to false. + """ + assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true." 
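Several of these capability checks share one probe-and-fail-closed idiom: run a representative op on the target device and treat any failure as "unsupported". Distilled as a hedged sketch:

```python
import torch

def op_probe(device: str, dtype: torch.dtype) -> bool:
    # Backends do not agree on which exception type they raise, hence the
    # deliberately broad except clause (mirroring the checks above).
    try:
        x = torch.zeros(2, 2, dtype=dtype, device=device)
        _ = x @ x
    except Exception:
        return False
    return True

op_probe("cpu", torch.bfloat16)  # True on most recent CPU builds of torch
```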
+
+    if not _torch_xla_available:
+        return False
+
+    import torch_xla
+
+    if check_is_gpu:
+        return torch_xla.runtime.device_type() in ["GPU", "CUDA"]
+    elif check_is_tpu:
+        return torch_xla.runtime.device_type() == "TPU"
+
+    return True
+
+
+@lru_cache()
+def is_torch_neuroncore_available(check_device=True):
+    if importlib.util.find_spec("torch_neuronx") is not None:
+        return is_torch_xla_available()
+    return False
+
+
+@lru_cache()
+def is_torch_npu_available(check_device=False):
+    "Checks if `torch_npu` is installed and potentially if an NPU is in the environment"
+    if not _torch_available or importlib.util.find_spec("torch_npu") is None:
+        return False
+
+    import torch
+    import torch_npu  # noqa: F401
+
+    if check_device:
+        try:
+            # Will raise a RuntimeError if no NPU is found
+            _ = torch.npu.device_count()
+            return torch.npu.is_available()
+        except RuntimeError:
+            return False
+    return hasattr(torch, "npu") and torch.npu.is_available()
+
+
+@lru_cache()
+def is_torch_mlu_available(check_device=False):
+    "Checks if `torch_mlu` is installed and potentially if an MLU is in the environment"
+    if not _torch_available or importlib.util.find_spec("torch_mlu") is None:
+        return False
+
+    import torch
+    import torch_mlu  # noqa: F401
+
+    from ..dependency_versions_table import deps
+
+    deps["deepspeed"] = "deepspeed-mlu>=0.10.1"
+
+    if check_device:
+        try:
+            # Will raise a RuntimeError if no MLU is found
+            _ = torch.mlu.device_count()
+            return torch.mlu.is_available()
+        except RuntimeError:
+            return False
+    return hasattr(torch, "mlu") and torch.mlu.is_available()
+
+
+def is_torchdynamo_available():
+    if not is_torch_available():
+        return False
+    try:
+        import torch._dynamo as dynamo  # noqa: F401
+
+        return True
+    except Exception:
+        return False
+
+
+def is_torch_compile_available():
+    if not is_torch_available():
+        return False
+
+    import torch
+
+    # We don't do any version check here to support nightlies marked as 1.14. Ultimately needs to check version against
+    # 2.0 but let's do it later.
+    return hasattr(torch, "compile")
+
+
+def is_torchdynamo_compiling():
+    if not is_torch_available():
+        return False
+    try:
+        import torch._dynamo as dynamo  # noqa: F401
+
+        return dynamo.is_compiling()
+    except Exception:
+        return False
+
+
+def is_torch_tensorrt_fx_available():
+    if importlib.util.find_spec("torch_tensorrt") is None:
+        return False
+    return importlib.util.find_spec("torch_tensorrt.fx") is not None
+
+
+def is_datasets_available():
+    return _datasets_available
+
+
+def is_detectron2_available():
+    return _detectron2_available
+
+
+def is_rjieba_available():
+    return _rjieba_available
+
+
+def is_psutil_available():
+    return _psutil_available
+
+
+def is_py3nvml_available():
+    return _py3nvml_available
+
+
+def is_sacremoses_available():
+    return _sacremoses_available
+
+
+def is_apex_available():
+    return _apex_available
+
+
+def is_aqlm_available():
+    return _aqlm_available
+
+
+def is_av_available():
+    return _av_available
+
+
+def is_ninja_available():
+    r"""
+    Code comes from *torch.utils.cpp_extension.is_ninja_available()*. Returns `True` if the
+    [ninja](https://ninja-build.org/) build system is available on the system, `False` otherwise.
+    """
+    try:
+        subprocess.check_output("ninja --version".split())
+    except Exception:
+        return False
+    else:
+        return True
+
+
+def is_ipex_available():
+    def get_major_and_minor_from_version(full_version):
+        return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
+
+    if not is_torch_available() or not _ipex_available:
+        return False
+
+    torch_major_and_minor = get_major_and_minor_from_version(_torch_version)
+    ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version)
+    if torch_major_and_minor != ipex_major_and_minor:
+        logger.warning(
+            f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*,"
+            f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again."
+        )
+        return False
+    return True
+
+
+@lru_cache
+def is_torch_xpu_available(check_device=False):
+    "Checks if `intel_extension_for_pytorch` is installed and potentially if an XPU is in the environment"
+    if not is_ipex_available():
+        return False
+
+    import intel_extension_for_pytorch  # noqa: F401
+    import torch
+
+    if check_device:
+        try:
+            # Will raise a RuntimeError if no XPU is found
+            _ = torch.xpu.device_count()
+            return torch.xpu.is_available()
+        except RuntimeError:
+            return False
+    return hasattr(torch, "xpu") and torch.xpu.is_available()
+
+
+def is_bitsandbytes_available():
+    if not is_torch_available():
+        return False
+
+    # bitsandbytes throws an error if cuda is not available
+    # let's avoid that by adding a simple check
+    import torch
+
+    return _bitsandbytes_available and torch.cuda.is_available()
+
+
+def is_flash_attn_2_available():
+    if not is_torch_available():
+        return False
+
+    if not _is_package_available("flash_attn"):
+        return False
+
+    # Let's add an extra check to see if cuda is available
+    import torch
+
+    if not torch.cuda.is_available():
+        return False
+
+    if torch.version.cuda:
+        return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0")
+    elif torch.version.hip:
+        # TODO: Bump the requirement to 2.1.0 once released in https://github.com/ROCmSoftwarePlatform/flash-attention
+        return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.0.4")
+    else:
+        return False
+
+
+def is_flash_attn_greater_or_equal_2_10():
+    if not _is_package_available("flash_attn"):
+        return False
+
+    return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0")
+
+
+def is_torchdistx_available():
+    return _torchdistx_available
+
+
+def is_faiss_available():
+    return _faiss_available
+
+
+def is_scipy_available():
+    return _scipy_available
+
+
+def is_sklearn_available():
+    return _sklearn_available
+
+
+def is_sentencepiece_available():
+    return _sentencepiece_available
+
+
+def is_seqio_available():
+    return _is_seqio_available
+
+
+def is_protobuf_available():
+    if importlib.util.find_spec("google") is None:
+        return False
+    return importlib.util.find_spec("google.protobuf") is not None
+
+
+def is_accelerate_available(min_version: str = ACCELERATE_MIN_VERSION):
+    return _accelerate_available and version.parse(_accelerate_version) >= version.parse(min_version)
+
+
+def is_fsdp_available(min_version: str = FSDP_MIN_VERSION):
+    return is_torch_available() and version.parse(_torch_version) >= version.parse(min_version)
+
+
+def is_optimum_available():
+    return _optimum_available
+
+
+def is_auto_awq_available():
+    return _auto_awq_available
+
+
+def is_quanto_available():
+    return _quanto_available
+
+
+def is_auto_gptq_available():
+    return _auto_gptq_available
+
+
+def is_levenshtein_available():
+    return _levenshtein_available
+
+
+def is_optimum_neuron_available():
+    return _optimum_available and _is_package_available("optimum.neuron")
+
+
+def is_safetensors_available():
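+    # (Added note) Like most checks in this module, this returns a flag computed once
+    # at import time; installing safetensors after `import transformers` requires a
+    # fresh interpreter (or a re-import) to be detected.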
return _safetensors_available + + +def is_tokenizers_available(): + return _tokenizers_available + + +@lru_cache +def is_vision_available(): + _pil_available = importlib.util.find_spec("PIL") is not None + if _pil_available: + try: + package_version = importlib.metadata.version("Pillow") + except importlib.metadata.PackageNotFoundError: + try: + package_version = importlib.metadata.version("Pillow-SIMD") + except importlib.metadata.PackageNotFoundError: + return False + logger.debug(f"Detected PIL version {package_version}") + return _pil_available + + +def is_pytesseract_available(): + return _pytesseract_available + + +def is_pytest_available(): + return _pytest_available + + +def is_spacy_available(): + return _spacy_available + + +def is_tensorflow_text_available(): + return is_tf_available() and _tensorflow_text_available + + +def is_keras_nlp_available(): + return is_tensorflow_text_available() and _keras_nlp_available + + +def is_in_notebook(): + try: + # Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py + get_ipython = sys.modules["IPython"].get_ipython + if "IPKernelApp" not in get_ipython().config: + raise ImportError("console") + if "VSCODE_PID" in os.environ: + raise ImportError("vscode") + if "DATABRICKS_RUNTIME_VERSION" in os.environ and os.environ["DATABRICKS_RUNTIME_VERSION"] < "11.0": + # Databricks Runtime 11.0 and above uses IPython kernel by default so it should be compatible with Jupyter notebook + # https://docs.microsoft.com/en-us/azure/databricks/notebooks/ipython-kernel + raise ImportError("databricks") + + return importlib.util.find_spec("IPython") is not None + except (AttributeError, ImportError, KeyError): + return False + + +def is_pytorch_quantization_available(): + return _pytorch_quantization_available + + +def is_tensorflow_probability_available(): + return _tensorflow_probability_available + + +def is_pandas_available(): + return _pandas_available + + +def is_sagemaker_dp_enabled(): + # Get the sagemaker specific env variable. + sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}") + try: + # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". + sagemaker_params = json.loads(sagemaker_params) + if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False): + return False + except json.JSONDecodeError: + return False + # Lastly, check if the `smdistributed` module is present. + return _smdistributed_available + + +def is_sagemaker_mp_enabled(): + # Get the sagemaker specific mp parameters from smp_options variable. + smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") + try: + # Parse it and check the field "partitions" is included, it is required for model parallel. + smp_options = json.loads(smp_options) + if "partitions" not in smp_options: + return False + except json.JSONDecodeError: + return False + + # Get the sagemaker specific framework parameters from mpi_options variable. + mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") + try: + # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". + mpi_options = json.loads(mpi_options) + if not mpi_options.get("sagemaker_mpi_enabled", False): + return False + except json.JSONDecodeError: + return False + # Lastly, check if the `smdistributed` module is present. 
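+    # (Added note, illustrative values) e.g. SM_HP_MP_PARAMETERS='{"partitions": 2}'
+    # together with SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}' would satisfy
+    # the two JSON checks above.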
+ return _smdistributed_available + + +def is_training_run_on_sagemaker(): + return "SAGEMAKER_JOB_NAME" in os.environ + + +def is_soundfile_availble(): + return _soundfile_available + + +def is_timm_available(): + return _timm_available + + +def is_natten_available(): + return _natten_available + + +def is_nltk_available(): + return _nltk_available + + +def is_torchaudio_available(): + return _torchaudio_available + + +def is_speech_available(): + # For now this depends on torchaudio but the exact dependency might evolve in the future. + return _torchaudio_available + + +def is_phonemizer_available(): + return _phonemizer_available + + +def torch_only_method(fn): + def wrapper(*args, **kwargs): + if not _torch_available: + raise ImportError( + "You need to install pytorch to use this method or class, " + "or activate it with environment variables USE_TORCH=1 and USE_TF=0." + ) + else: + return fn(*args, **kwargs) + + return wrapper + + +def is_ccl_available(): + return _is_ccl_available + + +def is_decord_available(): + return _decord_available + + +def is_sudachi_available(): + return _sudachipy_available + + +def get_sudachi_version(): + return _sudachipy_version + + +def is_sudachi_projection_available(): + if not is_sudachi_available(): + return False + + # NOTE: We require sudachipy>=0.6.8 to use projection option in sudachi_kwargs for the constructor of BertJapaneseTokenizer. + # - `projection` option is not supported in sudachipy<0.6.8, see https://github.com/WorksApplications/sudachi.rs/issues/230 + return version.parse(_sudachipy_version) >= version.parse("0.6.8") + + +def is_jumanpp_available(): + return (importlib.util.find_spec("rhoknp") is not None) and (shutil.which("jumanpp") is not None) + + +def is_cython_available(): + return importlib.util.find_spec("pyximport") is not None + + +def is_jieba_available(): + return _jieba_available + + +def is_jinja_available(): + return _jinja_available + + +def is_mlx_available(): + return _mlx_available + + +# docstyle-ignore +AV_IMPORT_ERROR = """ +{0} requires the PyAv library but it was not found in your environment. You can install it with: +``` +pip install av +``` +Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +CV2_IMPORT_ERROR = """ +{0} requires the OpenCV library but it was not found in your environment. You can install it with: +``` +pip install opencv-python +``` +Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +DATASETS_IMPORT_ERROR = """ +{0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with: +``` +pip install datasets +``` +In a notebook or a colab, you can install it by executing a cell with +``` +!pip install datasets +``` +then restarting your kernel. + +Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current +working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or +that python file if that's the case. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +TOKENIZERS_IMPORT_ERROR = """ +{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with: +``` +pip install tokenizers +``` +In a notebook or a colab, you can install it by executing a cell with +``` +!pip install tokenizers +``` +Please note that you may need to restart your runtime after installation. 
+""" + + +# docstyle-ignore +SENTENCEPIECE_IMPORT_ERROR = """ +{0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the +installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones +that match your environment. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +PROTOBUF_IMPORT_ERROR = """ +{0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the +installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones +that match your environment. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +FAISS_IMPORT_ERROR = """ +{0} requires the faiss library but it was not found in your environment. Checkout the instructions on the +installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones +that match your environment. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +PYTORCH_IMPORT_ERROR = """ +{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the +installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. +Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +TORCHVISION_IMPORT_ERROR = """ +{0} requires the Torchvision library but it was not found in your environment. Checkout the instructions on the +installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. +Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +PYTORCH_IMPORT_ERROR_WITH_TF = """ +{0} requires the PyTorch library but it was not found in your environment. +However, we were able to find a TensorFlow installation. TensorFlow classes begin +with "TF", but are otherwise identically named to our PyTorch classes. This +means that the TF equivalent of the class you tried to import would be "TF{0}". +If you want to use TensorFlow, please use TF classes instead! + +If you really do want to use PyTorch please go to +https://pytorch.org/get-started/locally/ and follow the instructions that +match your environment. +""" + +# docstyle-ignore +TF_IMPORT_ERROR_WITH_PYTORCH = """ +{0} requires the TensorFlow library but it was not found in your environment. +However, we were able to find a PyTorch installation. PyTorch classes do not begin +with "TF", but are otherwise identically named to our TF classes. +If you want to use PyTorch, please use those classes instead! + +If you really do want to use TensorFlow, please follow the instructions on the +installation page https://www.tensorflow.org/install that match your environment. +""" + +# docstyle-ignore +BS4_IMPORT_ERROR = """ +{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip: +`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +SKLEARN_IMPORT_ERROR = """ +{0} requires the scikit-learn library but it was not found in your environment. 
You can install it with: +``` +pip install -U scikit-learn +``` +In a notebook or a colab, you can install it by executing a cell with +``` +!pip install -U scikit-learn +``` +Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +TENSORFLOW_IMPORT_ERROR = """ +{0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the +installation page: https://www.tensorflow.org/install and follow the ones that match your environment. +Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +DETECTRON2_IMPORT_ERROR = """ +{0} requires the detectron2 library but it was not found in your environment. Checkout the instructions on the +installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones +that match your environment. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +FLAX_IMPORT_ERROR = """ +{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the +installation page: https://github.com/google/flax and follow the ones that match your environment. +Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +FTFY_IMPORT_ERROR = """ +{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the +installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones +that match your environment. Please note that you may need to restart your runtime after installation. +""" + +LEVENSHTEIN_IMPORT_ERROR = """ +{0} requires the python-Levenshtein library but it was not found in your environment. You can install it with pip: `pip +install python-Levenshtein`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +G2P_EN_IMPORT_ERROR = """ +{0} requires the g2p-en library but it was not found in your environment. You can install it with pip: +`pip install g2p-en`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +PYTORCH_QUANTIZATION_IMPORT_ERROR = """ +{0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip: +`pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com` +Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +TENSORFLOW_PROBABILITY_IMPORT_ERROR = """ +{0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as +explained here: https://github.com/tensorflow/probability. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +TENSORFLOW_TEXT_IMPORT_ERROR = """ +{0} requires the tensorflow_text library but it was not found in your environment. You can install it with pip as +explained here: https://www.tensorflow.org/text/guide/tf_text_intro. +Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +PANDAS_IMPORT_ERROR = """ +{0} requires the pandas library but it was not found in your environment. You can install it with pip as +explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html. +Please note that you may need to restart your runtime after installation. 
+""" + + +# docstyle-ignore +PHONEMIZER_IMPORT_ERROR = """ +{0} requires the phonemizer library but it was not found in your environment. You can install it with pip: +`pip install phonemizer`. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +SACREMOSES_IMPORT_ERROR = """ +{0} requires the sacremoses library but it was not found in your environment. You can install it with pip: +`pip install sacremoses`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +SCIPY_IMPORT_ERROR = """ +{0} requires the scipy library but it was not found in your environment. You can install it with pip: +`pip install scipy`. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +SPEECH_IMPORT_ERROR = """ +{0} requires the torchaudio library but it was not found in your environment. You can install it with pip: +`pip install torchaudio`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +TIMM_IMPORT_ERROR = """ +{0} requires the timm library but it was not found in your environment. You can install it with pip: +`pip install timm`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +NATTEN_IMPORT_ERROR = """ +{0} requires the natten library but it was not found in your environment. You can install it by referring to: +shi-labs.com/natten . You can also install it with pip (may take longer to build): +`pip install natten`. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +NLTK_IMPORT_ERROR = """ +{0} requires the NLTK library but it was not found in your environment. You can install it by referring to: +https://www.nltk.org/install.html. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +VISION_IMPORT_ERROR = """ +{0} requires the PIL library but it was not found in your environment. You can install it with pip: +`pip install pillow`. Please note that you may need to restart your runtime after installation. +""" + + +# docstyle-ignore +PYTESSERACT_IMPORT_ERROR = """ +{0} requires the PyTesseract library but it was not found in your environment. You can install it with pip: +`pip install pytesseract`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +PYCTCDECODE_IMPORT_ERROR = """ +{0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip: +`pip install pyctcdecode`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +ACCELERATE_IMPORT_ERROR = """ +{0} requires the accelerate library >= {ACCELERATE_MIN_VERSION} it was not found in your environment. +You can install or update it with pip: `pip install --upgrade accelerate`. Please note that you may need to restart your +runtime after installation. +""" + +# docstyle-ignore +CCL_IMPORT_ERROR = """ +{0} requires the torch ccl library but it was not found in your environment. You can install it with pip: +`pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable` +Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +ESSENTIA_IMPORT_ERROR = """ +{0} requires essentia library. But that was not found in your environment. 
You can install them with pip: +`pip install essentia==2.1b6.dev1034` +Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +LIBROSA_IMPORT_ERROR = """ +{0} requires thes librosa library. But that was not found in your environment. You can install them with pip: +`pip install librosa` +Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +PRETTY_MIDI_IMPORT_ERROR = """ +{0} requires thes pretty_midi library. But that was not found in your environment. You can install them with pip: +`pip install pretty_midi` +Please note that you may need to restart your runtime after installation. +""" + +DECORD_IMPORT_ERROR = """ +{0} requires the decord library but it was not found in your environment. You can install it with pip: `pip install +decord`. Please note that you may need to restart your runtime after installation. +""" + +CYTHON_IMPORT_ERROR = """ +{0} requires the Cython library but it was not found in your environment. You can install it with pip: `pip install +Cython`. Please note that you may need to restart your runtime after installation. +""" + +JIEBA_IMPORT_ERROR = """ +{0} requires the jieba library but it was not found in your environment. You can install it with pip: `pip install +jieba`. Please note that you may need to restart your runtime after installation. +""" + +PEFT_IMPORT_ERROR = """ +{0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install +peft`. Please note that you may need to restart your runtime after installation. +""" + +JINJA_IMPORT_ERROR = """ +{0} requires the jinja library but it was not found in your environment. You can install it with pip: `pip install +jinja2`. Please note that you may need to restart your runtime after installation. 
+""" + +BACKENDS_MAPPING = OrderedDict( + [ + ("av", (is_av_available, AV_IMPORT_ERROR)), + ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), + ("cv2", (is_cv2_available, CV2_IMPORT_ERROR)), + ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)), + ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)), + ("essentia", (is_essentia_available, ESSENTIA_IMPORT_ERROR)), + ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)), + ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), + ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), + ("g2p_en", (is_g2p_en_available, G2P_EN_IMPORT_ERROR)), + ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)), + ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)), + ("pretty_midi", (is_pretty_midi_available, PRETTY_MIDI_IMPORT_ERROR)), + ("levenshtein", (is_levenshtein_available, LEVENSHTEIN_IMPORT_ERROR)), + ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), + ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)), + ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)), + ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)), + ("sacremoses", (is_sacremoses_available, SACREMOSES_IMPORT_ERROR)), + ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)), + ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), + ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)), + ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)), + ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)), + ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), + ("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)), + ("timm", (is_timm_available, TIMM_IMPORT_ERROR)), + ("natten", (is_natten_available, NATTEN_IMPORT_ERROR)), + ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)), + ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)), + ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), + ("torchvision", (is_torchvision_available, TORCHVISION_IMPORT_ERROR)), + ("vision", (is_vision_available, VISION_IMPORT_ERROR)), + ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), + ("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)), + ("oneccl_bind_pt", (is_ccl_available, CCL_IMPORT_ERROR)), + ("decord", (is_decord_available, DECORD_IMPORT_ERROR)), + ("cython", (is_cython_available, CYTHON_IMPORT_ERROR)), + ("jieba", (is_jieba_available, JIEBA_IMPORT_ERROR)), + ("peft", (is_peft_available, PEFT_IMPORT_ERROR)), + ("jinja", (is_jinja_available, JINJA_IMPORT_ERROR)), + ] +) + + +def requires_backends(obj, backends): + if not isinstance(backends, (list, tuple)): + backends = [backends] + + name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ + + # Raise an error for users who might not realize that classes without "TF" are torch-only + if "torch" in backends and "tf" not in backends and not is_torch_available() and is_tf_available(): + raise ImportError(PYTORCH_IMPORT_ERROR_WITH_TF.format(name)) + + # Raise the inverse error for PyTorch users trying to load TF classes + if "tf" in backends and "torch" not in backends and is_torch_available() and not is_tf_available(): + raise ImportError(TF_IMPORT_ERROR_WITH_PYTORCH.format(name)) + + checks = (BACKENDS_MAPPING[backend] for backend in backends) + failed = [msg.format(name) for available, msg in checks if not available()] + if failed: + raise ImportError("".join(failed)) + + +class 
DummyObject(type): + """ + Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by + `requires_backend` each time a user tries to access any method of that class. + """ + + def __getattribute__(cls, key): + if key.startswith("_") and key != "_from_config": + return super().__getattribute__(key) + requires_backends(cls, cls._backends) + + +def is_torch_fx_proxy(x): + if is_torch_fx_available(): + import torch.fx + + return isinstance(x, torch.fx.Proxy) + return False + + +class _LazyModule(ModuleType): + """ + Module class that surfaces all objects but only performs associated imports when the objects are requested. + """ + + # Very heavily inspired by optuna.integration._IntegrationModule + # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py + def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): + super().__init__(name) + self._modules = set(import_structure.keys()) + self._class_to_module = {} + for key, values in import_structure.items(): + for value in values: + self._class_to_module[value] = key + # Needed for autocompletion in an IDE + self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) + self.__file__ = module_file + self.__spec__ = module_spec + self.__path__ = [os.path.dirname(module_file)] + self._objects = {} if extra_objects is None else extra_objects + self._name = name + self._import_structure = import_structure + + # Needed for autocompletion in an IDE + def __dir__(self): + result = super().__dir__() + # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether + # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. + for attr in self.__all__: + if attr not in result: + result.append(attr) + return result + + def __getattr__(self, name: str) -> Any: + if name in self._objects: + return self._objects[name] + if name in self._modules: + value = self._get_module(name) + elif name in self._class_to_module.keys(): + module = self._get_module(self._class_to_module[name]) + value = getattr(module, name) + else: + raise AttributeError(f"module {self.__name__} has no attribute {name}") + + setattr(self, name, value) + return value + + def _get_module(self, module_name: str): + try: + return importlib.import_module("." + module_name, self.__name__) + except Exception as e: + raise RuntimeError( + f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its" + f" traceback):\n{e}" + ) from e + + def __reduce__(self): + return (self.__class__, (self._name, self.__file__, self._import_structure)) + + +class OptionalDependencyNotAvailable(BaseException): + """Internally used error class for signalling an optional dependency was not found.""" + + +def direct_transformers_import(path: str, file="__init__.py") -> ModuleType: + """Imports transformers directly + + Args: + path (`str`): The path to the source file + file (`str`, optional): The file to join with the path. Defaults to "__init__.py". 
+ + Returns: + `ModuleType`: The resulting imported module + """ + name = "transformers" + location = os.path.join(path, file) + spec = importlib.util.spec_from_file_location(name, location, submodule_search_locations=[path]) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + module = sys.modules[name] + return module diff --git a/venv/lib/python3.10/site-packages/transformers/utils/logging.py b/venv/lib/python3.10/site-packages/transformers/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..3471e5ab66c62d0763472824cc19ec582639b00d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/logging.py @@ -0,0 +1,396 @@ +# coding=utf-8 +# Copyright 2020 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Logging utilities.""" + + +import functools +import logging +import os +import sys +import threading +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) +from logging import captureWarnings as _captureWarnings +from typing import Optional + +import huggingface_hub.utils as hf_hub_utils +from tqdm import auto as tqdm_lib + + +_lock = threading.Lock() +_default_handler: Optional[logging.Handler] = None + +log_levels = { + "detail": logging.DEBUG, # will also print filename and line number + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + +_tqdm_active = not hf_hub_utils.are_progress_bars_disabled() + + +def _get_default_logging_level(): + """ + If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is + not - fall back to `_default_log_level` + """ + env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, " + f"has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + global _default_handler + + with _lock: + if _default_handler: + # This library has already configured the library root logger. + return + _default_handler = logging.StreamHandler() # Set sys.stderr as stream. + # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176 + if sys.stderr is None: + sys.stderr = open(os.devnull, "w") + + _default_handler.flush = sys.stderr.flush + + # Apply our default configuration to the library root logger. 
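+        # (Added note) The default level applied here can be overridden per process via the
+        # TRANSFORMERS_VERBOSITY environment variable, e.g. TRANSFORMERS_VERBOSITY=debug,
+        # or TRANSFORMERS_VERBOSITY=detail for output annotated with file and line number.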
+ library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(_default_handler) + library_root_logger.setLevel(_get_default_logging_level()) + # if logging level is debug, we add pathname and lineno to formatter for easy debugging + if os.getenv("TRANSFORMERS_VERBOSITY", None) == "detail": + formatter = logging.Formatter("[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s") + _default_handler.setFormatter(formatter) + + library_root_logger.propagate = False + + +def _reset_library_root_logger() -> None: + global _default_handler + + with _lock: + if not _default_handler: + return + + library_root_logger = _get_library_root_logger() + library_root_logger.removeHandler(_default_handler) + library_root_logger.setLevel(logging.NOTSET) + _default_handler = None + + +def get_log_levels_dict(): + return log_levels + + +def captureWarnings(capture): + """ + Calls the `captureWarnings` method from the logging library to enable management of the warnings emitted by the + `warnings` library. + + Read more about this method here: + https://docs.python.org/3/library/logging.html#integration-with-the-warnings-module + + All warnings will be logged through the `py.warnings` logger. + + Careful: this method also adds a handler to this logger if it does not already have one, and updates the logging + level of that logger to the library's root logger. + """ + logger = get_logger("py.warnings") + + if not logger.handlers: + logger.addHandler(_default_handler) + + logger.setLevel(_get_library_root_logger().level) + + _captureWarnings(capture) + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """ + Return a logger with the specified name. + + This function is not supposed to be directly accessed unless you are writing a custom transformers module. + """ + + if name is None: + name = _get_library_name() + + _configure_library_root_logger() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """ + Return the current level for the 🤗 Transformers's root logger as an int. + + Returns: + `int`: The logging level. + + + + 🤗 Transformers has following logging levels: + + - 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL` + - 40: `transformers.logging.ERROR` + - 30: `transformers.logging.WARNING` or `transformers.logging.WARN` + - 20: `transformers.logging.INFO` + - 10: `transformers.logging.DEBUG` + + """ + + _configure_library_root_logger() + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """ + Set the verbosity level for the 🤗 Transformers's root logger. 
+
+    Args:
+        verbosity (`int`):
+            Logging level, e.g., one of:
+
+            - `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
+            - `transformers.logging.ERROR`
+            - `transformers.logging.WARNING` or `transformers.logging.WARN`
+            - `transformers.logging.INFO`
+            - `transformers.logging.DEBUG`
+    """
+
+    _configure_library_root_logger()
+    _get_library_root_logger().setLevel(verbosity)
+
+
+def set_verbosity_info():
+    """Set the verbosity to the `INFO` level."""
+    return set_verbosity(INFO)
+
+
+def set_verbosity_warning():
+    """Set the verbosity to the `WARNING` level."""
+    return set_verbosity(WARNING)
+
+
+def set_verbosity_debug():
+    """Set the verbosity to the `DEBUG` level."""
+    return set_verbosity(DEBUG)
+
+
+def set_verbosity_error():
+    """Set the verbosity to the `ERROR` level."""
+    return set_verbosity(ERROR)
+
+
+def disable_default_handler() -> None:
+    """Disable the default handler of the HuggingFace Transformers's root logger."""
+
+    _configure_library_root_logger()
+
+    assert _default_handler is not None
+    _get_library_root_logger().removeHandler(_default_handler)
+
+
+def enable_default_handler() -> None:
+    """Enable the default handler of the HuggingFace Transformers's root logger."""
+
+    _configure_library_root_logger()
+
+    assert _default_handler is not None
+    _get_library_root_logger().addHandler(_default_handler)
+
+
+def add_handler(handler: logging.Handler) -> None:
+    """Adds a handler to the HuggingFace Transformers's root logger."""
+
+    _configure_library_root_logger()
+
+    assert handler is not None
+    _get_library_root_logger().addHandler(handler)
+
+
+def remove_handler(handler: logging.Handler) -> None:
+    """Removes the given handler from the HuggingFace Transformers's root logger."""
+
+    _configure_library_root_logger()
+
+    assert handler is not None and handler in _get_library_root_logger().handlers
+    _get_library_root_logger().removeHandler(handler)
+
+
+def disable_propagation() -> None:
+    """
+    Disable propagation of the library log outputs. Note that log propagation is disabled by default.
+    """
+
+    _configure_library_root_logger()
+    _get_library_root_logger().propagate = False
+
+
+def enable_propagation() -> None:
+    """
+    Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to
+    prevent double logging if the root logger has been configured.
+    """
+
+    _configure_library_root_logger()
+    _get_library_root_logger().propagate = True
+
+
+def enable_explicit_format() -> None:
+    """
+    Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows:
+    ```
+    [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
+    ```
+    All handlers currently bound to the root logger are affected by this method.
+    """
+    handlers = _get_library_root_logger().handlers
+
+    for handler in handlers:
+        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
+        handler.setFormatter(formatter)
+
+
+def reset_format() -> None:
+    """
+    Resets the formatting for HuggingFace Transformers's loggers.
+
+    All handlers currently bound to the root logger are affected by this method.
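+
+    Example (illustrative)::
+
+        import transformers
+
+        transformers.utils.logging.enable_explicit_format()
+        # ... later, drop the explicit formatter again:
+        transformers.utils.logging.reset_format()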
+ """ + handlers = _get_library_root_logger().handlers + + for handler in handlers: + handler.setFormatter(None) + + +def warning_advice(self, *args, **kwargs): + """ + This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this + warning will not be printed + """ + no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False) + if no_advisory_warnings: + return + self.warning(*args, **kwargs) + + +logging.Logger.warning_advice = warning_advice + + +@functools.lru_cache(None) +def warning_once(self, *args, **kwargs): + """ + This method is identical to `logger.warning()`, but will emit the warning with the same message only once + + Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. + The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to + another type of cache that includes the caller frame information in the hashing function. + """ + self.warning(*args, **kwargs) + + +logging.Logger.warning_once = warning_once + + +class EmptyTqdm: + """Dummy tqdm which doesn't do anything.""" + + def __init__(self, *args, **kwargs): # pylint: disable=unused-argument + self._iterator = args[0] if args else None + + def __iter__(self): + return iter(self._iterator) + + def __getattr__(self, _): + """Return empty function.""" + + def empty_fn(*args, **kwargs): # pylint: disable=unused-argument + return + + return empty_fn + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + return + + +class _tqdm_cls: + def __call__(self, *args, **kwargs): + if _tqdm_active: + return tqdm_lib.tqdm(*args, **kwargs) + else: + return EmptyTqdm(*args, **kwargs) + + def set_lock(self, *args, **kwargs): + self._lock = None + if _tqdm_active: + return tqdm_lib.tqdm.set_lock(*args, **kwargs) + + def get_lock(self): + if _tqdm_active: + return tqdm_lib.tqdm.get_lock() + + +tqdm = _tqdm_cls() + + +def is_progress_bar_enabled() -> bool: + """Return a boolean indicating whether tqdm progress bars are enabled.""" + global _tqdm_active + return bool(_tqdm_active) + + +def enable_progress_bar(): + """Enable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = True + hf_hub_utils.enable_progress_bars() + + +def disable_progress_bar(): + """Disable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = False + hf_hub_utils.disable_progress_bars() diff --git a/venv/lib/python3.10/site-packages/transformers/utils/model_parallel_utils.py b/venv/lib/python3.10/site-packages/transformers/utils/model_parallel_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7ec79a5e23cbc976e2cb26934abe94e7bbe890d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/model_parallel_utils.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from math import ceil + + +def assert_device_map(device_map, num_blocks): + blocks = list(range(0, num_blocks)) + + device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist] + + # Duplicate check + duplicate_blocks = [] + for i in device_map_blocks: + if device_map_blocks.count(i) > 1 and i not in duplicate_blocks: + duplicate_blocks.append(i) + # Missing blocks + missing_blocks = [i for i in blocks if i not in device_map_blocks] + extra_blocks = [i for i in device_map_blocks if i not in blocks] + + if len(duplicate_blocks) != 0: + raise ValueError( + "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device." + " These attention blocks were specified more than once: " + str(duplicate_blocks) + ) + if len(missing_blocks) != 0: + raise ValueError( + "There are attention blocks for this model that are not specified in the device_map. Add these attention " + "blocks to a device on the device_map: " + str(missing_blocks) + ) + if len(extra_blocks) != 0: + raise ValueError( + "The device_map contains more attention blocks than this model has. Remove these from the device_map:" + + str(extra_blocks) + ) + + +def get_device_map(n_layers, devices): + """Returns a dictionary of layers distributed evenly across all devices.""" + layers = list(range(n_layers)) + n_blocks = int(ceil(n_layers / len(devices))) + layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)] + + return dict(zip(devices, layers_list)) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/notebook.py b/venv/lib/python3.10/site-packages/transformers/utils/notebook.py new file mode 100644 index 0000000000000000000000000000000000000000..f7396642732e585e450f5b345e9dcf74eb294b78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/notebook.py @@ -0,0 +1,378 @@ +# coding=utf-8 +# Copyright 2020 Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import time +from typing import Optional + +import IPython.display as disp + +from ..trainer_callback import TrainerCallback +from ..trainer_utils import IntervalStrategy, has_length + + +def format_time(t): + "Format `t` (in seconds) to (h):mm:ss" + t = int(t) + h, m, s = t // 3600, (t // 60) % 60, t % 60 + return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}" + + +def html_progress_bar(value, total, prefix, label, width=300): + # docstyle-ignore + return f""" +
+    <div>
+      {prefix}
+      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
+      {label}
+    </div>
+    """
+
+
+def text_to_html_table(items):
+    "Put the texts in `items` in an HTML table."
+    html_code = """<table border="1" class="dataframe">\n"""
+    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
+    for i in items[0]:
+        html_code += f"      <th>{i}</th>\n"
+    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
+    for line in items[1:]:
+        html_code += "    <tr>\n"
+        for elt in line:
+            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
+            html_code += f"      <td>{elt}</td>\n"
+        html_code += "    </tr>\n"
+    html_code += "  </tbody>\n</table><p>"
" + return html_code + + +class NotebookProgressBar: + """ + A progress par for display in a notebook. + + Class attributes (overridden by derived classes) + + - **warmup** (`int`) -- The number of iterations to do at the beginning while ignoring `update_every`. + - **update_every** (`float`) -- Since calling the time takes some time, we only do it every presumed + `update_every` seconds. The progress bar uses the average time passed up until now to guess the next value + for which it will call the update. + + Args: + total (`int`): + The total number of iterations to reach. + prefix (`str`, *optional*): + A prefix to add before the progress bar. + leave (`bool`, *optional*, defaults to `True`): + Whether or not to leave the progress bar once it's completed. You can always call the + [`~utils.notebook.NotebookProgressBar.close`] method to make the bar disappear. + parent ([`~notebook.NotebookTrainingTracker`], *optional*): + A parent object (like [`~utils.notebook.NotebookTrainingTracker`]) that spawns progress bars and handle + their display. If set, the object passed must have a `display()` method. + width (`int`, *optional*, defaults to 300): + The width (in pixels) that the bar will take. + + Example: + + ```python + import time + + pbar = NotebookProgressBar(100) + for val in range(100): + pbar.update(val) + time.sleep(0.07) + pbar.update(100) + ```""" + + warmup = 5 + update_every = 0.2 + + def __init__( + self, + total: int, + prefix: Optional[str] = None, + leave: bool = True, + parent: Optional["NotebookTrainingTracker"] = None, + width: int = 300, + ): + self.total = total + self.prefix = "" if prefix is None else prefix + self.leave = leave + self.parent = parent + self.width = width + self.last_value = None + self.comment = None + self.output = None + + def update(self, value: int, force_update: bool = False, comment: str = None): + """ + The main method to update the progress bar to `value`. + + Args: + value (`int`): + The value to use. Must be between 0 and `total`. + force_update (`bool`, *optional*, defaults to `False`): + Whether or not to force and update of the internal state and display (by default, the bar will wait for + `value` to reach the value it predicted corresponds to a time of more than the `update_every` attribute + since the last update to avoid adding boilerplate). + comment (`str`, *optional*): + A comment to add on the left of the progress bar. + """ + self.value = value + if comment is not None: + self.comment = comment + if self.last_value is None: + self.start_time = self.last_time = time.time() + self.start_value = self.last_value = value + self.elapsed_time = self.predicted_remaining = None + self.first_calls = self.warmup + self.wait_for = 1 + self.update_bar(value) + elif value <= self.last_value and not force_update: + return + elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total): + if self.first_calls > 0: + self.first_calls -= 1 + current_time = time.time() + self.elapsed_time = current_time - self.start_time + # We could have value = self.start_value if the update is called twixe with the same start value. 
+ if value > self.start_value: + self.average_time_per_item = self.elapsed_time / (value - self.start_value) + else: + self.average_time_per_item = None + if value >= self.total: + value = self.total + self.predicted_remaining = None + if not self.leave: + self.close() + elif self.average_time_per_item is not None: + self.predicted_remaining = self.average_time_per_item * (self.total - value) + self.update_bar(value) + self.last_value = value + self.last_time = current_time + if (self.average_time_per_item is None) or (self.average_time_per_item == 0): + self.wait_for = 1 + else: + self.wait_for = max(int(self.update_every / self.average_time_per_item), 1) + + def update_bar(self, value, comment=None): + spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value) + if self.elapsed_time is None: + self.label = f"[{spaced_value}/{self.total} : < :" + elif self.predicted_remaining is None: + self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}" + else: + self.label = ( + f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <" + f" {format_time(self.predicted_remaining)}" + ) + if self.average_time_per_item == 0: + self.label += ", +inf it/s" + else: + self.label += f", {1/self.average_time_per_item:.2f} it/s" + + self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]" + self.display() + + def display(self): + self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width) + if self.parent is not None: + # If this is a child bar, the parent will take care of the display. + self.parent.display() + return + if self.output is None: + self.output = disp.display(disp.HTML(self.html_code), display_id=True) + else: + self.output.update(disp.HTML(self.html_code)) + + def close(self): + "Closes the progress bar." + if self.parent is None and self.output is not None: + self.output.update(disp.HTML("")) + + +class NotebookTrainingTracker(NotebookProgressBar): + """ + An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics. + + Args: + num_steps (`int`): The number of steps during training. column_names (`List[str]`, *optional*): + The list of column names for the metrics table (will be inferred from the first call to + [`~utils.notebook.NotebookTrainingTracker.write_line`] if not set). + """ + + def __init__(self, num_steps, column_names=None): + super().__init__(num_steps) + self.inner_table = None if column_names is None else [column_names] + self.child_bar = None + + def display(self): + self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width) + if self.inner_table is not None: + self.html_code += text_to_html_table(self.inner_table) + if self.child_bar is not None: + self.html_code += self.child_bar.html_code + if self.output is None: + self.output = disp.display(disp.HTML(self.html_code), display_id=True) + else: + self.output.update(disp.HTML(self.html_code)) + + def write_line(self, values): + """ + Write the values in the inner table. + + Args: + values (`Dict[str, float]`): The values to display. 
+ """ + if self.inner_table is None: + self.inner_table = [list(values.keys()), list(values.values())] + else: + columns = self.inner_table[0] + for key in values.keys(): + if key not in columns: + columns.append(key) + self.inner_table[0] = columns + if len(self.inner_table) > 1: + last_values = self.inner_table[-1] + first_column = self.inner_table[0][0] + if last_values[0] != values[first_column]: + # write new line + self.inner_table.append([values[c] if c in values else "No Log" for c in columns]) + else: + # update last line + new_values = values + for c in columns: + if c not in new_values.keys(): + new_values[c] = last_values[columns.index(c)] + self.inner_table[-1] = [new_values[c] for c in columns] + else: + self.inner_table.append([values[c] for c in columns]) + + def add_child(self, total, prefix=None, width=300): + """ + Add a child progress bar displayed under the table of metrics. The child progress bar is returned (so it can be + easily updated). + + Args: + total (`int`): The number of iterations for the child progress bar. + prefix (`str`, *optional*): A prefix to write on the left of the progress bar. + width (`int`, *optional*, defaults to 300): The width (in pixels) of the progress bar. + """ + self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width) + return self.child_bar + + def remove_child(self): + """ + Closes the child progress bar. + """ + self.child_bar = None + self.display() + + +class NotebookProgressCallback(TrainerCallback): + """ + A [`TrainerCallback`] that displays the progress of training or evaluation, optimized for Jupyter Notebooks or + Google colab. + """ + + def __init__(self): + self.training_tracker = None + self.prediction_bar = None + self._force_next_update = False + + def on_train_begin(self, args, state, control, **kwargs): + self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" + self.training_loss = 0 + self.last_log = 0 + column_names = [self.first_column] + ["Training Loss"] + if args.evaluation_strategy != IntervalStrategy.NO: + column_names.append("Validation Loss") + self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) + + def on_step_end(self, args, state, control, **kwargs): + epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}" + self.training_tracker.update( + state.global_step + 1, + comment=f"Epoch {epoch}/{state.num_train_epochs}", + force_update=self._force_next_update, + ) + self._force_next_update = False + + def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): + if not has_length(eval_dataloader): + return + if self.prediction_bar is None: + if self.training_tracker is not None: + self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader)) + else: + self.prediction_bar = NotebookProgressBar(len(eval_dataloader)) + self.prediction_bar.update(1) + else: + self.prediction_bar.update(self.prediction_bar.value + 1) + + def on_predict(self, args, state, control, **kwargs): + if self.prediction_bar is not None: + self.prediction_bar.close() + self.prediction_bar = None + + def on_log(self, args, state, control, logs=None, **kwargs): + # Only for when there is no evaluation + if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: + values = {"Training Loss": logs["loss"]} + # First column is necessarily Step sine we're not in epoch eval strategy + values["Step"] = state.global_step + self.training_tracker.write_line(values) + + def 
on_evaluate(self, args, state, control, metrics=None, **kwargs): + if self.training_tracker is not None: + values = {"Training Loss": "No log", "Validation Loss": "No log"} + for log in reversed(state.log_history): + if "loss" in log: + values["Training Loss"] = log["loss"] + break + + if self.first_column == "Epoch": + values["Epoch"] = int(state.epoch) + else: + values["Step"] = state.global_step + metric_key_prefix = "eval" + for k in metrics: + if k.endswith("_loss"): + metric_key_prefix = re.sub(r"\_loss$", "", k) + _ = metrics.pop("total_flos", None) + _ = metrics.pop("epoch", None) + _ = metrics.pop(f"{metric_key_prefix}_runtime", None) + _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None) + _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None) + _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None) + for k, v in metrics.items(): + splits = k.split("_") + name = " ".join([part.capitalize() for part in splits[1:]]) + if name == "Loss": + # Single dataset + name = "Validation Loss" + values[name] = v + self.training_tracker.write_line(values) + self.training_tracker.remove_child() + self.prediction_bar = None + # Evaluation takes a long time so we should force the next update. + self._force_next_update = True + + def on_train_end(self, args, state, control, **kwargs): + self.training_tracker.update( + state.global_step, + comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", + force_update=True, + ) + self.training_tracker = None diff --git a/venv/lib/python3.10/site-packages/transformers/utils/peft_utils.py b/venv/lib/python3.10/site-packages/transformers/utils/peft_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2078f1ae960955b7c615819f25081978fdab2563 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/peft_utils.py @@ -0,0 +1,124 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import os +from typing import Dict, Optional, Union + +from packaging import version + +from .hub import cached_file +from .import_utils import is_peft_available + + +ADAPTER_CONFIG_NAME = "adapter_config.json" +ADAPTER_WEIGHTS_NAME = "adapter_model.bin" +ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors" + + +def find_adapter_config_file( + model_id: str, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + subfolder: str = "", + _commit_hash: Optional[str] = None, +) -> Optional[str]: + r""" + Simply checks if the model stored on the Hub or locally is an adapter model or not, return the path of the adapter + config file if it is, None otherwise. + + Args: + model_id (`str`): + The identifier of the model to look for, can be either a local path or an id to the repository on the Hub. 
+ cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + + + + To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. + + + + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + """ + adapter_cached_filename = None + if model_id is None: + return None + elif os.path.isdir(model_id): + list_remote_files = os.listdir(model_id) + if ADAPTER_CONFIG_NAME in list_remote_files: + adapter_cached_filename = os.path.join(model_id, ADAPTER_CONFIG_NAME) + else: + adapter_cached_filename = cached_file( + model_id, + ADAPTER_CONFIG_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + subfolder=subfolder, + _commit_hash=_commit_hash, + _raise_exceptions_for_gated_repo=False, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + ) + + return adapter_cached_filename + + +def check_peft_version(min_version: str) -> None: + r""" + Checks if the version of PEFT is compatible. + + Args: + min_version (`str`): + The minimum version of PEFT to check against. + """ + if not is_peft_available(): + raise ValueError("PEFT is not installed. Please install it with `pip install peft`") + + is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version) + + if not is_peft_version_compatible: + raise ValueError( + f"The version of PEFT you are using is not compatible, please use a version that is greater" + f" than or equal to {min_version}" + ) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/quantization_config.py b/venv/lib/python3.10/site-packages/transformers/utils/quantization_config.py new file mode 100644 index 0000000000000000000000000000000000000000..d91ecef16e37e158c7eb4bbe9072b31d01d691de --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/quantization_config.py @@ -0,0 +1,895 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import importlib.metadata +import json +import os +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, List, Optional, Union + +from packaging import version + +from ..utils import is_auto_awq_available, is_torch_available, logging + + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +class QuantizationMethod(str, Enum): + BITS_AND_BYTES = "bitsandbytes" + GPTQ = "gptq" + AWQ = "awq" + AQLM = "aqlm" + QUANTO = "quanto" + + +class AWQLinearVersion(str, Enum): + GEMM = "gemm" + GEMV = "gemv" + EXLLAMA = "exllama" + + @staticmethod + def from_str(version: str): + version = version.lower() + if version == "gemm": + return AWQLinearVersion.GEMM + elif version == "gemv": + return AWQLinearVersion.GEMV + elif version == "exllama": + return AWQLinearVersion.EXLLAMA + else: + raise ValueError(f"Unknown AWQLinearVersion {version}") + + +class AwqBackendPackingMethod(str, Enum): + AUTOAWQ = "autoawq" + LLMAWQ = "llm-awq" + + +@dataclass +class QuantizationConfigMixin: + """ + Mixin class for quantization config + """ + + quant_method: QuantizationMethod + + @classmethod + def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs): + """ + Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters. + + Args: + config_dict (`Dict[str, Any]`): + Dictionary that will be used to instantiate the configuration object. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + Whether or not to return a dictionary of unused keyword arguments. Used for the `from_pretrained` method in + `PreTrainedModel`. + kwargs (`Dict[str, Any]`): + Additional parameters from which to initialize the configuration object. + + Returns: + [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters. + """ + + config = cls(**config_dict) + + to_remove = [] + for key, value in kwargs.items(): + if hasattr(config, key): + setattr(config, key, value) + to_remove.append(key) + for key in to_remove: + kwargs.pop(key, None) + + if return_unused_kwargs: + return config, kwargs + else: + return config + + def to_json_file(self, json_file_path: Union[str, os.PathLike]): + """ + Save this instance to a JSON file. + + Args: + json_file_path (`str` or `os.PathLike`): + Path to the JSON file in which this configuration instance's parameters will be saved. + """ + with open(json_file_path, "w", encoding="utf-8") as writer: + config_dict = self.to_dict() + json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n" + + writer.write(json_string) + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. + + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. 
+ """ + return copy.deepcopy(self.__dict__) + + def __iter__(self): + """allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin""" + for attr, value in copy.deepcopy(self.__dict__).items(): + yield attr, value + + def __repr__(self): + return f"{self.__class__.__name__} {self.to_json_string()}" + + def to_json_string(self, use_diff: bool = True) -> str: + """ + Serializes this instance to a JSON string. + + Args: + use_diff (`bool`, *optional*, defaults to `True`): + If set to `True`, only the difference between the config instance and the default `PretrainedConfig()` + is serialized to JSON string. + + Returns: + `str`: String containing all the attributes that make up this configuration instance in JSON format. + """ + if use_diff is True: + config_dict = self.to_diff_dict() + else: + config_dict = self.to_dict() + return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" + + def update(self, **kwargs): + """ + Updates attributes of this class instance with attributes from `kwargs` if they match existing atributtes, + returning all the unused kwargs. + + Args: + kwargs (`Dict[str, Any]`): + Dictionary of attributes to tentatively update this class. + + Returns: + `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance. + """ + to_remove = [] + for key, value in kwargs.items(): + if hasattr(self, key): + setattr(self, key, value) + to_remove.append(key) + + # Remove all the attributes that were updated, without modifying the input dict + unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove} + return unused_kwargs + + +@dataclass +class BitsAndBytesConfig(QuantizationConfigMixin): + """ + This is a wrapper class about all possible attributes and features that you can play with a model that has been + loaded using `bitsandbytes`. + + This replaces `load_in_8bit` or `load_in_4bit`therefore both options are mutually exclusive. + + Currently only supports `LLM.int8()`, `FP4`, and `NF4` quantization. If more methods are added to `bitsandbytes`, + then more arguments will be added to this class. + + Args: + load_in_8bit (`bool`, *optional*, defaults to `False`): + This flag is used to enable 8-bit quantization with LLM.int8(). + load_in_4bit (`bool`, *optional*, defaults to `False`): + This flag is used to enable 4-bit quantization by replacing the Linear layers with FP4/NF4 layers from + `bitsandbytes`. + llm_int8_threshold (`float`, *optional*, defaults to 6.0): + This corresponds to the outlier threshold for outlier detection as described in `LLM.int8() : 8-bit Matrix + Multiplication for Transformers at Scale` paper: https://arxiv.org/abs/2208.07339 Any hidden states value + that is above this threshold will be considered an outlier and the operation on those values will be done + in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but + there are some exceptional systematic outliers that are very differently distributed for large models. + These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of + magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, + but a lower threshold might be needed for more unstable models (small models, fine-tuning). + llm_int8_skip_modules (`List[str]`, *optional*): + An explicit list of the modules that we do not want to convert in 8-bit. 
This is useful for models such as + Jukebox that have several heads in different places and not necessarily at the last position. For example + for `CausalLM` models, the last `lm_head` is kept in its original `dtype`. + llm_int8_enable_fp32_cpu_offload (`bool`, *optional*, defaults to `False`): + This flag is used for advanced use cases and users that are aware of this feature. If you want to split + your model in different parts and run some parts in int8 on GPU and some parts in fp32 on CPU, you can use + this flag. This is useful for offloading large models such as `google/flan-t5-xxl`. Note that the int8 + operations will not be run on CPU. + llm_int8_has_fp16_weight (`bool`, *optional*, defaults to `False`): + This flag runs LLM.int8() with 16-bit main weights. This is useful for fine-tuning as the weights do not + have to be converted back and forth for the backward pass. + bnb_4bit_compute_dtype (`torch.dtype` or str, *optional*, defaults to `torch.float32`): + This sets the computational type which might be different from the input type. For example, inputs might be + fp32, but computation can be set to bf16 for speedups. + bnb_4bit_quant_type (`str`, *optional*, defaults to `"fp4"`): + This sets the quantization data type in the bnb.nn.Linear4Bit layers. Options are FP4 and NF4 data types + which are specified by `fp4` or `nf4`. + bnb_4bit_use_double_quant (`bool`, *optional*, defaults to `False`): + This flag is used for nested quantization where the quantization constants from the first quantization are + quantized again. + bnb_4bit_quant_storage (`torch.dtype` or str, *optional*, defaults to `torch.uint8`): + This sets the storage type used to pack the quantized 4-bit params. + kwargs (`Dict[str, Any]`, *optional*): + Additional parameters from which to initialize the configuration object. 
+ """ + + def __init__( + self, + load_in_8bit=False, + load_in_4bit=False, + llm_int8_threshold=6.0, + llm_int8_skip_modules=None, + llm_int8_enable_fp32_cpu_offload=False, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=None, + bnb_4bit_quant_type="fp4", + bnb_4bit_use_double_quant=False, + bnb_4bit_quant_storage=None, + **kwargs, + ): + self.quant_method = QuantizationMethod.BITS_AND_BYTES + + if load_in_4bit and load_in_8bit: + raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") + + self._load_in_8bit = load_in_8bit + self._load_in_4bit = load_in_4bit + self.llm_int8_threshold = llm_int8_threshold + self.llm_int8_skip_modules = llm_int8_skip_modules + self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload + self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight + self.bnb_4bit_quant_type = bnb_4bit_quant_type + self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant + + if bnb_4bit_compute_dtype is None: + self.bnb_4bit_compute_dtype = torch.float32 + elif isinstance(bnb_4bit_compute_dtype, str): + self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype) + elif isinstance(bnb_4bit_compute_dtype, torch.dtype): + self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype + else: + raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") + + if bnb_4bit_quant_storage is None: + self.bnb_4bit_quant_storage = torch.uint8 + elif isinstance(bnb_4bit_quant_storage, str): + self.bnb_4bit_quant_storage = getattr(torch, bnb_4bit_quant_storage) + elif isinstance(bnb_4bit_quant_storage, torch.dtype): + self.bnb_4bit_quant_storage = bnb_4bit_quant_storage + else: + raise ValueError("bnb_4bit_quant_storage must be a string or a torch.dtype") + + if kwargs: + logger.warning(f"Unused kwargs: {list(kwargs.keys())}. These kwargs are not used in {self.__class__}.") + + self.post_init() + + @property + def load_in_4bit(self): + return self._load_in_4bit + + @load_in_4bit.setter + def load_in_4bit(self, value: bool): + if not isinstance(value, bool): + raise ValueError("load_in_4bit must be a boolean") + + if self.load_in_8bit and value: + raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") + self._load_in_4bit = value + + @property + def load_in_8bit(self): + return self._load_in_8bit + + @load_in_8bit.setter + def load_in_8bit(self, value: bool): + if not isinstance(value, bool): + raise ValueError("load_in_8bit must be a boolean") + + if self.load_in_4bit and value: + raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") + self._load_in_8bit = value + + def post_init(self): + r""" + Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. 
+ """ + if not isinstance(self.load_in_4bit, bool): + raise ValueError("load_in_4bit must be a boolean") + + if not isinstance(self.load_in_8bit, bool): + raise ValueError("load_in_8bit must be a boolean") + + if not isinstance(self.llm_int8_threshold, float): + raise ValueError("llm_int8_threshold must be a float") + + if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list): + raise ValueError("llm_int8_skip_modules must be a list of strings") + if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool): + raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean") + + if not isinstance(self.llm_int8_has_fp16_weight, bool): + raise ValueError("llm_int8_has_fp16_weight must be a boolean") + + if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): + raise ValueError("bnb_4bit_compute_dtype must be torch.dtype") + + if not isinstance(self.bnb_4bit_quant_type, str): + raise ValueError("bnb_4bit_quant_type must be a string") + + if not isinstance(self.bnb_4bit_use_double_quant, bool): + raise ValueError("bnb_4bit_use_double_quant must be a boolean") + + if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse( + "0.39.0" + ): + raise ValueError( + "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" + ) + + def is_quantizable(self): + r""" + Returns `True` if the model is quantizable, `False` otherwise. + """ + return self.load_in_8bit or self.load_in_4bit + + def quantization_method(self): + r""" + This method returns the quantization method used for the model. If the model is not quantizable, it returns + `None`. + """ + if self.load_in_8bit: + return "llm_int8" + elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4": + return "fp4" + elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4": + return "nf4" + else: + return None + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. + """ + output = copy.deepcopy(self.__dict__) + output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1] + output["bnb_4bit_quant_storage"] = str(output["bnb_4bit_quant_storage"]).split(".")[1] + output["load_in_4bit"] = self.load_in_4bit + output["load_in_8bit"] = self.load_in_8bit + + return output + + def __repr__(self): + config_dict = self.to_dict() + return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n" + + def to_diff_dict(self) -> Dict[str, Any]: + """ + Removes all attributes from config which correspond to the default config attributes for better readability and + serializes to a Python dictionary. 
+ + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. + """ + config_dict = self.to_dict() + + # get the default config dict + default_config_dict = BitsAndBytesConfig().to_dict() + + serializable_config_dict = {} + + # only serialize values that differ from the default config + for key, value in config_dict.items(): + if value != default_config_dict[key]: + serializable_config_dict[key] = value + + return serializable_config_dict + + +class ExllamaVersion(int, Enum): + ONE = 1 + TWO = 2 + + +@dataclass +class GPTQConfig(QuantizationConfigMixin): + """ + This is a wrapper class about all possible attributes and features that you can play with a model that has been + loaded using the `optimum` API for GPTQ quantization, relying on the auto_gptq backend. + + Args: + bits (`int`): + The number of bits to quantize to, supported numbers are (2, 3, 4, 8). + tokenizer (`str` or `PreTrainedTokenizerBase`, *optional*): + The tokenizer used to process the dataset. You can pass either: + - A custom tokenizer object. + - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. + - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved + using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + dataset (`Union[List[str], str]`, *optional*): + The dataset used for quantization. You can provide your own dataset as a list of strings or just use one of the + original datasets used in the GPTQ paper ['wikitext2','c4','c4-new','ptb','ptb-new']. + group_size (`int`, *optional*, defaults to 128): + The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. + damp_percent (`float`, *optional*, defaults to 0.1): + The percent of the average Hessian diagonal to use for dampening. Recommended value is 0.1. + desc_act (`bool`, *optional*, defaults to `False`): + Whether to quantize columns in order of decreasing activation size. Setting it to False can significantly + speed up inference but the perplexity may become slightly worse. Also known as act-order. + sym (`bool`, *optional*, defaults to `True`): + Whether to use symmetric quantization. + true_sequential (`bool`, *optional*, defaults to `True`): + Whether to perform sequential quantization even within a single Transformer block. Instead of quantizing + the entire block at once, we perform layer-wise quantization. As a result, each layer undergoes + quantization using inputs that have passed through the previously quantized layers. + use_cuda_fp16 (`bool`, *optional*, defaults to `False`): + Whether or not to use the optimized CUDA kernel for fp16 models. Requires the model to be in fp16. + model_seqlen (`int`, *optional*): + The maximum sequence length that the model can take. + block_name_to_quantize (`str`, *optional*): + The transformers block name to quantize. If None, we will infer the block name using common patterns (e.g. model.layers) + module_name_preceding_first_block (`List[str]`, *optional*): + The layers that are preceding the first Transformer block. + batch_size (`int`, *optional*, defaults to 1): + The batch size used when processing the dataset. + pad_token_id (`int`, *optional*): + The pad token id. Needed to prepare the dataset when `batch_size` > 1. + use_exllama (`bool`, *optional*): + Whether to use exllama backend. Defaults to `True` if unset. Only works with `bits` = 4. + max_input_length (`int`, *optional*): + The maximum input length. 
This is needed to initialize a buffer that depends on the maximum expected input + length. It is specific to the exllama backend with act-order. + exllama_config (`Dict[str, Any]`, *optional*): + The exllama config. You can specify the version of the exllama kernel through the `version` key. Defaults + to `{"version": 1}` if unset. + cache_block_outputs (`bool`, *optional*, defaults to `True`): + Whether to cache block outputs to reuse as inputs for the succeeding block. + modules_in_block_to_quantize (`List[List[str]]`, *optional*): + List of list of module names to quantize in the specified block. This argument is useful to exclude certain linear modules from being quantized. + The block to quantize can be specified by setting `block_name_to_quantize`. We will quantize each list sequentially. If not set, we will quantize all linear layers. + Example: `modules_in_block_to_quantize =[["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"], ["self_attn.o_proj"]]`. + In this example, we will first quantize the q,k,v layers simultaneously since they are independent. + Then, we will quantize `self_attn.o_proj` layer with the q,k,v layers quantized. This way, we will get + better results since it reflects the real input `self_attn.o_proj` will get when the model is quantized. + """ + + def __init__( + self, + bits: int, + tokenizer: Any = None, + dataset: Optional[Union[List[str], str]] = None, + group_size: int = 128, + damp_percent: float = 0.1, + desc_act: bool = False, + sym: bool = True, + true_sequential: bool = True, + use_cuda_fp16: bool = False, + model_seqlen: Optional[int] = None, + block_name_to_quantize: Optional[str] = None, + module_name_preceding_first_block: Optional[List[str]] = None, + batch_size: int = 1, + pad_token_id: Optional[int] = None, + use_exllama: Optional[bool] = None, + max_input_length: Optional[int] = None, + exllama_config: Optional[Dict[str, Any]] = None, + cache_block_outputs: bool = True, + modules_in_block_to_quantize: Optional[List[List[str]]] = None, + **kwargs, + ): + self.quant_method = QuantizationMethod.GPTQ + self.bits = bits + self.tokenizer = tokenizer + self.dataset = dataset + self.group_size = group_size + self.damp_percent = damp_percent + self.desc_act = desc_act + self.sym = sym + self.true_sequential = true_sequential + self.use_cuda_fp16 = use_cuda_fp16 + self.model_seqlen = model_seqlen + self.block_name_to_quantize = block_name_to_quantize + self.module_name_preceding_first_block = module_name_preceding_first_block + self.batch_size = batch_size + self.pad_token_id = pad_token_id + self.use_exllama = use_exllama + self.max_input_length = max_input_length + self.exllama_config = exllama_config + self.disable_exllama = kwargs.pop("disable_exllama", None) + self.cache_block_outputs = cache_block_outputs + self.modules_in_block_to_quantize = modules_in_block_to_quantize + self.post_init() + + def get_loading_attributes(self): + attibutes_dict = copy.deepcopy(self.__dict__) + loading_attibutes = ["disable_exllama", "use_exllama", "exllama_config", "use_cuda_fp16", "max_input_length"] + loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes} + return loading_attibutes_dict + + def post_init(self): + r""" + Safety checker that arguments are correct + """ + if self.bits not in [2, 3, 4, 8]: + raise ValueError(f"Only support quantization to [2,3,4,8] bits but found {self.bits}") + if self.group_size != -1 and self.group_size <= 0: + raise ValueError("group_size must be greater than 0 or equal to -1") + if 
not (0 < self.damp_percent < 1): + raise ValueError("damp_percent must be between 0 and 1.") + if self.dataset is not None: + if isinstance(self.dataset, str): + if self.dataset not in ["wikitext2", "c4", "c4-new", "ptb", "ptb-new"]: + raise ValueError( + f"""You have entered a string value for dataset. You can only choose between + ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" + ) + elif not isinstance(self.dataset, list): + raise ValueError( + f"""dataset needs to be either a list of string or a value in + ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" + ) + + if self.disable_exllama is None and self.use_exllama is None: + # New default behaviour + self.use_exllama = True + elif self.disable_exllama is not None and self.use_exllama is None: + # Follow pattern of old config + logger.warning( + "Using `disable_exllama` is deprecated and will be removed in version 4.37. Use `use_exllama` instead and specify the version with `exllama_config`. " + "The value of `use_exllama` will be overwritten by `disable_exllama` passed in `GPTQConfig` or stored in your config file." + ) + self.use_exllama = not self.disable_exllama + self.disable_exllama = None + elif self.disable_exllama is not None and self.use_exllama is not None: + # Only happens if user explicitly passes in both arguments + raise ValueError("Cannot specify both `disable_exllama` and `use_exllama`. Please use just `use_exllama`.") + + if self.exllama_config is None: + self.exllama_config = {"version": ExllamaVersion.ONE} + else: + if "version" not in self.exllama_config: + raise ValueError("`exllama_config` needs to have a `version` key.") + elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]: + exllama_version = self.exllama_config["version"] + raise ValueError( + f"Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {exllama_version}" + ) + + if self.bits == 4 and self.use_exllama: + if self.exllama_config["version"] == ExllamaVersion.ONE: + logger.info( + "You have activated exllama backend. Note that you can get better inference " + "speed using exllamav2 kernel by setting `exllama_config`." + ) + elif self.exllama_config["version"] == ExllamaVersion.TWO: + optimum_version = version.parse(importlib.metadata.version("optimum")) + autogptq_version = version.parse(importlib.metadata.version("auto_gptq")) + if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"): + raise ValueError( + f"You need optimum > 1.13.2 and auto-gptq > 0.4.2. Make sure to have those versions installed - detected versions: optimum {optimum_version} and autogptq {autogptq_version}" + ) + if self.modules_in_block_to_quantize is not None: + optimum_version = version.parse(importlib.metadata.version("optimum")) + if optimum_version < version.parse("1.15.0"): + raise ValueError( + "Your current version of `optimum` does not support the `modules_in_block_to_quantize` quantization argument, please upgrade the `optimum` package to a version higher than 1.15.0." 
+ ) + + def to_dict(self): + config_dict = super().to_dict() + config_dict.pop("disable_exllama", None) + return config_dict + + def to_dict_optimum(self): + """ + Get compatible dict for optimum gptq config + """ + quant_dict = self.to_dict() + # make it compatible with optimum config + quant_dict["disable_exllama"] = not self.use_exllama + return quant_dict + + @classmethod + def from_dict_optimum(cls, config_dict): + """ + Get compatible class with optimum gptq config dict + """ + + if "disable_exllama" in config_dict: + config_dict["use_exllama"] = not config_dict["disable_exllama"] + # switch to None to not trigger the warning + config_dict["disable_exllama"] = None + + config = cls(**config_dict) + return config + + +@dataclass +class AwqConfig(QuantizationConfigMixin): + """ + This is a wrapper class about all possible attributes and features that you can play with a model that has been + loaded using the `auto-awq` library for AWQ quantization, relying on the auto_awq backend. + + Args: + bits (`int`, *optional*, defaults to 4): + The number of bits to quantize to. + group_size (`int`, *optional*, defaults to 128): + The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. + zero_point (`bool`, *optional*, defaults to `True`): + Whether to use zero point quantization. + version (`AWQLinearVersion`, *optional*, defaults to `AWQLinearVersion.GEMM`): + The version of the quantization algorithm to use. GEMM is better for big batch sizes (e.g. >= 8); otherwise, + GEMV is better (e.g. < 8). GEMM models are compatible with Exllama kernels. + backend (`AwqBackendPackingMethod`, *optional*, defaults to `AwqBackendPackingMethod.AUTOAWQ`): + The quantization backend. Some models might be quantized using the `llm-awq` backend. This is useful for users + that quantize their own models using the `llm-awq` library. + do_fuse (`bool`, *optional*, defaults to `False`): + Whether to fuse attention and mlp layers together for faster inference. + fuse_max_seq_len (`int`, *optional*): + The maximum sequence length to generate when using fusing. + modules_to_fuse (`dict`, *optional*, defaults to `None`): + Overwrite the natively supported fusing scheme with the one specified by the user. + modules_to_not_convert (`list`, *optional*, defaults to `None`): + The list of modules to not quantize, useful for quantizing models that explicitly require to have + some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers). + Note you cannot quantize directly with transformers, please refer to `AutoAWQ` documentation for quantizing HF models. + exllama_config (`Dict[str, Any]`, *optional*): + You can specify the version of the exllama kernel through the `version` key, the maximum sequence + length through the `max_input_len` key, and the maximum batch size through the `max_batch_size` key. + Defaults to `{"version": 2, "max_input_len": 2048, "max_batch_size": 8}` if unset. 
+ """ + + def __init__( + self, + bits: int = 4, + group_size: int = 128, + zero_point: bool = True, + version: AWQLinearVersion = AWQLinearVersion.GEMM, + backend: AwqBackendPackingMethod = AwqBackendPackingMethod.AUTOAWQ, + do_fuse: Optional[bool] = None, + fuse_max_seq_len: Optional[int] = None, + modules_to_fuse: Optional[dict] = None, + modules_to_not_convert: Optional[List] = None, + exllama_config: Optional[Dict[str, int]] = None, + **kwargs, + ): + self.quant_method = QuantizationMethod.AWQ + + self.bits = bits + self.group_size = group_size + self.zero_point = zero_point + self.version = version + self.backend = backend + self.fuse_max_seq_len = fuse_max_seq_len + self.modules_to_not_convert = modules_to_not_convert + self.exllama_config = exllama_config + + self.modules_to_fuse = modules_to_fuse + if do_fuse is None: + self.do_fuse = modules_to_fuse is not None and len(modules_to_fuse) > 0 + else: + self.do_fuse = do_fuse + self.fuse_max_seq_len = fuse_max_seq_len + + self.post_init() + + def post_init(self): + r""" + Safety checker that arguments are correct + """ + if not torch.cuda.is_available(): + raise ValueError("AWQ is only available on GPU") + + if self.backend not in [AwqBackendPackingMethod.AUTOAWQ, AwqBackendPackingMethod.LLMAWQ]: + raise ValueError( + f"Only supported quantization backends in {AwqBackendPackingMethod.AUTOAWQ} and {AwqBackendPackingMethod.LLMAWQ} - not recognized backend {self.backend}" + ) + + self.version = AWQLinearVersion.from_str(self.version) + if self.version not in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV, AWQLinearVersion.EXLLAMA]: + raise ValueError( + f"Only supported versions are in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV, AWQLinearVersion.EXLLAMA] - not recognized version {self.version}" + ) + + if self.backend == AwqBackendPackingMethod.LLMAWQ: + compute_capability = torch.cuda.get_device_capability() + major, minor = compute_capability + if major < 8: + raise ValueError("LLM-AWQ backend is only supported on GPUs with compute capability >= 8.0") + + if self.do_fuse and self.fuse_max_seq_len is None: + raise ValueError( + "You cannot enable fused modules without specifying a `fuse_max_seq_len`, make sure to pass a valid `fuse_max_seq_len` for your usecase" + ) + + if self.do_fuse: + awq_version_supports_fusing = False + MIN_AWQ_VERSION = "0.1.7" + if is_auto_awq_available(): + awq_version_supports_fusing = version.parse(importlib.metadata.version("autoawq")) >= version.parse( + MIN_AWQ_VERSION + ) + + if not awq_version_supports_fusing: + raise ValueError( + f"You current version of `autoawq` does not support module fusing, please upgrade `autoawq` package to at least {MIN_AWQ_VERSION}." + ) + + if self.modules_to_not_convert is not None: + awq_version_supports_non_conversion = False + MIN_AWQ_VERSION = "0.1.8" + if is_auto_awq_available(): + awq_version_supports_non_conversion = version.parse( + importlib.metadata.version("autoawq") + ) >= version.parse(MIN_AWQ_VERSION) + + if not awq_version_supports_non_conversion: + raise ValueError( + f"You current version of `autoawq` does not support module quantization skipping, please upgrade `autoawq` package to at least {MIN_AWQ_VERSION}." 
+ ) + + if self.do_fuse and self.modules_to_fuse is not None: + required_keys = [ + "hidden_size", + "num_attention_heads", + "num_key_value_heads", + "mlp", + "attention", + "layernorm", + "use_alibi", + ] + if not all(key in self.modules_to_fuse for key in required_keys): + raise ValueError( + f"Required fields are missing in the fusing mapping, required fields are {required_keys}" + ) + + if self.version == AWQLinearVersion.EXLLAMA: + awq_version_supports_exllama = False + MIN_AWQ_VERSION = "0.2.0" + if is_auto_awq_available(): + awq_version_supports_exllama = version.parse(importlib.metadata.version("autoawq")) >= version.parse( + MIN_AWQ_VERSION + ) + + if not awq_version_supports_exllama: + raise ValueError( + f"Your current version of `autoawq` does not support the exllama backend, " + f"please upgrade the `autoawq` package to at least {MIN_AWQ_VERSION}." + ) + + if self.exllama_config is None: + self.exllama_config = {"version": ExllamaVersion.TWO, "max_input_len": 2048, "max_batch_size": 8} + else: + if "version" not in self.exllama_config: + raise ValueError("`exllama_config` needs to have a `version` key.") + elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]: + exllama_version = self.exllama_config["version"] + raise ValueError( + f"Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {exllama_version}" + ) + + def get_loading_attributes(self): + attibutes_dict = copy.deepcopy(self.__dict__) + loading_attibutes = ["version", "do_fuse", "modules_to_fuse", "fuse_max_seq_len", "exllama_config"] + loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes} + return loading_attibutes_dict + + +@dataclass +class AqlmConfig(QuantizationConfigMixin): + """ + This is a wrapper class about `aqlm` parameters. + + Args: + in_group_size (`int`, *optional*, defaults to 8): + The group size along the input dimension. + out_group_size (`int`, *optional*, defaults to 1): + The group size along the output dimension. It's recommended to always use 1. + num_codebooks (`int`, *optional*, defaults to 1): + Number of codebooks for the Additive Quantization procedure. + nbits_per_codebook (`int`, *optional*, defaults to 16): + Number of bits encoding a single codebook vector. Codebook size is 2**nbits_per_codebook. + linear_weights_not_to_quantize (`Optional[List[str]]`, *optional*): + List of full paths of `nn.Linear` weight parameters that shall not be quantized. + kwargs (`Dict[str, Any]`, *optional*): + Additional parameters from which to initialize the configuration object. + """ + + def __init__( + self, + in_group_size: int = 8, + out_group_size: int = 1, + num_codebooks: int = 1, + nbits_per_codebook: int = 16, + linear_weights_not_to_quantize: Optional[List[str]] = None, + **kwargs, + ): + self.quant_method = QuantizationMethod.AQLM + self.in_group_size = in_group_size + self.out_group_size = out_group_size + self.num_codebooks = num_codebooks + self.nbits_per_codebook = nbits_per_codebook + self.linear_weights_not_to_quantize = linear_weights_not_to_quantize + + self.post_init() + + def post_init(self): + r""" + Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. 
+ """ + if not isinstance(self.in_group_size, int): + raise ValueError("in_group_size must be a float") + if not isinstance(self.out_group_size, int): + raise ValueError("out_group_size must be a float") + if not isinstance(self.num_codebooks, int): + raise ValueError("num_codebooks must be a float") + if not isinstance(self.nbits_per_codebook, int): + raise ValueError("nbits_per_codebook must be a float") + + if self.linear_weights_not_to_quantize is not None and not isinstance( + self.linear_weights_not_to_quantize, list + ): + raise ValueError("linear_weights_not_to_quantize must be a list of strings") + + if self.linear_weights_not_to_quantize is None: + self.linear_weights_not_to_quantize = [] + + +@dataclass +class QuantoConfig(QuantizationConfigMixin): + """ + This is a wrapper class about all possible attributes and features that you can play with a model that has been + loaded using `quanto`. + + Args: + weights (`str`, *optional*, defaults to `"int8"`): + The target dtype for the weights after quantization. Supported values are ("float8","int8","int4","int2") + activations (`str`, *optional*): + The target dtype for the activations after quantization. Supported values are (None,"int8","float8") + modules_to_not_convert (`list`, *optional*, default to `None`): + The list of modules to not quantize, useful for quantizing models that explicitly require to have + some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers). + """ + + def __init__( + self, + weights="int8", + activations=None, + modules_to_not_convert: Optional[List] = None, + **kwargs, + ): + self.quant_method = QuantizationMethod.QUANTO + self.weights = weights + self.activations = activations + self.modules_to_not_convert = modules_to_not_convert + self.post_init() + + def post_init(self): + r""" + Safety checker that arguments are correct + """ + accepted_weights = ["float8", "int8", "int4", "int2"] + accepted_activations = [None, "int8", "float8"] + if self.weights not in accepted_weights: + raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights}") + if self.activations not in accepted_activations: + raise ValueError(f"Only support weights in {accepted_activations} but found {self.activations}") diff --git a/venv/lib/python3.10/site-packages/transformers/utils/sentencepiece_model_pb2_new.py b/venv/lib/python3.10/site-packages/transformers/utils/sentencepiece_model_pb2_new.py new file mode 100644 index 0000000000000000000000000000000000000000..4a2e29b1bdc308c4522e7ae283a10bfa1749991e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/sentencepiece_model_pb2_new.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: sentencepiece_model.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03\x12\x17\n\teos_piece\x18/ \x01(\t:\x04\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' +) + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) +if _descriptor._USE_C_DESCRIPTORS is False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b"H\003" + # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) + # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None + # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" + # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None + # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" + _globals["_TRAINERSPEC"]._serialized_start = 45 + _globals["_TRAINERSPEC"]._serialized_end = 1581 + _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517 + _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570 + _globals["_NORMALIZERSPEC"]._serialized_start = 1584 + _globals["_NORMALIZERSPEC"]._serialized_end = 1793 + _globals["_SELFTESTDATA"]._serialized_start = 1795 + _globals["_SELFTESTDATA"]._serialized_end = 1916 + _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864 + _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905 + _globals["_MODELPROTO"]._serialized_start = 1919 + _globals["_MODELPROTO"]._serialized_end = 2429 + _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208 + _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418 + _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323 + _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407 +# 
@@protoc_insertion_point(module_scope) diff --git a/venv/lib/python3.10/site-packages/transformers/utils/versions.py b/venv/lib/python3.10/site-packages/transformers/utils/versions.py new file mode 100644 index 0000000000000000000000000000000000000000..85dabec8b659022655ce570b4642febd715b6d52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/utils/versions.py @@ -0,0 +1,117 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Utilities for working with package versions +""" + +import importlib.metadata +import operator +import re +import sys +from typing import Optional + +from packaging import version + + +ops = { + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): + if got_ver is None or want_ver is None: + raise ValueError( + f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" + f" reinstalling {pkg}." + ) + if not ops[op](version.parse(got_ver), version.parse(want_ver)): + raise ImportError( + f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" + ) + + +def require_version(requirement: str, hint: Optional[str] = None) -> None: + """ + Perform a runtime check of the dependency versions, using the exact same syntax used by pip. + + The installed module version comes from the *site-packages* dir via *importlib.metadata*. 
+ + Args: + requirement (`str`): pip-style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy" + hint (`str`, *optional*): What suggestion to print in case of requirements not being met. + + Example: + + ```python + require_version("pandas>1.1.2") + require_version("numpy>1.18.5", "this is important to have for whatever reason") + ```""" + + hint = f"\n{hint}" if hint is not None else "" + + # non-versioned check + if re.match(r"^[\w_\-\d]+$", requirement): + pkg, op, want_ver = requirement, None, None + else: + match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement) + if not match: + raise ValueError( + "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but" + f" got {requirement}" + ) + pkg, want_full = match[0] + want_range = want_full.split(",") # there could be multiple requirements + wanted = {} + for w in want_range: + match = re.findall(r"^([\s!=<>]{1,2})(.+)", w) + if not match: + raise ValueError( + "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23," + f" but got {requirement}" + ) + op, want_ver = match[0] + wanted[op] = want_ver + if op not in ops: + raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}") + + # special case + if pkg == "python": + got_ver = ".".join([str(x) for x in sys.version_info[:3]]) + for op, want_ver in wanted.items(): + _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) + return + + # check if any version is installed + try: + got_ver = importlib.metadata.version(pkg) + except importlib.metadata.PackageNotFoundError: + raise importlib.metadata.PackageNotFoundError( + f"The '{requirement}' distribution was not found and is required by this application. {hint}" + ) + + # check that the right version is installed if version number or a range was provided + if want_ver is not None: + for op, want_ver in wanted.items(): + _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) + + +def require_version_core(requirement): + """require_version wrapper which emits a core-specific hint on failure""" + hint = "Try: `pip install transformers -U` or `pip install -e '.[dev]'` if you're working with git main" + return require_version(requirement, hint)
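A minimal usage sketch of the `require_version` helper defined in versions.py above, assuming an environment where `transformers` is importable; the package names and version specifiers are arbitrary examples chosen for illustration, not requirements of this file:

```python
from transformers.utils.versions import require_version

# Passes silently when the installed version satisfies the specifier; raises
# ImportError (with the optional hint appended) otherwise.
require_version("numpy>=1.18.5", "NumPy is needed for this example")

# Multiple comma-separated constraints are supported, exactly as in pip.
require_version("tokenizers>=0.9.4,<1.0")

# The special package name "python" checks the interpreter version itself.
require_version("python>=3.8")
```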