diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa5d95a85b538171ec9cf4fa16e892df1efdef6b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from argparse import ArgumentParser
+
+
+class BaseTransformersCLICommand(ABC):
+ @staticmethod
+ @abstractmethod
+ def register_subcommand(parser: ArgumentParser):
+ raise NotImplementedError()
+
+ @abstractmethod
+ def run(self):
+ raise NotImplementedError()
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b8315e632e87f3f38fb5a2b5a86e2993eba5f9c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb60fef701d09f662f08f80a06743baafd58c032
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..055e53457bb42a00fa60a4bd673dabb67314265a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..598fbd36531518d78d92ff59e9b8172062a3a28a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..41904be3b5f3dabd0cf79e7b1de386b7c7fbd171
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9109d80e1810db6203df29710aa329e7c73100a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..176679f54c82e9572de155eb3568236d7ca08a58
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e16215adfa833c4989f236af710950674f22461e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfb74bb835b06b40edeaaba732f634fad84c88a5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21590c0d4af3d78dfccaa0b278074e898b4586dd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ca66d39d8d80eb454d7c5dbb4b5d95590e5f6fd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e288802779e379ff35d4e508b0c97a36eb9a061
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..220bb2cb47ec4cbe16115a0c73df0be51369d184
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model.py b/env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..87949827d9f8844f931375f21fcc06df51acb155
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model.py
@@ -0,0 +1,259 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import shutil
+import warnings
+from argparse import ArgumentParser, Namespace
+from pathlib import Path
+from typing import List
+
+from ..utils import logging
+from . import BaseTransformersCLICommand
+
+
+try:
+ from cookiecutter.main import cookiecutter
+
+ _has_cookiecutter = True
+except ImportError:
+ _has_cookiecutter = False
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+def add_new_model_command_factory(args: Namespace):
+ return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
+
+
+class AddNewModelCommand(BaseTransformersCLICommand):
+ @staticmethod
+ def register_subcommand(parser: ArgumentParser):
+ add_new_model_parser = parser.add_parser("add-new-model")
+ add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
+ add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
+ add_new_model_parser.add_argument(
+ "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
+ )
+ add_new_model_parser.set_defaults(func=add_new_model_command_factory)
+
+ def __init__(self, testing: bool, testing_file: str, path=None, *args):
+ self._testing = testing
+ self._testing_file = testing_file
+ self._path = path
+
+ def run(self):
+ warnings.warn(
+ "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
+ "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
+ "checks, you should use `transformers-cli add-new-model-like` instead."
+ )
+ if not _has_cookiecutter:
+ raise ImportError(
+ "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
+ "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
+ )
+ # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
+ directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
+ if len(directories) > 0:
+ raise ValueError(
+ "Several directories starting with `cookiecutter-template-` in current working directory. "
+ "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
+ "change your working directory."
+ )
+
+ path_to_transformer_root = (
+ Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
+ )
+ path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
+
+ # Execute cookiecutter
+ if not self._testing:
+ cookiecutter(str(path_to_cookiecutter))
+ else:
+ with open(self._testing_file, "r") as configuration_file:
+ testing_configuration = json.load(configuration_file)
+
+ cookiecutter(
+ str(path_to_cookiecutter if self._path is None else self._path),
+ no_input=True,
+ extra_context=testing_configuration,
+ )
+
+ directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
+
+ # Retrieve configuration
+ with open(directory + "/configuration.json", "r") as configuration_file:
+ configuration = json.load(configuration_file)
+
+ lowercase_model_name = configuration["lowercase_modelname"]
+ generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
+ os.remove(f"{directory}/configuration.json")
+
+ output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
+ output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
+ output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
+
+ model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
+ os.makedirs(model_dir, exist_ok=True)
+ os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
+
+ # Tests require submodules as they have parent imports
+ with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
+ pass
+
+ shutil.move(
+ f"{directory}/__init__.py",
+ f"{model_dir}/__init__.py",
+ )
+ shutil.move(
+ f"{directory}/configuration_{lowercase_model_name}.py",
+ f"{model_dir}/configuration_{lowercase_model_name}.py",
+ )
+
+ def remove_copy_lines(path):
+ with open(path, "r") as f:
+ lines = f.readlines()
+ with open(path, "w") as f:
+ for line in lines:
+ if "# Copied from transformers." not in line:
+ f.write(line)
+
+ if output_pytorch:
+ if not self._testing:
+ remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
+
+ shutil.move(
+ f"{directory}/modeling_{lowercase_model_name}.py",
+ f"{model_dir}/modeling_{lowercase_model_name}.py",
+ )
+
+ shutil.move(
+ f"{directory}/test_modeling_{lowercase_model_name}.py",
+ f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
+ )
+ else:
+ os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
+ os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
+
+ if output_tensorflow:
+ if not self._testing:
+ remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+
+ shutil.move(
+ f"{directory}/modeling_tf_{lowercase_model_name}.py",
+ f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
+ )
+
+ shutil.move(
+ f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
+ f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
+ )
+ else:
+ os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+ os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
+
+ if output_flax:
+ if not self._testing:
+ remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+
+ shutil.move(
+ f"{directory}/modeling_flax_{lowercase_model_name}.py",
+ f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
+ )
+
+ shutil.move(
+ f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
+ f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
+ )
+ else:
+ os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+ os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
+
+ shutil.move(
+ f"{directory}/{lowercase_model_name}.md",
+ f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
+ )
+
+ shutil.move(
+ f"{directory}/tokenization_{lowercase_model_name}.py",
+ f"{model_dir}/tokenization_{lowercase_model_name}.py",
+ )
+
+ shutil.move(
+ f"{directory}/tokenization_fast_{lowercase_model_name}.py",
+ f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
+ )
+
+ from os import fdopen, remove
+ from shutil import copymode, move
+ from tempfile import mkstemp
+
+ def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
+ # Create temp file
+ fh, abs_path = mkstemp()
+ line_found = False
+ with fdopen(fh, "w") as new_file:
+ with open(original_file) as old_file:
+ for line in old_file:
+ new_file.write(line)
+ if line_to_copy_below in line:
+ line_found = True
+ for line_to_copy in lines_to_copy:
+ new_file.write(line_to_copy)
+
+ if not line_found:
+ raise ValueError(f"Line {line_to_copy_below} was not found in file.")
+
+ # Copy the file permissions from the old file to the new file
+ copymode(original_file, abs_path)
+ # Remove original file
+ remove(original_file)
+ # Move new file
+ move(abs_path, original_file)
+
+ def skip_units(line):
+ return (
+ ("generating PyTorch" in line and not output_pytorch)
+ or ("generating TensorFlow" in line and not output_tensorflow)
+ or ("generating Flax" in line and not output_flax)
+ )
+
+ def replace_in_files(path_to_datafile):
+ with open(path_to_datafile) as datafile:
+ lines_to_copy = []
+ skip_file = False
+ skip_snippet = False
+ for line in datafile:
+ if "# To replace in: " in line and "##" not in line:
+ file_to_replace_in = line.split('"')[1]
+ skip_file = skip_units(line)
+ elif "# Below: " in line and "##" not in line:
+ line_to_copy_below = line.split('"')[1]
+ skip_snippet = skip_units(line)
+ elif "# End." in line and "##" not in line:
+ if not skip_file and not skip_snippet:
+ replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
+
+ lines_to_copy = []
+ elif "# Replace with" in line and "##" not in line:
+ lines_to_copy = []
+ elif "##" not in line:
+ lines_to_copy.append(line)
+
+ remove(path_to_datafile)
+
+ replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
+ os.rmdir(directory)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py b/env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7fcdf19f869f5fb6c51ccd10457747446e20c4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py
@@ -0,0 +1,1763 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import difflib
+import json
+import os
+import re
+from argparse import ArgumentParser, Namespace
+from dataclasses import dataclass
+from datetime import date
+from itertools import chain
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional, Pattern, Tuple, Union
+
+import yaml
+
+from ..models import auto as auto_module
+from ..models.auto.configuration_auto import model_type_to_module_name
+from ..utils import is_flax_available, is_tf_available, is_torch_available, logging
+from . import BaseTransformersCLICommand
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+CURRENT_YEAR = date.today().year
+TRANSFORMERS_PATH = Path(__file__).parent.parent
+REPO_PATH = TRANSFORMERS_PATH.parent.parent
+
+
+@dataclass
+class ModelPatterns:
+ """
+ Holds the basic information about a new model for the add-new-model-like command.
+
+ Args:
+ model_name (`str`): The model name.
+ checkpoint (`str`): The checkpoint to use for doc examples.
+ model_type (`str`, *optional*):
+ The model type, the identifier used internally in the library like `bert` or `xlm-roberta`. Will default to
+ `model_name` lowercased with spaces replaced with minuses (-).
+ model_lower_cased (`str`, *optional*):
+ The lowercased version of the model name, to use for the module name or function names. Will default to
+ `model_name` lowercased with spaces and minuses replaced with underscores.
+ model_camel_cased (`str`, *optional*):
+ The camel-cased version of the model name, to use for the class names. Will default to `model_name`
+ camel-cased (with spaces and minuses both considered as word separators).
+ model_upper_cased (`str`, *optional*):
+ The uppercased version of the model name, to use for the constant names. Will default to `model_name`
+ uppercased with spaces and minuses replaced with underscores.
+ config_class (`str`, *optional*):
+ The configuration class associated with this model. Will default to `"{model_camel_cased}Config"`.
+ tokenizer_class (`str`, *optional*):
+ The tokenizer class associated with this model (leave to `None` for models that don't use a tokenizer).
+ image_processor_class (`str`, *optional*):
+ The image processor class associated with this model (leave to `None` for models that don't use an image
+ processor).
+ feature_extractor_class (`str`, *optional*):
+ The feature extractor class associated with this model (leave to `None` for models that don't use a feature
+ extractor).
+ processor_class (`str`, *optional*):
+ The processor class associated with this model (leave to `None` for models that don't use a processor).
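+
+ Example (illustrative; "GPT-New Dummy" and its checkpoint are invented names, used only to show the derived defaults):
+
+ ```python
+ patterns = ModelPatterns("GPT-New Dummy", checkpoint="gpt-new-dummy-base")
+ patterns.model_type  # "gpt-new-dummy"
+ patterns.model_lower_cased  # "gpt_new_dummy"
+ patterns.model_camel_cased  # "GPTNewDummy"
+ patterns.model_upper_cased  # "GPT_NEW_DUMMY"
+ patterns.config_class  # "GPTNewDummyConfig"
+ ```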
+ """
+
+ model_name: str
+ checkpoint: str
+ model_type: Optional[str] = None
+ model_lower_cased: Optional[str] = None
+ model_camel_cased: Optional[str] = None
+ model_upper_cased: Optional[str] = None
+ config_class: Optional[str] = None
+ tokenizer_class: Optional[str] = None
+ image_processor_class: Optional[str] = None
+ feature_extractor_class: Optional[str] = None
+ processor_class: Optional[str] = None
+
+ def __post_init__(self):
+ if self.model_type is None:
+ self.model_type = self.model_name.lower().replace(" ", "-")
+ if self.model_lower_cased is None:
+ self.model_lower_cased = self.model_name.lower().replace(" ", "_").replace("-", "_")
+ if self.model_camel_cased is None:
+ # Split the model name on - and space
+ words = self.model_name.split(" ")
+ words = list(chain(*[w.split("-") for w in words]))
+ # Make sure each word is capitalized
+ words = [w[0].upper() + w[1:] for w in words]
+ self.model_camel_cased = "".join(words)
+ if self.model_upper_cased is None:
+ self.model_upper_cased = self.model_name.upper().replace(" ", "_").replace("-", "_")
+ if self.config_class is None:
+ self.config_class = f"{self.model_camel_cased}Config"
+
+
+ATTRIBUTE_TO_PLACEHOLDER = {
+ "config_class": "[CONFIG_CLASS]",
+ "tokenizer_class": "[TOKENIZER_CLASS]",
+ "image_processor_class": "[IMAGE_PROCESSOR_CLASS]",
+ "feature_extractor_class": "[FEATURE_EXTRACTOR_CLASS]",
+ "processor_class": "[PROCESSOR_CLASS]",
+ "checkpoint": "[CHECKPOINT]",
+ "model_type": "[MODEL_TYPE]",
+ "model_upper_cased": "[MODEL_UPPER_CASED]",
+ "model_camel_cased": "[MODEL_CAMELCASED]",
+ "model_lower_cased": "[MODEL_LOWER_CASED]",
+ "model_name": "[MODEL_NAME]",
+}
+
+
+def is_empty_line(line: str) -> bool:
+ """
+ Determines whether a line is empty or not.
+ """
+ return len(line) == 0 or line.isspace()
+
+
+def find_indent(line: str) -> int:
+ """
+ Returns the number of spaces that start a line indent.
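+
+ Example (illustrative; the sample strings are made up):
+
+ ```python
+ find_indent("        return x")  # 8
+ find_indent("def f():")  # 0
+ find_indent("")  # 0
+ ```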
+ """
+ search = re.search(r"^(\s*)(?:\S|$)", line)
+ if search is None:
+ return 0
+ return len(search.groups()[0])
+
+
+def parse_module_content(content: str) -> List[str]:
+ """
+ Parse the content of a module in the list of objects it defines.
+
+ Args:
+ content (`str`): The content to parse
+
+ Returns:
+ `List[str]`: The list of objects defined in the module.
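+
+ Example (illustrative; the module content is made up):
+
+ ```python
+ parse_module_content("import os\n\n\nclass Foo:\n    pass\n")
+ # ['import os\n\n', 'class Foo:\n    pass\n']
+ ```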
+ """
+ objects = []
+ current_object = []
+ lines = content.split("\n")
+ # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
+ end_markers = [")", "]", "}", '"""']
+
+ for line in lines:
+ # End of an object
+ is_valid_object = len(current_object) > 0
+ if is_valid_object and len(current_object) == 1:
+ is_valid_object = not current_object[0].startswith("# Copied from")
+ if not is_empty_line(line) and find_indent(line) == 0 and is_valid_object:
+ # Closing parts should be included in current object
+ if line in end_markers:
+ current_object.append(line)
+ objects.append("\n".join(current_object))
+ current_object = []
+ else:
+ objects.append("\n".join(current_object))
+ current_object = [line]
+ else:
+ current_object.append(line)
+
+ # Add last object
+ if len(current_object) > 0:
+ objects.append("\n".join(current_object))
+
+ return objects
+
+
+def extract_block(content: str, indent_level: int = 0) -> str:
+ """Return the first block in `content` with the indent level `indent_level`.
+
+ The first line in `content` should be indented at `indent_level` level, otherwise an error will be thrown.
+
+ This method will immediately stop the search when a (non-empty) line with indent level less than `indent_level` is
+ encountered.
+
+ Args:
+ content (`str`): The content to parse
+ indent_level (`int`, *optional*, defaults to 0): The indent level of the block to search for.
+
+ Returns:
+ `str`: The first block in `content` with the indent level `indent_level`.
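+
+ Example (illustrative; extracts the first block indented at 4 spaces from a made-up body):
+
+ ```python
+ extract_block("    x = 1\n    y = 2\n\nz = 3", indent_level=4)
+ # '    x = 1\n    y = 2\n'
+ ```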
+ """
+ current_object = []
+ lines = content.split("\n")
+ # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
+ end_markers = [")", "]", "}", '"""']
+
+ for idx, line in enumerate(lines):
+ if idx == 0 and indent_level > 0 and not is_empty_line(line) and find_indent(line) != indent_level:
+ raise ValueError(
+ f"When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. Got "
+ f"{find_indent(line)} instead."
+ )
+
+ if find_indent(line) < indent_level and not is_empty_line(line):
+ break
+
+ # End of an object
+ is_valid_object = len(current_object) > 0
+ if (
+ not is_empty_line(line)
+ and not line.endswith(":")
+ and find_indent(line) == indent_level
+ and is_valid_object
+ ):
+ # Closing parts should be included in current object
+ if line.lstrip() in end_markers:
+ current_object.append(line)
+ return "\n".join(current_object)
+ else:
+ current_object.append(line)
+
+ # Add last object
+ if len(current_object) > 0:
+ return "\n".join(current_object)
+
+
+def add_content_to_text(
+ text: str,
+ content: str,
+ add_after: Optional[Union[str, Pattern]] = None,
+ add_before: Optional[Union[str, Pattern]] = None,
+ exact_match: bool = False,
+) -> str:
+ """
+ A utility to add some content inside a given text.
+
+ Args:
+ text (`str`): The text in which we want to insert some content.
+ content (`str`): The content to add.
+ add_after (`str` or `Pattern`):
+ The pattern to test on a line of `text`, the new content is added after the first instance matching it.
+ add_before (`str` or `Pattern`):
+ The pattern to test on a line of `text`, the new content is added before the first instance matching it.
+ exact_match (`bool`, *optional*, defaults to `False`):
+ When `exact_match=True`, a line is considered a match for `add_after` or `add_before` only if it is exactly
+ equal to it; otherwise, a line matches as soon as `add_after`/`add_before` is contained in it.
+
+
+
+ The arguments `add_after` and `add_before` are mutually exclusive, and exactly one of them needs to be provided.
+
+
+
+ Returns:
+ `str`: The text with the new content added if a match was found.
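+
+ Example (illustrative; the text and content are made up):
+
+ ```python
+ text = "import os\nimport re\nimport sys"
+ add_content_to_text(text, "import json", add_after="import re")
+ # 'import os\nimport re\nimport json\nimport sys'
+ ```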
+ """
+ if add_after is None and add_before is None:
+ raise ValueError("You need to pass either `add_after` or `add_before`")
+ if add_after is not None and add_before is not None:
+ raise ValueError("You can't pass both `add_after` or `add_before`")
+ pattern = add_after if add_before is None else add_before
+
+ def this_is_the_line(line):
+ if isinstance(pattern, Pattern):
+ return pattern.search(line) is not None
+ elif exact_match:
+ return pattern == line
+ else:
+ return pattern in line
+
+ new_lines = []
+ for line in text.split("\n"):
+ if this_is_the_line(line):
+ if add_before is not None:
+ new_lines.append(content)
+ new_lines.append(line)
+ if add_after is not None:
+ new_lines.append(content)
+ else:
+ new_lines.append(line)
+
+ return "\n".join(new_lines)
+
+
+def add_content_to_file(
+ file_name: Union[str, os.PathLike],
+ content: str,
+ add_after: Optional[Union[str, Pattern]] = None,
+ add_before: Optional[Union[str, Pattern]] = None,
+ exact_match: bool = False,
+):
+ """
+ A utility to add some content inside a given file.
+
+ Args:
+ file_name (`str` or `os.PathLike`): The name of the file in which we want to insert some content.
+ content (`str`): The content to add.
+ add_after (`str` or `Pattern`):
+ The pattern to test on a line of `text`, the new content is added after the first instance matching it.
+ add_before (`str` or `Pattern`):
+ The pattern to test on a line of `text`, the new content is added before the first instance matching it.
+ exact_match (`bool`, *optional*, defaults to `False`):
+ When `exact_match=True`, a line is considered a match for `add_after` or `add_before` only if it is exactly
+ equal to it; otherwise, a line matches as soon as `add_after`/`add_before` is contained in it.
+
+
+
+ The arguments `add_after` and `add_before` are mutually exclusive, and exactly one of them needs to be provided.
+
+
+ """
+ with open(file_name, "r", encoding="utf-8") as f:
+ old_content = f.read()
+
+ new_content = add_content_to_text(
+ old_content, content, add_after=add_after, add_before=add_before, exact_match=exact_match
+ )
+
+ with open(file_name, "w", encoding="utf-8") as f:
+ f.write(new_content)
+
+
+def replace_model_patterns(
+ text: str, old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns
+) -> Tuple[str, str]:
+ """
+ Replace all patterns present in a given text.
+
+ Args:
+ text (`str`): The text to treat.
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+
+ Returns:
+ `Tuple[str, str]`: A tuple with the treated text and the replacements actually done in it.
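+
+ Example (illustrative; "GPT New" is an invented model name and "gpt-new-base" an invented checkpoint):
+
+ ```python
+ old = ModelPatterns("GPT2", checkpoint="gpt2")
+ new = ModelPatterns("GPT New", checkpoint="gpt-new-base")
+ replace_model_patterns("class GPT2Config(PretrainedConfig):", old, new)
+ # ('class GPTNewConfig(PretrainedConfig):', 'GPT2Config->GPTNewConfig')
+ ```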
+ """
+ # The order is crucially important as we will check and replace in that order. For instance the config class name
+ # probably contains the camel-cased model name, but it will be treated before.
+ attributes_to_check = ["config_class"]
+ # Add relevant preprocessing classes
+ for attr in ["tokenizer_class", "image_processor_class", "feature_extractor_class", "processor_class"]:
+ if getattr(old_model_patterns, attr) is not None and getattr(new_model_patterns, attr) is not None:
+ attributes_to_check.append(attr)
+
+ # Special cases for checkpoint and model_type
+ if old_model_patterns.checkpoint not in [old_model_patterns.model_type, old_model_patterns.model_lower_cased]:
+ attributes_to_check.append("checkpoint")
+ if old_model_patterns.model_type != old_model_patterns.model_lower_cased:
+ attributes_to_check.append("model_type")
+ else:
+ text = re.sub(
+ rf'(\s*)model_type = "{old_model_patterns.model_type}"',
+ r'\1model_type = "[MODEL_TYPE]"',
+ text,
+ )
+
+ # Special case when the model camel cased and upper cased names are the same for the old model (like for GPT2) but
+ # not the new one. We can't just do a replace in all the text and will need a special regex
+ if old_model_patterns.model_upper_cased == old_model_patterns.model_camel_cased:
+ old_model_value = old_model_patterns.model_upper_cased
+ if re.search(rf"{old_model_value}_[A-Z_]*[^A-Z_]", text) is not None:
+ text = re.sub(rf"{old_model_value}([A-Z_]*)([^a-zA-Z_])", r"[MODEL_UPPER_CASED]\1\2", text)
+ else:
+ attributes_to_check.append("model_upper_cased")
+
+ attributes_to_check.extend(["model_camel_cased", "model_lower_cased", "model_name"])
+
+ # Now let's replace every other attribute by their placeholder
+ for attr in attributes_to_check:
+ text = text.replace(getattr(old_model_patterns, attr), ATTRIBUTE_TO_PLACEHOLDER[attr])
+
+ # Finally we can replace the placeholders by the new values.
+ replacements = []
+ for attr, placeholder in ATTRIBUTE_TO_PLACEHOLDER.items():
+ if placeholder in text:
+ replacements.append((getattr(old_model_patterns, attr), getattr(new_model_patterns, attr)))
+ text = text.replace(placeholder, getattr(new_model_patterns, attr))
+
+ # If we have two inconsistent replacements, we don't return anything (ex: GPT2->GPT_NEW and GPT2->GPTNew)
+ old_replacement_values = [old for old, new in replacements]
+ if len(set(old_replacement_values)) != len(old_replacement_values):
+ return text, ""
+
+ replacements = simplify_replacements(replacements)
+ replacements = [f"{old}->{new}" for old, new in replacements]
+ return text, ",".join(replacements)
+
+
+def simplify_replacements(replacements):
+ """
+ Simplify a list of replacement patterns to make sure there are no needless ones.
+
+ For instance in the sequence "Bert->BertNew, BertConfig->BertNewConfig, bert->bert_new", the replacement
+ "BertConfig->BertNewConfig" is implied by "Bert->BertNew" so not needed.
+
+ Args:
+ replacements (`List[Tuple[str, str]]`): List of patterns (old, new)
+
+ Returns:
+ `List[Tuple[str, str]]`: The list of patterns simplified.
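+
+ Example (the sequence described above):
+
+ ```python
+ simplify_replacements([("Bert", "BertNew"), ("BertConfig", "BertNewConfig"), ("bert", "bert_new")])
+ # [('Bert', 'BertNew'), ('bert', 'bert_new')]
+ ```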
+ """
+ if len(replacements) <= 1:
+ # Nothing to simplify
+ return replacements
+
+ # Next let's sort replacements by length as a replacement can only "imply" another replacement if it's shorter.
+ replacements.sort(key=lambda x: len(x[0]))
+
+ idx = 0
+ while idx < len(replacements):
+ old, new = replacements[idx]
+ # Loop through all replacements after
+ j = idx + 1
+ while j < len(replacements):
+ old_2, new_2 = replacements[j]
+ # If the replacement is implied by the current one, we can drop it.
+ if old_2.replace(old, new) == new_2:
+ replacements.pop(j)
+ else:
+ j += 1
+ idx += 1
+
+ return replacements
+
+
+def get_module_from_file(module_file: Union[str, os.PathLike]) -> str:
+ """
+ Returns the module name corresponding to a module file.
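+
+ Example (illustrative; the path below is made up and assumes a POSIX filesystem):
+
+ ```python
+ get_module_from_file("/opt/lib/transformers/models/bert/modeling_bert.py")
+ # 'transformers.models.bert.modeling_bert'
+ ```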
+ """
+ full_module_path = Path(module_file).absolute()
+ module_parts = full_module_path.with_suffix("").parts
+
+ # Find the first part named transformers, starting from the end.
+ idx = len(module_parts) - 1
+ while idx >= 0 and module_parts[idx] != "transformers":
+ idx -= 1
+ if idx < 0:
+ raise ValueError(f"{module_file} is not a transformers module.")
+
+ return ".".join(module_parts[idx:])
+
+
+SPECIAL_PATTERNS = {
+ "_CHECKPOINT_FOR_DOC =": "checkpoint",
+ "_CONFIG_FOR_DOC =": "config_class",
+ "_TOKENIZER_FOR_DOC =": "tokenizer_class",
+ "_IMAGE_PROCESSOR_FOR_DOC =": "image_processor_class",
+ "_FEAT_EXTRACTOR_FOR_DOC =": "feature_extractor_class",
+ "_PROCESSOR_FOR_DOC =": "processor_class",
+}
+
+
+_re_class_func = re.compile(r"^(?:class|def)\s+([^\s:\(]+)\s*(?:\(|\:)", flags=re.MULTILINE)
+
+
+def remove_attributes(obj, target_attr):
+ """Remove `target_attr` in `obj`."""
+ lines = obj.split(os.linesep)
+
+ target_idx = None
+ for idx, line in enumerate(lines):
+ # search for assignment
+ if line.lstrip().startswith(f"{target_attr} = "):
+ target_idx = idx
+ break
+ # search for function/method definition
+ elif line.lstrip().startswith(f"def {target_attr}("):
+ target_idx = idx
+ break
+
+ # target not found
+ if target_idx is None:
+ return obj
+
+ line = lines[target_idx]
+ indent_level = find_indent(line)
+ # forward pass to find the ending of the block (including empty lines)
+ parsed = extract_block("\n".join(lines[target_idx:]), indent_level)
+ num_lines = len(parsed.split("\n"))
+ for idx in range(num_lines):
+ lines[target_idx + idx] = None
+
+ # backward pass to find comments or decorator
+ for idx in range(target_idx - 1, -1, -1):
+ line = lines[idx]
+ if (line.lstrip().startswith("#") or line.lstrip().startswith("@")) and find_indent(line) == indent_level:
+ lines[idx] = None
+ else:
+ break
+
+ new_obj = os.linesep.join([x for x in lines if x is not None])
+
+ return new_obj
+
+
+def duplicate_module(
+ module_file: Union[str, os.PathLike],
+ old_model_patterns: ModelPatterns,
+ new_model_patterns: ModelPatterns,
+ dest_file: Optional[str] = None,
+ add_copied_from: bool = True,
+ attrs_to_remove: List[str] = None,
+):
+ """
+ Create a new module from an existing one, adapting all function and class names from the old patterns to the new ones.
+
+ Args:
+ module_file (`str` or `os.PathLike`): Path to the module to duplicate.
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ dest_file (`str` or `os.PathLike`, *optional*): Path to the new module.
+ add_copied_from (`bool`, *optional*, defaults to `True`):
+ Whether or not to add `# Copied from` statements in the duplicated module.
+ attrs_to_remove (`List[str]`, *optional*):
+ A list of attribute or method names to remove from the duplicated module.
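+
+ Example (an illustrative sketch; the paths and the new model patterns are hypothetical):
+
+ ```python
+ old = ModelPatterns("BERT", checkpoint="bert-base-uncased")
+ new = ModelPatterns("BERT New", checkpoint="bert-new-base")
+ duplicate_module("src/transformers/models/bert/configuration_bert.py", old, new, dest_file="configuration_bert_new.py")
+ ```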
+ """
+ if dest_file is None:
+ dest_file = str(module_file).replace(
+ old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
+ )
+
+ with open(module_file, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ content = re.sub(r"# Copyright (\d+)\s", f"# Copyright {CURRENT_YEAR} ", content)
+ objects = parse_module_content(content)
+
+ # Loop and treat all objects
+ new_objects = []
+ for obj in objects:
+ # Special cases
+ if "PRETRAINED_CONFIG_ARCHIVE_MAP = {" in obj:
+ # docstyle-ignore
+ obj = (
+ f"{new_model_patterns.model_upper_cased}_PRETRAINED_CONFIG_ARCHIVE_MAP = "
+ + "{"
+ + f"""
+ "{new_model_patterns.checkpoint}": "https://huggingface.co/{new_model_patterns.checkpoint}/resolve/main/config.json",
+"""
+ + "}\n"
+ )
+ new_objects.append(obj)
+ continue
+ elif "PRETRAINED_MODEL_ARCHIVE_LIST = [" in obj:
+ if obj.startswith("TF_"):
+ prefix = "TF_"
+ elif obj.startswith("FLAX_"):
+ prefix = "FLAX_"
+ else:
+ prefix = ""
+ # docstyle-ignore
+ obj = f"""{prefix}{new_model_patterns.model_upper_cased}_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "{new_model_patterns.checkpoint}",
+ # See all {new_model_patterns.model_name} models at https://huggingface.co/models?filter={new_model_patterns.model_type}
+]
+"""
+ new_objects.append(obj)
+ continue
+
+ special_pattern = False
+ for pattern, attr in SPECIAL_PATTERNS.items():
+ if pattern in obj:
+ obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))
+ new_objects.append(obj)
+ special_pattern = True
+ break
+
+ if special_pattern:
+ continue
+
+ # Regular classes functions
+ old_obj = obj
+ obj, replacement = replace_model_patterns(obj, old_model_patterns, new_model_patterns)
+ has_copied_from = re.search(r"^#\s+Copied from", obj, flags=re.MULTILINE) is not None
+ if add_copied_from and not has_copied_from and _re_class_func.search(obj) is not None and len(replacement) > 0:
+ # Copied from statement must be added just before the class/function definition, which may not be the
+ # first line because of decorators.
+ module_name = get_module_from_file(module_file)
+ old_object_name = _re_class_func.search(old_obj).groups()[0]
+ obj = add_content_to_text(
+ obj, f"# Copied from {module_name}.{old_object_name} with {replacement}", add_before=_re_class_func
+ )
+ # In all cases, we remove indented Copied from statements (i.e. the ones on methods).
+ obj = re.sub("\n[ ]+# Copied from [^\n]*\n", "\n", obj)
+
+ new_objects.append(obj)
+
+ content = "\n".join(new_objects)
+ # Remove some attributes that we don't want to copy to the new file(s)
+ if attrs_to_remove is not None:
+ for attr in attrs_to_remove:
+ content = remove_attributes(content, target_attr=attr)
+
+ with open(dest_file, "w", encoding="utf-8") as f:
+ f.write(content)
+
+
+def filter_framework_files(
+ files: List[Union[str, os.PathLike]], frameworks: Optional[List[str]] = None
+) -> List[Union[str, os.PathLike]]:
+ """
+ Filter a list of files to only keep the ones corresponding to a list of frameworks.
+
+ Args:
+ files (`List[Union[str, os.PathLike]]`): The list of files to filter.
+ frameworks (`List[str]`, *optional*): The list of allowed frameworks.
+
+ Returns:
+ `List[Union[str, os.PathLike]]`: The list of filtered files.
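+
+ Example (illustrative; the file names are made up):
+
+ ```python
+ files = ["modeling_bert.py", "modeling_tf_bert.py", "configuration_bert.py"]
+ filter_framework_files(files, frameworks=["pt"])
+ # ['modeling_bert.py', 'configuration_bert.py']
+ ```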
+ """
+ if frameworks is None:
+ frameworks = get_default_frameworks()
+
+ framework_to_file = {}
+ others = []
+ for f in files:
+ parts = Path(f).name.split("_")
+ if "modeling" not in parts:
+ others.append(f)
+ continue
+ if "tf" in parts:
+ framework_to_file["tf"] = f
+ elif "flax" in parts:
+ framework_to_file["flax"] = f
+ else:
+ framework_to_file["pt"] = f
+
+ return [framework_to_file[f] for f in frameworks if f in framework_to_file] + others
+
+
+def get_model_files(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, Union[Path, List[Path]]]:
+ """
+ Retrieves all the files associated with a model.
+
+ Args:
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
+ frameworks (`List[str]`, *optional*):
+ If passed, will only keep the model files corresponding to the passed frameworks.
+
+ Returns:
+ `Dict[str, Union[Path, List[Path]]]`: A dictionary with the following keys:
+ - **doc_file** -- The documentation file for the model.
+ - **model_files** -- All the files in the model module.
+ - **module_name** -- The name of the module corresponding to the model type.
+ - **test_files** -- The test files for the model.
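+
+ Example (an illustrative sketch; the exact files returned depend on the installed version of Transformers):
+
+ ```python
+ files = get_model_files("bert", frameworks=["pt"])
+ files["module_name"]  # "bert"
+ sorted(f.name for f in files["model_files"])  # e.g. ['configuration_bert.py', 'modeling_bert.py', ...]
+ ```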
+ """
+ module_name = model_type_to_module_name(model_type)
+
+ model_module = TRANSFORMERS_PATH / "models" / module_name
+ model_files = list(model_module.glob("*.py"))
+ model_files = filter_framework_files(model_files, frameworks=frameworks)
+
+ doc_file = REPO_PATH / "docs" / "source" / "en" / "model_doc" / f"{model_type}.md"
+
+ # Basic pattern for test files
+ test_files = [
+ f"test_modeling_{module_name}.py",
+ f"test_modeling_tf_{module_name}.py",
+ f"test_modeling_flax_{module_name}.py",
+ f"test_tokenization_{module_name}.py",
+ f"test_image_processing_{module_name}.py",
+ f"test_feature_extraction_{module_name}.py",
+ f"test_processor_{module_name}.py",
+ ]
+ test_files = filter_framework_files(test_files, frameworks=frameworks)
+ # Add the test directory
+ test_files = [REPO_PATH / "tests" / "models" / module_name / f for f in test_files]
+ # Filter by existing files
+ test_files = [f for f in test_files if f.exists()]
+
+ return {"doc_file": doc_file, "model_files": model_files, "module_name": module_name, "test_files": test_files}
+
+
+_re_checkpoint_for_doc = re.compile(r"^_CHECKPOINT_FOR_DOC\s+=\s+(\S*)\s*$", flags=re.MULTILINE)
+
+
+def find_base_model_checkpoint(
+ model_type: str, model_files: Optional[Dict[str, Union[Path, List[Path]]]] = None
+) -> str:
+ """
+ Finds the model checkpoint used in the docstrings for a given model.
+
+ Args:
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
+ model_files (`Dict[str, Union[Path, List[Path]]]`, *optional*):
+ The files associated with `model_type`. Can be passed to speed up the function, otherwise they will be computed.
+
+ Returns:
+ `str`: The checkpoint used.
+ """
+ if model_files is None:
+ model_files = get_model_files(model_type)
+ module_files = model_files["model_files"]
+ for fname in module_files:
+ if "modeling" not in str(fname):
+ continue
+
+ with open(fname, "r", encoding="utf-8") as f:
+ content = f.read()
+ if _re_checkpoint_for_doc.search(content) is not None:
+ checkpoint = _re_checkpoint_for_doc.search(content).groups()[0]
+ # Remove quotes
+ checkpoint = checkpoint.replace('"', "")
+ checkpoint = checkpoint.replace("'", "")
+ return checkpoint
+
+ # TODO: Find some kind of fallback if there is no _CHECKPOINT_FOR_DOC in any of the modeling file.
+ return ""
+
+
+def get_default_frameworks():
+ """
+ Returns the list of frameworks (PyTorch, TensorFlow, Flax) that are installed in the environment.
+ """
+ frameworks = []
+ if is_torch_available():
+ frameworks.append("pt")
+ if is_tf_available():
+ frameworks.append("tf")
+ if is_flax_available():
+ frameworks.append("flax")
+ return frameworks
+
+
+_re_model_mapping = re.compile("MODEL_([A-Z_]*)MAPPING_NAMES")
+
+
+def retrieve_model_classes(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, List[str]]:
+ """
+ Retrieve the model classes associated with a given model.
+
+ Args:
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
+ frameworks (`List[str]`, *optional*):
+ The frameworks to look for. Will default to `["pt", "tf", "flax"]`, passing a smaller list will restrict
+ the classes returned.
+
+ Returns:
+ `Dict[str, List[str]]`: A dictionary with one key per framework and the list of model classes associated with
+ that framework as values.
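+
+ Example (an illustrative sketch; the classes actually returned depend on the installed frameworks):
+
+ ```python
+ retrieve_model_classes("bert", frameworks=["pt"])
+ # e.g. {"pt": ['BertModel', 'BertForMaskedLM', ...]}
+ ```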
+ """
+ if frameworks is None:
+ frameworks = get_default_frameworks()
+
+ modules = {
+ "pt": auto_module.modeling_auto if is_torch_available() else None,
+ "tf": auto_module.modeling_tf_auto if is_tf_available() else None,
+ "flax": auto_module.modeling_flax_auto if is_flax_available() else None,
+ }
+
+ model_classes = {}
+ for framework in frameworks:
+ new_model_classes = []
+ if modules[framework] is None:
+ raise ValueError(f"You selected {framework} in the frameworks, but it is not installed.")
+ model_mappings = [attr for attr in dir(modules[framework]) if _re_model_mapping.search(attr) is not None]
+ for model_mapping_name in model_mappings:
+ model_mapping = getattr(modules[framework], model_mapping_name)
+ if model_type in model_mapping:
+ new_model_classes.append(model_mapping[model_type])
+
+ if len(new_model_classes) > 0:
+ # Remove duplicates
+ model_classes[framework] = list(set(new_model_classes))
+
+ return model_classes
+
+
+def retrieve_info_for_model(model_type, frameworks: Optional[List[str]] = None):
+ """
+ Retrieves all the information from a given model_type.
+
+ Args:
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
+ frameworks (`List[str]`, *optional*):
+ If passed, will only keep the info corresponding to the passed frameworks.
+
+ Returns:
+ `Dict`: A dictionary with the following keys:
+ - **frameworks** (`List[str]`): The list of frameworks that back this model type.
+ - **model_classes** (`Dict[str, List[str]]`): The model classes implemented for that model type.
+ - **model_files** (`Dict[str, Union[Path, List[Path]]]`): The files associated with that model type.
+ - **model_patterns** (`ModelPatterns`): The various patterns for the model.
+ """
+ if model_type not in auto_module.MODEL_NAMES_MAPPING:
+ raise ValueError(f"{model_type} is not a valid model type.")
+
+ model_name = auto_module.MODEL_NAMES_MAPPING[model_type]
+ config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type]
+ archive_map = auto_module.configuration_auto.CONFIG_ARCHIVE_MAP_MAPPING_NAMES.get(model_type, None)
+ if model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES:
+ tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type]
+ tokenizer_class = tokenizer_classes[0] if tokenizer_classes[0] is not None else tokenizer_classes[1]
+ else:
+ tokenizer_class = None
+ image_processor_class = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None)
+ feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None)
+ processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None)
+
+ model_files = get_model_files(model_type, frameworks=frameworks)
+ model_camel_cased = config_class.replace("Config", "")
+
+ available_frameworks = []
+ for fname in model_files["model_files"]:
+ if "modeling_tf" in str(fname):
+ available_frameworks.append("tf")
+ elif "modeling_flax" in str(fname):
+ available_frameworks.append("flax")
+ elif "modeling" in str(fname):
+ available_frameworks.append("pt")
+
+ if frameworks is None:
+ frameworks = get_default_frameworks()
+
+ frameworks = [f for f in frameworks if f in available_frameworks]
+
+ model_classes = retrieve_model_classes(model_type, frameworks=frameworks)
+
+ # Retrieve model upper-cased name from the constant name of the pretrained archive map.
+ if archive_map is None:
+ model_upper_cased = model_camel_cased.upper()
+ else:
+ parts = archive_map.split("_")
+ idx = 0
+ while idx < len(parts) and parts[idx] != "PRETRAINED":
+ idx += 1
+ if idx < len(parts):
+ model_upper_cased = "_".join(parts[:idx])
+ else:
+ model_upper_cased = model_camel_cased.upper()
+
+ model_patterns = ModelPatterns(
+ model_name,
+ checkpoint=find_base_model_checkpoint(model_type, model_files=model_files),
+ model_type=model_type,
+ model_camel_cased=model_camel_cased,
+ model_lower_cased=model_files["module_name"],
+ model_upper_cased=model_upper_cased,
+ config_class=config_class,
+ tokenizer_class=tokenizer_class,
+ image_processor_class=image_processor_class,
+ feature_extractor_class=feature_extractor_class,
+ processor_class=processor_class,
+ )
+
+ return {
+ "frameworks": frameworks,
+ "model_classes": model_classes,
+ "model_files": model_files,
+ "model_patterns": model_patterns,
+ }
+
+
+def clean_frameworks_in_init(
+ init_file: Union[str, os.PathLike], frameworks: Optional[List[str]] = None, keep_processing: bool = True
+):
+ """
+ Removes all the import lines that don't belong to a given list of frameworks or concern tokenizers/feature
+ extractors/image processors/processors in an init.
+
+ Args:
+ init_file (`str` or `os.PathLike`): The path to the init to treat.
+ frameworks (`List[str]`, *optional*):
+ If passed, this will remove all imports that are subject to a framework not in `frameworks`.
+ keep_processing (`bool`, *optional*, defaults to `True`):
+ Whether or not to keep the preprocessing (tokenizer, feature extractor, image processor, processor) imports
+ in the init.
+ """
+ if frameworks is None:
+ frameworks = get_default_frameworks()
+
+ names = {"pt": "torch"}
+ to_remove = [names.get(f, f) for f in ["pt", "tf", "flax"] if f not in frameworks]
+ if not keep_processing:
+ to_remove.extend(["sentencepiece", "tokenizers", "vision"])
+
+ if len(to_remove) == 0:
+ # Nothing to do
+ return
+
+ remove_pattern = "|".join(to_remove)
+ re_conditional_imports = re.compile(rf"^\s*if not is_({remove_pattern})_available\(\):\s*$")
+ re_try = re.compile(r"\s*try:")
+ re_else = re.compile(r"\s*else:")
+ re_is_xxx_available = re.compile(rf"is_({remove_pattern})_available")
+
+ with open(init_file, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ lines = content.split("\n")
+ new_lines = []
+ idx = 0
+ while idx < len(lines):
+ # Conditional imports in try-except-else blocks
+ if (re_conditional_imports.search(lines[idx]) is not None) and (re_try.search(lines[idx - 1]) is not None):
+ # Remove the preceding `try:`
+ new_lines.pop()
+ idx += 1
+ # Iterate until `else:`
+ while is_empty_line(lines[idx]) or re_else.search(lines[idx]) is None:
+ idx += 1
+ idx += 1
+ indent = find_indent(lines[idx])
+ while find_indent(lines[idx]) >= indent or is_empty_line(lines[idx]):
+ idx += 1
+ # Remove the import from utils
+ elif re_is_xxx_available.search(lines[idx]) is not None:
+ line = lines[idx]
+ for framework in to_remove:
+ line = line.replace(f", is_{framework}_available", "")
+ line = line.replace(f"is_{framework}_available, ", "")
+ line = line.replace(f"is_{framework}_available,", "")
+ line = line.replace(f"is_{framework}_available", "")
+
+ if len(line.strip()) > 0:
+ new_lines.append(line)
+ idx += 1
+ # Otherwise we keep the line, except if it's a tokenizer import and we don't want to keep it.
+ elif keep_processing or (
+ re.search(r'^\s*"(tokenization|processing|feature_extraction|image_processing)', lines[idx]) is None
+ and re.search(r"^\s*from .(tokenization|processing|feature_extraction|image_processing)", lines[idx])
+ is None
+ ):
+ new_lines.append(lines[idx])
+ idx += 1
+ else:
+ idx += 1
+
+ with open(init_file, "w", encoding="utf-8") as f:
+ f.write("\n".join(new_lines))
+
+
+def add_model_to_main_init(
+ old_model_patterns: ModelPatterns,
+ new_model_patterns: ModelPatterns,
+ frameworks: Optional[List[str]] = None,
+ with_processing: bool = True,
+):
+ """
+ Add a model to the main init of Transformers.
+
+ Args:
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ frameworks (`List[str]`, *optional*):
+ If specified, only the models implemented in those frameworks will be added.
+ with_processing (`bool`, *optional*, defaults to `True`):
+ Whether the tokenizer/feature extractor/processor of the model should also be added to the init or not.
+ """
+ with open(TRANSFORMERS_PATH / "__init__.py", "r", encoding="utf-8") as f:
+ content = f.read()
+
+ lines = content.split("\n")
+ idx = 0
+ new_lines = []
+ framework = None
+ while idx < len(lines):
+ new_framework = False
+ if not is_empty_line(lines[idx]) and find_indent(lines[idx]) == 0:
+ framework = None
+ elif lines[idx].lstrip().startswith("if not is_torch_available"):
+ framework = "pt"
+ new_framework = True
+ elif lines[idx].lstrip().startswith("if not is_tf_available"):
+ framework = "tf"
+ new_framework = True
+ elif lines[idx].lstrip().startswith("if not is_flax_available"):
+ framework = "flax"
+ new_framework = True
+
+ if new_framework:
+ # For a new framework, we need to skip until the else: block to get where the imports are.
+ while lines[idx].strip() != "else:":
+ new_lines.append(lines[idx])
+ idx += 1
+
+ # Skip if we are in a framework not wanted.
+ if framework is not None and frameworks is not None and framework not in frameworks:
+ new_lines.append(lines[idx])
+ idx += 1
+ elif re.search(rf'models.{old_model_patterns.model_lower_cased}( |")', lines[idx]) is not None:
+ block = [lines[idx]]
+ indent = find_indent(lines[idx])
+ idx += 1
+ while find_indent(lines[idx]) > indent:
+ block.append(lines[idx])
+ idx += 1
+ if lines[idx].strip() in [")", "]", "],"]:
+ block.append(lines[idx])
+ idx += 1
+ block = "\n".join(block)
+ new_lines.append(block)
+
+ add_block = True
+ if not with_processing:
+ processing_classes = [
+ old_model_patterns.tokenizer_class,
+ old_model_patterns.image_processor_class,
+ old_model_patterns.feature_extractor_class,
+ old_model_patterns.processor_class,
+ ]
+ # Only keep the ones that are not None
+ processing_classes = [c for c in processing_classes if c is not None]
+ for processing_class in processing_classes:
+ block = block.replace(f' "{processing_class}",', "")
+ block = block.replace(f', "{processing_class}"', "")
+ block = block.replace(f" {processing_class},", "")
+ block = block.replace(f", {processing_class}", "")
+
+ if processing_class in block:
+ add_block = False
+ if add_block:
+ new_lines.append(replace_model_patterns(block, old_model_patterns, new_model_patterns)[0])
+ else:
+ new_lines.append(lines[idx])
+ idx += 1
+
+ with open(TRANSFORMERS_PATH / "__init__.py", "w", encoding="utf-8") as f:
+ f.write("\n".join(new_lines))
+
+
+def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns):
+ """
+ Add a tokenizer to the relevant mappings in the auto module.
+
+ Args:
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ """
+ if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None:
+ return
+
+ with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "r", encoding="utf-8") as f:
+ content = f.read()
+
+ lines = content.split("\n")
+ idx = 0
+ # First we get to the TOKENIZER_MAPPING_NAMES block.
+ while not lines[idx].startswith(" TOKENIZER_MAPPING_NAMES = OrderedDict("):
+ idx += 1
+ idx += 1
+
+ # That block will end right before this line:
+ while not lines[idx].startswith("TOKENIZER_MAPPING = _LazyAutoMapping"):
+ # Either the whole tokenizer block is defined on one line, in which case it ends with ","
+ if lines[idx].endswith(","):
+ block = lines[idx]
+ # Otherwise it takes several lines until we get to a "),"
+ else:
+ block = []
+ while not lines[idx].startswith(" ),"):
+ block.append(lines[idx])
+ idx += 1
+ block = "\n".join(block)
+ idx += 1
+
+ # If we find the model type and tokenizer class in that block, we have the old model tokenizer block
+ if f'"{old_model_patterns.model_type}"' in block and old_model_patterns.tokenizer_class in block:
+ break
+
+ new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)
+ new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)
+
+ new_lines = lines[:idx] + [new_block] + lines[idx:]
+ with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "w", encoding="utf-8") as f:
+ f.write("\n".join(new_lines))
+
+
+AUTO_CLASSES_PATTERNS = {
+ "configuration_auto.py": [
+ ' ("{model_type}", "{model_name}"),',
+ ' ("{model_type}", "{config_class}"),',
+ ' ("{model_type}", "{pretrained_archive_map}"),',
+ ],
+ "feature_extraction_auto.py": [' ("{model_type}", "{feature_extractor_class}"),'],
+ "image_processing_auto.py": [' ("{model_type}", "{image_processor_class}"),'],
+ "modeling_auto.py": [' ("{model_type}", "{any_pt_class}"),'],
+ "modeling_tf_auto.py": [' ("{model_type}", "{any_tf_class}"),'],
+ "modeling_flax_auto.py": [' ("{model_type}", "{any_flax_class}"),'],
+ "processing_auto.py": [' ("{model_type}", "{processor_class}"),'],
+}
+
+
+def add_model_to_auto_classes(
+ old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, model_classes: Dict[str, List[str]]
+):
+ """
+ Add a model to the relevant mappings in the auto module.
+
+ Args:
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ model_classes (`Dict[str, List[str]]`): A dictionary framework to list of model classes implemented.
+ """
+ for filename in AUTO_CLASSES_PATTERNS:
+ # Extend patterns with all model classes if necessary
+ new_patterns = []
+ for pattern in AUTO_CLASSES_PATTERNS[filename]:
+ if re.search("any_([a-z]*)_class", pattern) is not None:
+ framework = re.search("any_([a-z]*)_class", pattern).groups()[0]
+ if framework in model_classes:
+ new_patterns.extend(
+ [
+ pattern.replace("{" + f"any_{framework}_class" + "}", cls)
+ for cls in model_classes[framework]
+ ]
+ )
+ elif "{config_class}" in pattern:
+ new_patterns.append(pattern.replace("{config_class}", old_model_patterns.config_class))
+ elif "{image_processor_class}" in pattern:
+ if (
+ old_model_patterns.image_processor_class is not None
+ and new_model_patterns.image_processor_class is not None
+ ):
+ new_patterns.append(
+ pattern.replace("{image_processor_class}", old_model_patterns.image_processor_class)
+ )
+ elif "{feature_extractor_class}" in pattern:
+ if (
+ old_model_patterns.feature_extractor_class is not None
+ and new_model_patterns.feature_extractor_class is not None
+ ):
+ new_patterns.append(
+ pattern.replace("{feature_extractor_class}", old_model_patterns.feature_extractor_class)
+ )
+ elif "{processor_class}" in pattern:
+ if old_model_patterns.processor_class is not None and new_model_patterns.processor_class is not None:
+ new_patterns.append(pattern.replace("{processor_class}", old_model_patterns.processor_class))
+ else:
+ new_patterns.append(pattern)
+
+ # Loop through all patterns.
+ for pattern in new_patterns:
+ full_name = TRANSFORMERS_PATH / "models" / "auto" / filename
+ old_model_line = pattern
+ new_model_line = pattern
+ for attr in ["model_type", "model_name"]:
+ old_model_line = old_model_line.replace("{" + attr + "}", getattr(old_model_patterns, attr))
+ new_model_line = new_model_line.replace("{" + attr + "}", getattr(new_model_patterns, attr))
+ if "pretrained_archive_map" in pattern:
+ old_model_line = old_model_line.replace(
+ "{pretrained_archive_map}", f"{old_model_patterns.model_upper_cased}_PRETRAINED_CONFIG_ARCHIVE_MAP"
+ )
+ new_model_line = new_model_line.replace(
+ "{pretrained_archive_map}", f"{new_model_patterns.model_upper_cased}_PRETRAINED_CONFIG_ARCHIVE_MAP"
+ )
+
+ new_model_line = new_model_line.replace(
+ old_model_patterns.model_camel_cased, new_model_patterns.model_camel_cased
+ )
+
+ add_content_to_file(full_name, new_model_line, add_after=old_model_line)
+
+ # Tokenizers require special handling
+ insert_tokenizer_in_auto_module(old_model_patterns, new_model_patterns)
+
+
+DOC_OVERVIEW_TEMPLATE = """## Overview
+
+The {model_name} model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
+<INSERT SHORT SUMMARY HERE>
+
+The abstract from the paper is the following:
+
+*<INSERT PAPER ABSTRACT HERE>*
+
+Tips:
+
+<INSERT TIPS ABOUT MODEL HERE>
+
+This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
+The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).
+
+"""
+
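+# Illustrative sketch (not part of the upstream file): the overview template above is presumably
+# filled in with `str.format`-style substitution; only the `{model_name}` placeholder is replaced
+# automatically, e.g.
+#
+#   >>> "The {model_name} model was proposed in".format(model_name="MyNewModel")
+#   'The MyNewModel model was proposed in'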
+
+def duplicate_doc_file(
+ doc_file: Union[str, os.PathLike],
+ old_model_patterns: ModelPatterns,
+ new_model_patterns: ModelPatterns,
+ dest_file: Optional[Union[str, os.PathLike]] = None,
+ frameworks: Optional[List[str]] = None,
+):
+ """
+ Duplicates a documentation file and adapts it for a new model.
+
+ Args:
+ doc_file (`str` or `os.PathLike`): Path to the doc file to duplicate.
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ dest_file (`str` or `os.PathLike`, *optional*): Path to the new doc file.
+ Will default to a file named `{new_model_patterns.model_type}.md` in the same folder as `doc_file`.
+ frameworks (`List[str]`, *optional*):
+ If passed, will only keep the model classes corresponding to this list of frameworks in the new doc file.
+ """
+ with open(doc_file, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ content = re.sub(r" [{"score":x, ...}, ...]
+ keys = ["score", "label", "box"]
+ annotation = [
+ dict(zip(keys, vals))
+ for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
+ ]
+
+ return annotation
+
+ def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
+ """
+ Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }
+
+ Args:
+ box (`torch.Tensor`): Tensor containing the coordinates in corners format.
+
+ Returns:
+ bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
+ """
+ if self.framework != "pt":
+ raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
+ xmin, ymin, xmax, ymax = box.int().tolist()
+ bbox = {
+ "xmin": xmin,
+ "ymin": ymin,
+ "xmax": xmax,
+ "ymax": ymax,
+ }
+ return bbox
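+
+ # Illustrative sketch (not part of the upstream file): `box.int().tolist()` truncates the float
+ # coordinates to integers before they are packed into the dict above, e.g.
+ #
+ #   >>> import torch
+ #   >>> torch.tensor([10.4, 20.6, 30.2, 40.9]).int().tolist()
+ #   [10, 20, 30, 40]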
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_to_audio.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_to_audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..58c21cc1216869c4ae7cc2486324e85a45225020
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_to_audio.py
@@ -0,0 +1,207 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Union
+
+from ..utils import is_torch_available
+from .base import Pipeline
+
+
+if is_torch_available():
+ from ..models.auto.modeling_auto import MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING
+ from ..models.speecht5.modeling_speecht5 import SpeechT5HifiGan
+
+DEFAULT_VOCODER_ID = "microsoft/speecht5_hifigan"
+
+
+class TextToAudioPipeline(Pipeline):
+ """
+ Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`. This
+ pipeline generates an audio file from an input text and optional other conditional inputs.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> pipe = pipeline(model="suno/bark-small")
+ >>> output = pipe("Hey it's HuggingFace on the phone!")
+
+ >>> audio = output["audio"]
+ >>> sampling_rate = output["sampling_rate"]
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+ <Tip>
+
+ You can specify parameters passed to the model by using [`TextToAudioPipeline.__call__.forward_params`] or
+ [`TextToAudioPipeline.__call__.generate_kwargs`].
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt")
+
+ >>> # diversify the music generation by adding randomness with a high temperature and set a maximum music length
+ >>> generate_kwargs = {
+ ... "do_sample": True,
+ ... "temperature": 0.7,
+ ... "max_new_tokens": 35,
+ ... }
+
+ >>> outputs = music_generator("Techno music with high melodic riffs", generate_kwargs=generate_kwargs)
+ ```
+
+ </Tip>
+
+ This pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"text-to-speech"` or
+ `"text-to-audio"`.
+
+ See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-to-speech).
+ """
+
+ def __init__(self, *args, vocoder=None, sampling_rate=None, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ if self.framework == "tf":
+ raise ValueError("The TextToAudioPipeline is only available in PyTorch.")
+
+ self.vocoder = None
+ if self.model.__class__ in MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING.values():
+ self.vocoder = (
+ SpeechT5HifiGan.from_pretrained(DEFAULT_VOCODER_ID).to(self.model.device)
+ if vocoder is None
+ else vocoder
+ )
+
+ self.sampling_rate = sampling_rate
+ if self.vocoder is not None:
+ self.sampling_rate = self.vocoder.config.sampling_rate
+
+ if self.sampling_rate is None:
+ # get sampling_rate from config and generation config
+
+ config = self.model.config
+ gen_config = self.model.__dict__.get("generation_config", None)
+ if gen_config is not None:
+ config.update(gen_config.to_dict())
+
+ for sampling_rate_name in ["sample_rate", "sampling_rate"]:
+ sampling_rate = getattr(config, sampling_rate_name, None)
+ if sampling_rate is not None:
+ self.sampling_rate = sampling_rate
+
+ def preprocess(self, text, **kwargs):
+ if isinstance(text, str):
+ text = [text]
+
+ if self.model.config.model_type == "bark":
+ # Bark's tokenizer is called through BarkProcessor, which uses these kwargs
+ new_kwargs = {
+ "max_length": self.model.generation_config.semantic_config.get("max_input_semantic_length", 256),
+ "add_special_tokens": False,
+ "return_attention_mask": True,
+ "return_token_type_ids": False,
+ "padding": "max_length",
+ }
+
+ # priority is given to kwargs
+ new_kwargs.update(kwargs)
+
+ kwargs = new_kwargs
+
+ output = self.tokenizer(text, **kwargs, return_tensors="pt")
+
+ return output
+
+ def _forward(self, model_inputs, **kwargs):
+ # we expect some kwargs to be additional tensors which need to be on the right device
+ kwargs = self._ensure_tensor_on_device(kwargs, device=self.device)
+ forward_params = kwargs["forward_params"]
+ generate_kwargs = kwargs["generate_kwargs"]
+
+ if self.model.can_generate():
+ # we expect some kwargs to be additional tensors which need to be on the right device
+ generate_kwargs = self._ensure_tensor_on_device(generate_kwargs, device=self.device)
+
+ # generate_kwargs get priority over forward_params
+ forward_params.update(generate_kwargs)
+
+ output = self.model.generate(**model_inputs, **forward_params)
+ else:
+ if len(generate_kwargs):
+ raise ValueError(
+ f"""You're using the `TextToAudioPipeline` with a forward-only model, but `generate_kwargs` is non empty.
+ For forward-only TTA models, please use `forward_params` instead of of
+ `generate_kwargs`. For reference, here are the `generate_kwargs` used here:
+ {generate_kwargs.keys()}"""
+ )
+ output = self.model(**model_inputs, **forward_params)[0]
+
+ if self.vocoder is not None:
+ # in that case, the output is a spectrogram that needs to be converted into a waveform
+ output = self.vocoder(output)
+
+ return output
+
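+ # Illustrative sketch (not part of the upstream file): because `forward_params.update(generate_kwargs)`
+ # is used above, a key present in both dicts resolves to its `generate_kwargs` value, e.g.
+ #
+ #   >>> forward_params = {"max_new_tokens": 10, "do_sample": False}
+ #   >>> forward_params.update({"do_sample": True})
+ #   >>> forward_params
+ #   {'max_new_tokens': 10, 'do_sample': True}
+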
+ def __call__(self, text_inputs: Union[str, List[str]], **forward_params):
+ """
+ Generates speech/audio from the inputs. See the [`TextToAudioPipeline`] documentation for more information.
+
+ Args:
+ text_inputs (`str` or `List[str]`):
+ The text(s) to generate.
+ forward_params (`dict`, *optional*):
+ Parameters passed to the model generation/forward method. `forward_params` are always passed to the
+ underlying model.
+ generate_kwargs (`dict`, *optional*):
+ The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a
+ complete overview of generate, check the [following
+ guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). `generate_kwargs` are
+ only passed to the underlying model if the latter is a generative model.
+
+ Return:
+ A `dict` or a list of `dict`: The dictionaries have two keys:
+
+ - **audio** (`np.ndarray` of shape `(nb_channels, audio_length)`) -- The generated audio waveform.
+ - **sampling_rate** (`int`) -- The sampling rate of the generated audio waveform.
+ """
+ return super().__call__(text_inputs, **forward_params)
+
+ def _sanitize_parameters(
+ self,
+ preprocess_params=None,
+ forward_params=None,
+ generate_kwargs=None,
+ ):
+ params = {
+ "forward_params": forward_params if forward_params else {},
+ "generate_kwargs": generate_kwargs if generate_kwargs else {},
+ }
+
+ if preprocess_params is None:
+ preprocess_params = {}
+ postprocess_params = {}
+
+ return preprocess_params, params, postprocess_params
+
+ def postprocess(self, waveform):
+ output_dict = {}
+
+ output_dict["audio"] = waveform.cpu().float().numpy()
+ output_dict["sampling_rate"] = self.sampling_rate
+
+ return output_dict
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/visual_question_answering.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/visual_question_answering.py
new file mode 100644
index 0000000000000000000000000000000000000000..9106b19d33671a959a5be0d834e48a8a3dc05010
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/visual_question_answering.py
@@ -0,0 +1,151 @@
+from typing import Union
+
+from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
+from .base import Pipeline, build_pipeline_init_args
+
+
+if is_vision_available():
+ from PIL import Image
+
+ from ..image_utils import load_image
+
+if is_torch_available():
+ from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES
+
+logger = logging.get_logger(__name__)
+
+
+@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True))
+class VisualQuestionAnsweringPipeline(Pipeline):
+ """
+ Visual Question Answering pipeline using an `AutoModelForVisualQuestionAnswering`. This pipeline is currently only
+ available in PyTorch.
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
+ >>> image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"
+ >>> oracle(question="What is she wearing ?", image=image_url)
+ [{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}]
+
+ >>> oracle(question="What is she wearing ?", image=image_url, top_k=1)
+ [{'score': 0.948, 'answer': 'hat'}]
+
+ >>> oracle(question="Is this a person ?", image=image_url, top_k=1)
+ [{'score': 0.993, 'answer': 'yes'}]
+
+ >>> oracle(question="Is this a man ?", image=image_url, top_k=1)
+ [{'score': 0.996, 'answer': 'no'}]
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+ This visual question answering pipeline can currently be loaded from [`pipeline`] using the following task
+ identifiers: `"visual-question-answering", "vqa"`.
+
+ The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See
+ the up-to-date list of available models on
+ [huggingface.co/models](https://huggingface.co/models?filter=visual-question-answering).
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES)
+
+ def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, timeout=None, **kwargs):
+ preprocess_params, postprocess_params = {}, {}
+ if padding is not None:
+ preprocess_params["padding"] = padding
+ if truncation is not None:
+ preprocess_params["truncation"] = truncation
+ if timeout is not None:
+ preprocess_params["timeout"] = timeout
+ if top_k is not None:
+ postprocess_params["top_k"] = top_k
+ return preprocess_params, {}, postprocess_params
+
+ def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
+ r"""
+ Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed
+ below:
+
+ - `pipeline(image=image, question=question)`
+ - `pipeline({"image": image, "question": question})`
+ - `pipeline([{"image": image, "question": question}])`
+ - `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])`
+
+ Args:
+ image (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
+ The pipeline handles three types of images:
+
+ - A string containing a http link pointing to an image
+ - A string containing a local path to an image
+ - An image loaded in PIL directly
+
+ The pipeline accepts either a single image or a batch of images. If given a single image, it can be
+ broadcasted to multiple questions.
+ question (`str`, `List[str]`):
+ The question(s) asked. If given a single question, it can be broadcasted to multiple images.
+ top_k (`int`, *optional*, defaults to 5):
+ The number of top labels that will be returned by the pipeline. If the provided number is higher than
+ the number of labels available in the model configuration, it will default to the number of labels.
+ timeout (`float`, *optional*, defaults to None):
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
+ the call may block forever.
+ Return:
+ A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys:
+
+ - **answer** (`str`) -- The answer predicted by the model.
+ - **score** (`float`) -- The score attributed by the model to that answer (only returned by non-generative models).
+ """
+ if isinstance(image, (Image.Image, str)) and isinstance(question, str):
+ inputs = {"image": image, "question": question}
+ else:
+ """
+ Supports the following format
+ - {"image": image, "question": question}
+ - [{"image": image, "question": question}]
+ - Generator and datasets
+ """
+ inputs = image
+ results = super().__call__(inputs, **kwargs)
+ return results
+
+ def preprocess(self, inputs, padding=False, truncation=False, timeout=None):
+ image = load_image(inputs["image"], timeout=timeout)
+ model_inputs = self.tokenizer(
+ inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
+ )
+ image_features = self.image_processor(images=image, return_tensors=self.framework)
+ model_inputs.update(image_features)
+ return model_inputs
+
+ def _forward(self, model_inputs, **generate_kwargs):
+ if self.model.can_generate():
+ model_outputs = self.model.generate(**model_inputs, **generate_kwargs)
+ else:
+ model_outputs = self.model(**model_inputs)
+ return model_outputs
+
+ def postprocess(self, model_outputs, top_k=5):
+ if self.model.can_generate():
+ return [
+ {"answer": self.tokenizer.decode(output_ids, skip_special_tokens=True).strip()}
+ for output_ids in model_outputs
+ ]
+ else:
+ if top_k > self.model.config.num_labels:
+ top_k = self.model.config.num_labels
+
+ if self.framework == "pt":
+ probs = model_outputs.logits.sigmoid()[0]
+ scores, ids = probs.topk(top_k)
+ else:
+ raise ValueError(f"Unsupported framework: {self.framework}")
+
+ scores = scores.tolist()
+ ids = ids.tolist()
+ return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_classification.py b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a600bc8ad0fb850a29e53710238437d168521d0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/pipelines/zero_shot_classification.py
@@ -0,0 +1,265 @@
+import inspect
+from typing import List, Union
+
+import numpy as np
+
+from ..tokenization_utils import TruncationStrategy
+from ..utils import add_end_docstrings, logging
+from .base import ArgumentHandler, ChunkPipeline, build_pipeline_init_args
+
+
+logger = logging.get_logger(__name__)
+
+
+class ZeroShotClassificationArgumentHandler(ArgumentHandler):
+ """
+ Handles arguments for zero-shot text classification by turning each possible label into an NLI
+ premise/hypothesis pair.
+ """
+
+ def _parse_labels(self, labels):
+ if isinstance(labels, str):
+ labels = [label.strip() for label in labels.split(",") if label.strip()]
+ return labels
+
+ def __call__(self, sequences, labels, hypothesis_template):
+ if len(labels) == 0 or len(sequences) == 0:
+ raise ValueError("You must include at least one label and at least one sequence.")
+ if hypothesis_template.format(labels[0]) == hypothesis_template:
+ raise ValueError(
+ (
+ 'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
+ "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
+ ).format(hypothesis_template)
+ )
+
+ if isinstance(sequences, str):
+ sequences = [sequences]
+
+ sequence_pairs = []
+ for sequence in sequences:
+ sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
+
+ return sequence_pairs, sequences
+
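+# Illustrative sketch (not part of the upstream file; the inputs are hypothetical): for one
+# sequence and two candidate labels, the handler above produces one premise/hypothesis pair per
+# label, roughly
+#
+#   >>> handler = ZeroShotClassificationArgumentHandler()
+#   >>> handler("I love this movie", ["positive", "negative"], "This example is {}.")
+#   ([['I love this movie', 'This example is positive.'],
+#     ['I love this movie', 'This example is negative.']],
+#    ['I love this movie'])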
+
+@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
+class ZeroShotClassificationPipeline(ChunkPipeline):
+ """
+ NLI-based zero-shot classification pipeline using a `ModelForSequenceClassification` trained on NLI (natural
+ language inference) tasks. Equivalent of `text-classification` pipelines, but these models don't require a
+ hardcoded number of potential classes: they can be chosen at runtime. This usually makes the pipeline slower, but also
+ **much** more flexible.
+
+ Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
+ pair and passed to the pretrained model. Then, the logit for *entailment* is taken as the logit for the candidate
+ label being valid. Any NLI model can be used, but the id of the *entailment* label must be included in the model
+ config's [`~transformers.PretrainedConfig.label2id`].
+
+ Example:
+
+ ```python
+ >>> from transformers import pipeline
+
+ >>> oracle = pipeline(model="facebook/bart-large-mnli")
+ >>> oracle(
+ ... "I have a problem with my iphone that needs to be resolved asap!!",
+ ... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
+ ... )
+ {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
+
+ >>> oracle(
+ ... "I have a problem with my iphone that needs to be resolved asap!!",
+ ... candidate_labels=["english", "german"],
+ ... )
+ {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['english', 'german'], 'scores': [0.814, 0.186]}
+ ```
+
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+ This NLI pipeline can currently be loaded from [`pipeline`] using the following task identifier:
+ `"zero-shot-classification"`.
+
+ The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list
+ of available models on [huggingface.co/models](https://huggingface.co/models?search=nli).
+ """
+
+ def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
+ self._args_parser = args_parser
+ super().__init__(*args, **kwargs)
+ if self.entailment_id == -1:
+ logger.warning(
+ "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
+ "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
+ )
+
+ @property
+ def entailment_id(self):
+ for label, ind in self.model.config.label2id.items():
+ if label.lower().startswith("entail"):
+ return ind
+ return -1
+
+ def _parse_and_tokenize(
+ self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
+ ):
+ """
+ Parse arguments and tokenize with `only_first` truncation so that the hypothesis (label) is never truncated
+ """
+ return_tensors = self.framework
+ if self.tokenizer.pad_token is None:
+ # Override for tokenizers not supporting padding
+ logger.error(
+ "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
+ " `pad_token=eos_token`"
+ )
+ self.tokenizer.pad_token = self.tokenizer.eos_token
+ try:
+ inputs = self.tokenizer(
+ sequence_pairs,
+ add_special_tokens=add_special_tokens,
+ return_tensors=return_tensors,
+ padding=padding,
+ truncation=truncation,
+ )
+ except Exception as e:
+ if "too short" in str(e):
+ # tokenizers might yell that we want to truncate
+ # to a value that is not even reached by the input.
+ # In that case we don't want to truncate.
+ # There doesn't seem to be a better way to catch that
+ # exception.
+
+ inputs = self.tokenizer(
+ sequence_pairs,
+ add_special_tokens=add_special_tokens,
+ return_tensors=return_tensors,
+ padding=padding,
+ truncation=TruncationStrategy.DO_NOT_TRUNCATE,
+ )
+ else:
+ raise e
+
+ return inputs
+
+ def _sanitize_parameters(self, **kwargs):
+ if kwargs.get("multi_class", None) is not None:
+ kwargs["multi_label"] = kwargs["multi_class"]
+ logger.warning(
+ "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
+ "`multi_class` will be removed in a future version of Transformers."
+ )
+ preprocess_params = {}
+ if "candidate_labels" in kwargs:
+ preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
+ if "hypothesis_template" in kwargs:
+ preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
+
+ postprocess_params = {}
+ if "multi_label" in kwargs:
+ postprocess_params["multi_label"] = kwargs["multi_label"]
+ return preprocess_params, {}, postprocess_params
+
+ def __call__(
+ self,
+ sequences: Union[str, List[str]],
+ *args,
+ **kwargs,
+ ):
+ """
+ Classify the sequence(s) given as inputs. See the [`ZeroShotClassificationPipeline`] documentation for more
+ information.
+
+ Args:
+ sequences (`str` or `List[str]`):
+ The sequence(s) to classify, will be truncated if the model input is too large.
+ candidate_labels (`str` or `List[str]`):
+ The set of possible class labels to classify each sequence into. Can be a single label, a string of
+ comma-separated labels, or a list of labels.
+ hypothesis_template (`str`, *optional*, defaults to `"This example is {}."`):
+ The template used to turn each label into an NLI-style hypothesis. This template must include a {} or
+ similar syntax for the candidate label to be inserted into the template. For example, the default
+ template is `"This example is {}."` With the candidate label `"sports"`, this would be fed into the
+ model like `" sequence to classify This example is sports . "`. The default template
+ works well in many cases, but it may be worthwhile to experiment with different templates depending on
+ the task setting.
+ multi_label (`bool`, *optional*, defaults to `False`):
+ Whether or not multiple candidate labels can be true. If `False`, the scores are normalized such that
+ the sum of the label likelihoods for each sequence is 1. If `True`, the labels are considered
+ independent and probabilities are normalized for each candidate by doing a softmax of the entailment
+ score vs. the contradiction score.
+
+ Return:
+ A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
+
+ - **sequence** (`str`) -- The sequence for which this is the output.
+ - **labels** (`List[str]`) -- The labels sorted by order of likelihood.
+ - **scores** (`List[float]`) -- The probabilities for each of the labels.
+ """
+ if len(args) == 0:
+ pass
+ elif len(args) == 1 and "candidate_labels" not in kwargs:
+ kwargs["candidate_labels"] = args[0]
+ else:
+ raise ValueError(f"Unable to understand extra arguments {args}")
+
+ return super().__call__(sequences, **kwargs)
+
+ def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
+ sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
+
+ for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
+ model_input = self._parse_and_tokenize([sequence_pair])
+
+ yield {
+ "candidate_label": candidate_label,
+ "sequence": sequences[0],
+ "is_last": i == len(candidate_labels) - 1,
+ **model_input,
+ }
+
+ def _forward(self, inputs):
+ candidate_label = inputs["candidate_label"]
+ sequence = inputs["sequence"]
+ model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
+ # `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported
+ model_forward = self.model.forward if self.framework == "pt" else self.model.call
+ if "use_cache" in inspect.signature(model_forward).parameters.keys():
+ model_inputs["use_cache"] = False
+ outputs = self.model(**model_inputs)
+
+ model_outputs = {
+ "candidate_label": candidate_label,
+ "sequence": sequence,
+ "is_last": inputs["is_last"],
+ **outputs,
+ }
+ return model_outputs
+
+ def postprocess(self, model_outputs, multi_label=False):
+ candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
+ sequences = [outputs["sequence"] for outputs in model_outputs]
+ logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
+ N = logits.shape[0]
+ n = len(candidate_labels)
+ num_sequences = N // n
+ reshaped_outputs = logits.reshape((num_sequences, n, -1))
+
+ if multi_label or len(candidate_labels) == 1:
+ # softmax over the entailment vs. contradiction dim for each label independently
+ entailment_id = self.entailment_id
+ contradiction_id = -1 if entailment_id == 0 else 0
+ entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
+ scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
+ scores = scores[..., 1]
+ else:
+ # softmax the "entailment" logits over all candidate labels
+ entail_logits = reshaped_outputs[..., self.entailment_id]
+ scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
+
+ top_inds = list(reversed(scores[0].argsort()))
+ return {
+ "sequence": sequences[0],
+ "labels": [candidate_labels[i] for i in top_inds],
+ "scores": scores[0, top_inds].tolist(),
+ }
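+
+ # Illustrative sketch (not part of the upstream file): in the single-label branch above, the
+ # entailment logits are softmaxed across the candidate labels, e.g. for two labels
+ #
+ #   >>> entail_logits = np.array([[2.0, 0.0]])
+ #   >>> np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
+ #   array([[0.88079708, 0.11920292]])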
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8da221a8c914ca385f04ae8610f2d2a93542a14
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__init__.py
@@ -0,0 +1,255 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from huggingface_hub import get_full_repo_name # for backward compatibility
+from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY # for backward compatibility
+from packaging import version
+
+from .. import __version__
+from .backbone_utils import BackboneConfigMixin, BackboneMixin
+from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
+from .doc import (
+ add_code_sample_docstrings,
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ copy_func,
+ replace_return_docstrings,
+)
+from .generic import (
+ ContextManagers,
+ ExplicitEnum,
+ ModelOutput,
+ PaddingStrategy,
+ TensorType,
+ add_model_info_to_auto_map,
+ cached_property,
+ can_return_loss,
+ expand_dims,
+ find_labels,
+ flatten_dict,
+ infer_framework,
+ is_jax_tensor,
+ is_numpy_array,
+ is_tensor,
+ is_tf_symbolic_tensor,
+ is_tf_tensor,
+ is_torch_device,
+ is_torch_dtype,
+ is_torch_tensor,
+ reshape,
+ squeeze,
+ strtobool,
+ tensor_size,
+ to_numpy,
+ to_py_obj,
+ transpose,
+ working_or_temp_dir,
+)
+from .hub import (
+ CLOUDFRONT_DISTRIB_PREFIX,
+ HF_MODULES_CACHE,
+ HUGGINGFACE_CO_PREFIX,
+ HUGGINGFACE_CO_RESOLVE_ENDPOINT,
+ PYTORCH_PRETRAINED_BERT_CACHE,
+ PYTORCH_TRANSFORMERS_CACHE,
+ S3_BUCKET_PREFIX,
+ TRANSFORMERS_CACHE,
+ TRANSFORMERS_DYNAMIC_MODULE_NAME,
+ EntryNotFoundError,
+ PushInProgress,
+ PushToHubMixin,
+ RepositoryNotFoundError,
+ RevisionNotFoundError,
+ cached_file,
+ default_cache_path,
+ define_sagemaker_information,
+ download_url,
+ extract_commit_hash,
+ get_cached_models,
+ get_file_from_repo,
+ has_file,
+ http_user_agent,
+ is_offline_mode,
+ is_remote_url,
+ move_cache,
+ send_example_telemetry,
+ try_to_load_from_cache,
+)
+from .import_utils import (
+ ACCELERATE_MIN_VERSION,
+ ENV_VARS_TRUE_AND_AUTO_VALUES,
+ ENV_VARS_TRUE_VALUES,
+ TORCH_FX_REQUIRED_VERSION,
+ USE_JAX,
+ USE_TF,
+ USE_TORCH,
+ DummyObject,
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ ccl_version,
+ direct_transformers_import,
+ get_torch_version,
+ is_accelerate_available,
+ is_apex_available,
+ is_aqlm_available,
+ is_auto_awq_available,
+ is_auto_gptq_available,
+ is_bitsandbytes_available,
+ is_bs4_available,
+ is_coloredlogs_available,
+ is_cv2_available,
+ is_cython_available,
+ is_datasets_available,
+ is_decord_available,
+ is_detectron2_available,
+ is_essentia_available,
+ is_faiss_available,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ is_flax_available,
+ is_fsdp_available,
+ is_ftfy_available,
+ is_g2p_en_available,
+ is_galore_torch_available,
+ is_in_notebook,
+ is_ipex_available,
+ is_jieba_available,
+ is_jinja_available,
+ is_jumanpp_available,
+ is_kenlm_available,
+ is_keras_nlp_available,
+ is_levenshtein_available,
+ is_librosa_available,
+ is_mlx_available,
+ is_natten_available,
+ is_ninja_available,
+ is_nltk_available,
+ is_onnx_available,
+ is_openai_available,
+ is_optimum_available,
+ is_pandas_available,
+ is_peft_available,
+ is_phonemizer_available,
+ is_pretty_midi_available,
+ is_protobuf_available,
+ is_psutil_available,
+ is_py3nvml_available,
+ is_pyctcdecode_available,
+ is_pytesseract_available,
+ is_pytest_available,
+ is_pytorch_quantization_available,
+ is_quanto_available,
+ is_rjieba_available,
+ is_sacremoses_available,
+ is_safetensors_available,
+ is_sagemaker_dp_enabled,
+ is_sagemaker_mp_enabled,
+ is_scipy_available,
+ is_sentencepiece_available,
+ is_seqio_available,
+ is_sklearn_available,
+ is_soundfile_availble,
+ is_spacy_available,
+ is_speech_available,
+ is_sudachi_available,
+ is_sudachi_projection_available,
+ is_tensorflow_probability_available,
+ is_tensorflow_text_available,
+ is_tf2onnx_available,
+ is_tf_available,
+ is_timm_available,
+ is_tokenizers_available,
+ is_torch_available,
+ is_torch_bf16_available,
+ is_torch_bf16_available_on_device,
+ is_torch_bf16_cpu_available,
+ is_torch_bf16_gpu_available,
+ is_torch_compile_available,
+ is_torch_cuda_available,
+ is_torch_fp16_available_on_device,
+ is_torch_fx_available,
+ is_torch_fx_proxy,
+ is_torch_mps_available,
+ is_torch_neuroncore_available,
+ is_torch_npu_available,
+ is_torch_sdpa_available,
+ is_torch_tensorrt_fx_available,
+ is_torch_tf32_available,
+ is_torch_tpu_available,
+ is_torch_xla_available,
+ is_torch_xpu_available,
+ is_torchaudio_available,
+ is_torchdistx_available,
+ is_torchdynamo_available,
+ is_torchdynamo_compiling,
+ is_torchvision_available,
+ is_training_run_on_sagemaker,
+ is_vision_available,
+ requires_backends,
+ torch_only_method,
+)
+from .peft_utils import (
+ ADAPTER_CONFIG_NAME,
+ ADAPTER_SAFE_WEIGHTS_NAME,
+ ADAPTER_WEIGHTS_NAME,
+ check_peft_version,
+ find_adapter_config_file,
+)
+
+
+WEIGHTS_NAME = "pytorch_model.bin"
+WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
+TF2_WEIGHTS_NAME = "tf_model.h5"
+TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
+TF_WEIGHTS_NAME = "model.ckpt"
+FLAX_WEIGHTS_NAME = "flax_model.msgpack"
+FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
+SAFE_WEIGHTS_NAME = "model.safetensors"
+SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
+CONFIG_NAME = "config.json"
+FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
+IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
+PROCESSOR_NAME = "processor_config.json"
+GENERATION_CONFIG_NAME = "generation_config.json"
+MODEL_CARD_NAME = "modelcard.json"
+
+SENTENCEPIECE_UNDERLINE = "▁"
+SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
+
+MULTIPLE_CHOICE_DUMMY_INPUTS = [
+ [[0, 1, 0, 1], [1, 0, 0, 1]]
+] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
+DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
+DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
+
+
+def check_min_version(min_version):
+ if version.parse(__version__) < version.parse(min_version):
+ if "dev" in min_version:
+ error_message = (
+ "This example requires a source install from HuggingFace Transformers (see "
+ "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
+ )
+ else:
+ error_message = f"This example requires a minimum version of {min_version},"
+ error_message += f" but the version found is {__version__}.\n"
+ raise ImportError(
+ error_message
+ + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
+ "versions of HuggingFace Transformers."
+ )
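+
+# Illustrative sketch (not part of the upstream file; the version string is hypothetical): example
+# scripts typically call this helper once at import time and get an `ImportError` when the
+# installed `transformers` is too old, e.g.
+#
+#   >>> check_min_version("4.0.0")  # returns None when the installed version is new enough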
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f1794818018f4f6b5364a67bda8ec5bd64b17b2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/backbone_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/backbone_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..60740eabdcc5c3c3cdba4434b9d92eaa7a89808e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/backbone_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/bitsandbytes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/bitsandbytes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8629ea72ee36271b47d91e947ad90b7c864d9a00
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/bitsandbytes.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/constants.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed979556a5c35c0cfe78f2d9e3359364af4d91ed
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/constants.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/doc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/doc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9b1875ce606dbab255ee1dd173371f6aa9208f5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/doc.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_keras_nlp_objects.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_keras_nlp_objects.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e4343a4db7627db9ff28b6735ccd96a3d0d4437
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_keras_nlp_objects.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_pt_objects.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_pt_objects.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6bdec4aa6d0e614547833a5865efccddc321ab1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_pt_objects.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_sentencepiece_and_tokenizers_objects.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_sentencepiece_and_tokenizers_objects.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe857c60c90c9f84efcc8cdb4a357483b93663e2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_sentencepiece_and_tokenizers_objects.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_speech_objects.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_speech_objects.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4eeecf48271c782a87656ab51a0f891a2f2276dc
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_speech_objects.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tensorflow_text_objects.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tensorflow_text_objects.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8834d060e656dbd8a9f4c340a89acb03669b261e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tensorflow_text_objects.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tokenizers_objects.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tokenizers_objects.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c8219c22621714ea8663dbf0b512b6e4a46c46f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tokenizers_objects.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/logging.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/logging.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9da7f7eeec9daa3ebee7b42c610b5acb645186b3
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/logging.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/model_parallel_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/model_parallel_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e5d6f14153d0b4f811edf00b0758a19ff181249f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/model_parallel_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/notebook.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/notebook.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dfc6231ff6fd851db854504120540d7b4a154d51
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/notebook.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/peft_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/peft_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17a1b532bb6ea893bc98902dee21d8ebf119d8c7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/peft_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/quantization_config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/quantization_config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd7c3a4095e9fb1fd30e24ba204669993d1bce12
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/quantization_config.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/sentencepiece_model_pb2_new.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/sentencepiece_model_pb2_new.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ccf5e75ae72ed11fcfd7948851548bf18c8333da
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/utils/__pycache__/sentencepiece_model_pb2_new.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/dummy_music_objects.py b/env-llmeval/lib/python3.10/site-packages/transformers/utils/dummy_music_objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..89052be47c1d32bac5cbd6fceab183fc1d75d3bf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/utils/dummy_music_objects.py
@@ -0,0 +1,16 @@
+# This file is autogenerated by the command `make fix-copies`, do not edit.
+from ..utils import DummyObject, requires_backends
+
+
+class Pop2PianoFeatureExtractor(metaclass=DummyObject):
+ _backends = ["music"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["music"])
+
+
+class Pop2PianoTokenizer(metaclass=DummyObject):
+ _backends = ["music"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["music"])
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/dummy_sentencepiece_objects.py b/env-llmeval/lib/python3.10/site-packages/transformers/utils/dummy_sentencepiece_objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..33ee907a741f18718692a7fc02aa0bcc03f39585
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/utils/dummy_sentencepiece_objects.py
@@ -0,0 +1,254 @@
+# This file is autogenerated by the command `make fix-copies`, do not edit.
+from ..utils import DummyObject, requires_backends
+
+
+class AlbertTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class BarthezTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class BartphoTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class BertGenerationTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class BigBirdTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class CamembertTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class CodeLlamaTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class CpmTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class DebertaV2Tokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class ErnieMTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class FNetTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class GemmaTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class GPTSw3Tokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class LayoutXLMTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class LlamaTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class M2M100Tokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class MarianTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class MBart50Tokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class MBartTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class MLukeTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class MT5Tokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class NllbTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class PegasusTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class PLBartTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class ReformerTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class RemBertTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class SeamlessM4TTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class SiglipTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class Speech2TextTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class SpeechT5Tokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class T5Tokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class UdopTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class XGLMTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class XLMProphetNetTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class XLMRobertaTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
+
+
+class XLNetTokenizer(metaclass=DummyObject):
+ _backends = ["sentencepiece"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["sentencepiece"])
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/fx.py b/env-llmeval/lib/python3.10/site-packages/transformers/utils/fx.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd2b1512b21ee21e920c09a45818127ba72641ed
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/utils/fx.py
@@ -0,0 +1,1313 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import builtins
+import collections
+import functools
+import inspect
+import math
+import operator
+import os
+import random
+import warnings
+from typing import Any, Callable, Dict, List, Optional, Type, Union
+
+import torch
+from torch import nn
+from torch.fx import Graph, GraphModule, Proxy, Tracer
+from torch.fx._compatibility import compatibility
+from torch.fx.proxy import ParameterProxy
+
+from .. import PretrainedConfig, PreTrainedModel, logging
+from ..models.auto import get_values
+from ..models.auto.modeling_auto import (
+ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
+ MODEL_FOR_BACKBONE_MAPPING_NAMES,
+ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
+ MODEL_FOR_CTC_MAPPING_NAMES,
+ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
+ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+ MODEL_FOR_IMAGE_MAPPING_NAMES,
+ MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
+ MODEL_FOR_MASKED_LM_MAPPING_NAMES,
+ MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
+ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
+ MODEL_FOR_PRETRAINING_MAPPING_NAMES,
+ MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
+ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
+ MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+ MODEL_MAPPING_NAMES,
+)
+from ..pytorch_utils import is_torch_greater_or_equal_than_2_0
+from ..utils import (
+ ENV_VARS_TRUE_VALUES,
+ TORCH_FX_REQUIRED_VERSION,
+ get_torch_version,
+ is_peft_available,
+ is_torch_fx_available,
+)
+
+
+if is_peft_available():
+ from peft import PeftModel
+
+
+logger = logging.get_logger(__name__)
+_IS_IN_DEBUG_MODE = os.environ.get("FX_DEBUG_MODE", "").upper() in ENV_VARS_TRUE_VALUES
+
+
+def _generate_supported_model_class_names(
+ model_name: Type[PretrainedConfig],
+ supported_tasks: Optional[Union[str, List[str]]] = None,
+) -> List[str]:
+ task_mapping = {
+ "default": MODEL_MAPPING_NAMES,
+ "pretraining": MODEL_FOR_PRETRAINING_MAPPING_NAMES,
+ "next-sentence-prediction": MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
+ "masked-lm": MODEL_FOR_MASKED_LM_MAPPING_NAMES,
+ "causal-lm": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
+ "seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
+ "speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
+ "multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
+ "document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
+ "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
+ "sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
+ "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
+ "masked-image-modeling": MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
+ "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+ "zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+ "ctc": MODEL_FOR_CTC_MAPPING_NAMES,
+ "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
+ "semantic-segmentation": MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
+ "backbone": MODEL_FOR_BACKBONE_MAPPING_NAMES,
+ "image-feature-extraction": MODEL_FOR_IMAGE_MAPPING_NAMES,
+ }
+
+ if supported_tasks is None:
+ supported_tasks = task_mapping.keys()
+ if isinstance(supported_tasks, str):
+ supported_tasks = [supported_tasks]
+
+ model_class_names = []
+ for task in supported_tasks:
+ class_name = task_mapping[task].get(model_name, None)
+ if class_name:
+ model_class_names.append(class_name)
+
+ return model_class_names
+
+
+_REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS = [
+ "altclip",
+ "albert",
+ "bart",
+ "bert",
+ "blenderbot",
+ "blenderbot-small",
+ "bloom",
+ "clip",
+ "convnext",
+ "deberta",
+ "deberta-v2",
+ "dinov2",
+ "distilbert",
+ "donut-swin",
+ "electra",
+ "gpt2",
+ "gpt_neo",
+ "gptj",
+ "hubert",
+ "layoutlm",
+ "llama",
+ "cohere",
+ "lxmert",
+ "m2m_100",
+ "marian",
+ "mbart",
+ "megatron-bert",
+ "mobilebert",
+ "mt5",
+ "nezha",
+ "opt",
+ "pegasus",
+ "plbart",
+ "resnet",
+ "roberta",
+ "segformer",
+ "speech_to_text",
+ "speech_to_text_2",
+ "swin",
+ "t5",
+ "trocr",
+ "vit",
+ "xglm",
+ "wav2vec2",
+ # "xlnet",
+]
+
+_FX_SUPPORTED_MODELS_WITH_KV_CACHE = ["llama", "opt"]
+
+_REGULAR_SUPPORTED_MODELS = []
+for item in _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS:
+ if isinstance(item, dict):
+ _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(**item))
+ else:
+ _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(item))
+
+_SPECIAL_SUPPORTED_MODELS = [
+ "CLIPTextModel",
+ "CLIPTextModelWithProjection",
+ "CLIPVisionModel",
+ "CLIPVisionModelWithProjection",
+ "AltCLIPTextModel",
+ "AltCLIPVisionModel",
+ "GitVisionModel",
+ "GPT2DoubleHeadsModel",
+ "Speech2Text2Decoder",
+ "TrOCRDecoder",
+ "PeftModelForCausalLM",
+ "PeftModelForSeq2SeqLM",
+ # TODO: add support for them as it should be quite easy to do so (small blocking issues).
+ # XLNetForQuestionAnswering,
+]
+_SUPPORTED_MODELS = tuple(sorted(set(_REGULAR_SUPPORTED_MODELS + _SPECIAL_SUPPORTED_MODELS)))
+
+
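+# The helpers below approximate only the *output shape* of common ops on "meta" tensors, so that
+# tracing never needs to materialize real data. As an illustrative sketch (the layer sizes are
+# arbitrary examples, not library requirements):
+#
+#   layer = torch.nn.Linear(16, 32)
+#   out = torch_nn_linear(layer, torch.empty(4, 16, device="meta"))
+#   # `out` is a meta tensor of shape (4, 32); no matrix multiplication is actually performed.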
+def torch_nn_embedding(self, input):
+ return torch.empty(*input.shape, self.weight.shape[-1], device="meta", dtype=self.weight.dtype)
+
+
+def torch_nn_functional_embedding(
+ input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False
+):
+ return torch.empty(*input.shape, weight.shape[-1], device="meta", dtype=weight.dtype)
+
+
+def torch_nn_layernorm(self, input):
+ return input
+
+
+def torch_nn_groupnorm(self, input):
+ return input
+
+
+def torch_nn_linear(self, input):
+ return torch.empty(input.shape[:-1] + (self.out_features,), device="meta")
+
+
+def torch_relu(x):
+ return x
+
+
+def torch_nn_relu(self, x):
+ return x
+
+
+def torch_nn_functional_relu(x, inplace=False):
+    if inplace:
+ raise ValueError("Don't support in-place functional.relu for MetaTensor analysis")
+ return x
+
+
+def torch_where(condition, x, y):
+ # torch.where returns the broadcasted tensor of condition, x, and y,
+ # so hack it by using addition
+ return condition.to(device="meta") + x.to(device="meta") + y.to(device="meta")
+
+
+def torch_abs(input, *, out=None):
+ if out is not None:
+ raise ValueError("Don't support in-place abs for MetaTensor analysis")
+ return input
+
+
+def torch_arange(*args, **kwargs):
+ n = len(args)
+ step = 1
+ if n == 1:
+ start = 0
+ end = args[0]
+ elif n == 2:
+ start, end = args
+ else:
+ start, end, step = args
+ if isinstance(start, float):
+ start = int(start)
+ if isinstance(end, float):
+        end = int(end)
+ if isinstance(step, float):
+ step = int(step)
+ step = kwargs.get("step", step)
+ dtype = kwargs.get("dtype")
+ return torch.empty((end - start) // step, dtype=dtype, device="meta")
+
+
+def torch_full(*args, **kwargs):
+ args = list(args)
+ if isinstance(args[1], torch.Tensor) and args[1].device == torch.device("meta"):
+ args[1] = 1 # Any value.
+ kwargs_without_device = dict(kwargs)
+ kwargs_without_device.pop("device", None)
+ return torch.full(*args, **kwargs_without_device)
+
+
+def torch_cat(tensors, dim=None, axis=None, *, out=None):
+ if dim is None and axis is None:
+ dim = 0
+ if dim is None and axis is not None:
+ dim = axis
+ if dim < 0:
+ dim = tensors[0].dim() + dim
+ shapes = [t.shape for t in tensors]
+ shape = list(shapes[0])
+ concatenated_dim = sum(shape[dim] for shape in shapes)
+ final_shape = shape[:dim] + [concatenated_dim] + shape[dim + 1 :]
+ return torch.empty(final_shape, device="meta")
+
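+# Illustrative check of the concatenation rule above (an example, not used by the tracer):
+# concatenating meta tensors of shapes (2, 3) and (2, 5) along dim=1 yields shape (2, 8):
+#
+#   out = torch_cat([torch.empty(2, 3, device="meta"), torch.empty(2, 5, device="meta")], dim=1)
+#   assert tuple(out.shape) == (2, 8)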
+
+def torch_stack(tensors, dim=None, axis=None, *, out=None):
+ if dim is None and axis is None:
+ dim = 0
+ if dim is None and axis is not None:
+ dim = axis
+ if dim < 0:
+ dim = tensors[0].dim() + 1 + dim
+ shape = list(tensors[0].shape)
+ shape.insert(dim, len(tensors))
+ return torch.empty(shape, device="meta")
+
+
+def torch_add(input, other, *, alpha=1, out=None):
+ if not isinstance(input, torch.Tensor):
+ return torch.empty_like(other, device="meta")
+ if not isinstance(other, torch.Tensor):
+ return torch.empty_like(input, device="meta")
+ max_length = max(input.dim(), other.dim())
+ input_shape = list(input.shape) + [1] * (max_length - input.dim())
+ other_shape = list(other.shape) + [1] * (max_length - other.dim())
+ shape = []
+ for i in range(max_length):
+ shape.append(max(input_shape[i], other_shape[i]))
+ return torch.empty(shape, device="meta")
+
+
+def torch_mul(input, other, *, out=None):
+ return torch_add(input, other, out=out)
+
+
+def torch_tensor_mul(self, other):
+ return torch_mul(self, other)
+
+
+def torch_matmul(input, other, *, out=None):
+ d1 = input.dim()
+ d2 = other.dim()
+ shape = None
+ if d1 == 1 and d2 == 1:
+ shape = None
+ elif d1 == 2 and d2 == 2:
+ shape = (input.size(0), other.size(1))
+ elif d1 == 1 and d2 == 2:
+ shape = (other.size(1),)
+    elif d1 == 2 and d2 == 1:
+ shape = (input.size(0),)
+ else:
+ max_length = max(input.dim(), other.dim())
+ shape1 = list(input.shape)
+ shape2 = list(other.shape)
+ if d1 == 1:
+ shape1 = [1] + shape1
+ if d2 == 1:
+ shape2.append(1)
+ shape1 = [-1] * (max_length - d1) + list(input.shape)
+ shape2 = [-1] * (max_length - d2) + list(other.shape)
+ shape = []
+ for i in range(max_length):
+ shape.append(max(shape1[i], shape2[i]))
+ shape[-2] = shape1[-2]
+ shape[-1] = shape2[-1]
+ if d1 == 1:
+ shape.pop(-2)
+ if d2 == 1:
+ shape.pop(-1)
+ if shape is None:
+ return torch.tensor(0.0, device="meta")
+ return torch.empty(*shape, device="meta")
+
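+# Illustrative example of the broadcasting logic above: a batched (2, 3, 4) operand multiplied
+# with a (4, 5) operand is inferred as a meta tensor of shape (2, 3, 5):
+#
+#   out = torch_matmul(torch.empty(2, 3, 4, device="meta"), torch.empty(4, 5, device="meta"))
+#   assert tuple(out.shape) == (2, 3, 5)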
+
+def torch_bmm(input, mat2, *, out=None):
+ if out is not None:
+ raise ValueError("Don't support in-place bmm for MetaTensor analysis")
+ batch_size, n, m = input.shape
+ _, _, p = mat2.shape
+ return torch.empty(batch_size, n, p, device="meta")
+
+
+def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None):
+ if out is not None:
+ raise ValueError("Don't support in-place baddbmm for MetaTensor analysis")
+ return torch_bmm(batch1, batch2)
+
+
+def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None):
+ return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out)
+
+
+def torch_einsum(equation, *operands):
+ # TODO: infer shape without performing the computation, this might be quite hard.
+ concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands)
+ return torch.einsum(equation, *concrete_operands).to("meta")
+
+
+def torch_tensor_repeat(self, *sizes):
+ shape = list(self.shape)
+ for i, x in enumerate(sizes):
+ shape[i] *= x
+ return torch.empty(shape, device="meta")
+
+
+def torch_repeat_interleave(*args, dim=None, output_size=None):
+ num_args = len(args)
+ if num_args == 1:
+ shape = [output_size if output_size is not None else args[0].sum()]
+ else:
+ shape = list(args[0].shape)
+ if dim is None:
+ if num_args > 2:
+ dim = args[2]
+ else:
+ shape = [sum(shape)]
+ dim = 0
+ repeats = args[1]
+ if isinstance(repeats, int) or torch.numel(repeats) == 1:
+ shape[dim] *= int(repeats)
+ else:
+ shape[dim] = output_size if output_size is not None else repeats.sum()
+ return torch.empty(*shape, device="meta")
+
+
+def torch_index_select(input, dim, index, *, out=None):
+ shape = list(input.shape)
+ shape[dim] = len(index)
+ return torch.empty(*shape, device="meta")
+
+
+def torch_tensor_index_select(self, dim, index):
+ return torch_index_select(self, dim, index)
+
+
+def torch_gather(input, dim, index, *, sparse_grad=False, out=None):
+ shape = list(input.shape)
+ shape[dim] = index.shape[dim]
+ return torch.empty(*shape, device="meta")
+
+
+def torch_tensor_gather(self, dim, index):
+ return torch_gather(self, dim, index)
+
+
+def torch_roll(input, shifts, dims=None):
+ return input
+
+
+def torch_flip(input, dims):
+ return input
+
+
+def torch_tensor_flip(self, dims):
+ return self
+
+
+def torch_nn_conv1d(self, input):
+ l_in = input.shape[-1]
+ shape = None
+ padding = self.padding
+ if padding == "valid":
+ padding = (0, 0)
+ if padding == "same":
+ shape = list(input.shape)
+ if shape is None:
+ shape = list(input.shape)
+ l_out = math.floor(
+ (l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
+ )
+ shape[-1] = l_out
+ shape[-2] = self.out_channels
+ return torch.empty(shape, device="meta")
+
+
+def torch_nn_conv2d(self, input):
+ h_in, w_in = input.shape[-2:]
+ shape = None
+ padding = self.padding
+ if padding == "valid":
+ padding = (0, 0)
+ if padding == "same":
+ shape = list(input.shape)
+ if shape is None:
+ shape = list(input.shape)
+ h_out = math.floor(
+ (h_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
+ )
+ w_out = math.floor(
+ (w_in + 2 * padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1
+ )
+ shape[-2:] = [h_out, w_out]
+ shape[-3] = self.out_channels
+ return torch.empty(shape, device="meta")
+
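+# Illustrative example of the Conv2d output-size formula above (arbitrary example sizes): with
+# kernel_size=3, stride=2 and padding=1, a (1, 3, 224, 224) input maps to (1, 8, 112, 112):
+#
+#   conv = torch.nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=1)
+#   out = torch_nn_conv2d(conv, torch.empty(1, 3, 224, 224, device="meta"))
+#   assert tuple(out.shape) == (1, 8, 112, 112)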
+
+def torch_squeeze(input, dim=None):
+ shape = list(input.shape)
+ if dim is not None:
+ if dim < 0:
+ dim = input.dim() + dim
+ if shape[dim] == 1:
+ shape.pop(dim)
+ else:
+ new_shape = []
+ for dim_value in shape:
+ if dim_value == 1:
+ continue
+ new_shape.append(dim_value)
+ shape = new_shape
+ return torch.empty(shape, device="meta")
+
+
+def torch_tensor_squeeze(self, dim=None):
+ return torch_squeeze(self, dim)
+
+
+def torch_unsqueeze(input, dim):
+ shape = list(input.shape)
+ if dim < 0:
+ dim = input.dim() + 1 + dim
+ shape.insert(dim, 1)
+ return torch.empty(shape, device="meta")
+
+
+def torch_tensor_unsqueeze(self, dim):
+ return torch_unsqueeze(self, dim)
+
+
+def torch_unique_consecutive(input, **kwargs):
+ output = torch.unique_consecutive(torch.zeros_like(input, device="cpu"), **kwargs)
+ if isinstance(output, torch.Tensor):
+ return output.to("meta")
+ else:
+        return tuple(map(lambda x: x.to("meta"), output))
+
+
+def torch_nn_functional_one_hot(tensor, num_classes=-1):
+ if num_classes < 0:
+ raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis")
+ shape = list(tensor.shape) + [num_classes]
+ return torch.empty(shape, device="meta")
+
+
+def torch_nn_functional_scaled_dot_product_attention(
+ query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None
+):
+ target_length = query.shape[-2]
+ head_dim = value.shape[-1]
+ return torch.empty((*query.shape[:-2], target_length, head_dim), device="meta")
+
+
+def torch_nn_mseloss(self, input, target):
+ if self.reduction == "none":
+ shape = target.shape
+ else:
+ shape = (1,)
+ return torch.empty(shape, device="meta")
+
+
+def torch_nn_crossentropyloss(self, input, target):
+ if self.reduction == "none":
+ shape = target.shape
+ else:
+ shape = (1,)
+ return torch.empty(shape, device="meta")
+
+
+def torch_nn_bcewithlogitsloss(self, input, target):
+ if self.reduction == "none":
+ shape = target.shape
+ else:
+ shape = (1,)
+ return torch.empty(shape, device="meta")
+
+
+def operator_getitem(a, b):
+ def to_concrete(t):
+ if isinstance(t, torch.Tensor):
+ concrete = torch.ones_like(t, device="cpu")
+ if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]:
+ concrete = concrete.to(torch.int64)
+ return concrete
+ return t
+
+ if isinstance(a, torch.Tensor):
+ # TODO: infer shape without performing the computation.
+ if isinstance(b, tuple):
+ b = tuple(map(to_concrete, b))
+ else:
+ b = to_concrete(b)
+ return operator.getitem(torch.empty_like(a, device="cpu"), b).to("meta")
+ return operator.getitem(a, b)
+
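+# Illustrative example: indexing a meta tensor is resolved by replaying the same indexing on a CPU
+# placeholder of identical shape and moving the result back to the meta device:
+#
+#   out = operator_getitem(torch.empty(4, 8, device="meta"), (slice(None), 0))
+#   assert tuple(out.shape) == (4,)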
+
+_MANUAL_META_OVERRIDES: Dict[Callable, Callable] = {
+ torch.nn.Embedding: torch_nn_embedding,
+ torch.nn.functional.embedding: torch_nn_functional_embedding,
+ torch.nn.LayerNorm: torch_nn_layernorm,
+ torch.nn.GroupNorm: torch_nn_groupnorm,
+ torch.nn.Linear: torch_nn_linear,
+ torch.relu: torch_relu,
+ torch.nn.functional.relu: torch_nn_functional_relu,
+ torch.nn.ReLU: torch_nn_relu,
+ torch.where: torch_where,
+ torch.abs: torch_abs,
+ torch.arange: torch_arange,
+ torch.full: torch_full,
+ torch.cat: torch_cat,
+ torch.stack: torch_stack,
+ torch.add: torch_add,
+ torch.mul: torch_mul,
+ torch.Tensor.mul: torch_tensor_mul,
+ torch.matmul: torch_matmul,
+ torch.bmm: torch_bmm,
+ torch.baddbmm: torch_baddbmm,
+ torch.Tensor.baddbmm: torch_tensor_baddbmm,
+ torch.einsum: torch_einsum,
+ torch.Tensor.repeat: torch_tensor_repeat,
+ torch.repeat_interleave: torch_repeat_interleave,
+ torch.roll: torch_roll,
+ torch.flip: torch_flip,
+ torch.Tensor.flip: torch_tensor_flip,
+ torch.index_select: torch_index_select,
+ torch.Tensor.index_select: torch_tensor_index_select,
+ torch.gather: torch_gather,
+ torch.Tensor.gather: torch_tensor_gather,
+ torch.nn.Conv1d: torch_nn_conv1d,
+ torch.nn.Conv2d: torch_nn_conv2d,
+ torch.squeeze: torch_squeeze,
+ torch.Tensor.squeeze: torch_tensor_squeeze,
+ torch.unsqueeze: torch_unsqueeze,
+ torch.Tensor.unsqueeze: torch_tensor_unsqueeze,
+ torch.unique_consecutive: torch_unique_consecutive,
+ torch.nn.functional.one_hot: torch_nn_functional_one_hot,
+ torch.nn.MSELoss: torch_nn_mseloss,
+ torch.nn.CrossEntropyLoss: torch_nn_crossentropyloss,
+ torch.nn.BCEWithLogitsLoss: torch_nn_bcewithlogitsloss,
+ operator.getitem: operator_getitem,
+}
+
+if is_torch_greater_or_equal_than_2_0:
+ _MANUAL_META_OVERRIDES[
+ torch.nn.functional.scaled_dot_product_attention
+ ] = torch_nn_functional_scaled_dot_product_attention
+
+
+class HFProxy(Proxy):
+ """
+ Proxy that uses metadata to handle data-dependent control-flow.
+ """
+
+ def install_metadata(self, metadata):
+ self._metadata = metadata
+
+ @property
+ def shape(self):
+ return self.tracer.create_proxy("call_method", "size", (self,), {})
+
+ @property
+ def device(self):
+ # Hack so we can track when devices are used. During meta-tensor propagation,
+ # replace these values with a constant 'meta'
+ return MetaDeviceAttribute(self, "device")
+
+ def __len__(self):
+ if hasattr(self, "_metadata") and self._metadata is not None:
+ return len(self._metadata)
+ return super().__len__()
+
+ def __bool__(self):
+ if hasattr(self, "_metadata") and self._metadata is not None:
+ return self._metadata
+ return super().__bool__()
+
+ def __getattr__(self, k):
+ if k == "_metadata":
+ return self.__getattribute__(k)
+ # note: not added to the graph yet, if this is a method call
+ # we peephole optimize to the method invocation
+ return HFAttribute(self, k)
+
+ def __setitem__(self, indices, values):
+ return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {})
+
+ def __contains__(self, key):
+ if hasattr(self, "_metadata") and self._metadata is not None:
+ return key in self._metadata
+ return super().__contains__(key)
+
+
+class HFAttribute(HFProxy):
+ def __init__(self, root, attr: str):
+ self.root = root
+ self.attr = attr
+ self.tracer = root.tracer
+ self._node = None
+
+ if hasattr(self.root, "_metadata"):
+ self.install_metadata(getattr(self.root._metadata, attr))
+
+ @property
+ def node(self):
+ # the node for attributes is added lazily, since most will just be method calls
+ # which do not rely on the getitem call
+ if self._node is None:
+ self._node = self.tracer.create_proxy("call_function", builtins.getattr, (self.root, self.attr), {}).node
+ return self._node
+
+ def __call__(self, *args, **kwargs):
+ return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs)
+
+
+class MetaDeviceAttribute(HFAttribute):
+ pass
+
+
+def _proxies_to_metas(v):
+ """Returns the underlying metadata for HFProxies, and behaves like the identity for the others."""
+ if isinstance(v, MetaDeviceAttribute):
+ return "meta"
+ if isinstance(v, torch.fx.Proxy):
+ if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")):
+ raise RuntimeError(f"No metadata was found for {v}")
+ return v._metadata
+ return v
+
+
+def _gen_constructor_wrapper(target):
+ @functools.wraps(target)
+ def wrapper(*args, **kwargs):
+ proxy = None
+
+ def check_has_proxy(v):
+ if isinstance(v, Proxy):
+ nonlocal proxy
+ proxy = v
+
+ torch.fx.node.map_aggregate(args, check_has_proxy)
+ torch.fx.node.map_aggregate(kwargs, check_has_proxy)
+
+ if proxy is not None:
+ return proxy.tracer.create_proxy("call_function", target, args, kwargs)
+ else:
+ return target(*args, **kwargs)
+
+ return wrapper, target
+
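+# Illustrative usage (names are for exposition only): while tracing, HFTracer swaps constructors
+# such as `torch.ones` for this wrapper. Without a Proxy among the arguments the original
+# constructor runs unchanged; with one, the call is recorded on the graph instead of executed:
+#
+#   wrapper, original = _gen_constructor_wrapper(torch.ones)
+#   wrapper(2, 3)           # no Proxy arguments, so this simply returns torch.ones(2, 3)
+#   # wrapper(some_proxy)   # a Proxy argument (hypothetical) turns this into a "call_function" node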
+
+def _generate_random_int(low: int = 10, high: int = 20, forbidden_values: Optional[List[int]] = None):
+ if forbidden_values is None:
+ forbidden_values = []
+ value = random.randint(low, high)
+ while value in forbidden_values:
+ value = random.randint(low, high)
+ return value
+
+
+class HFTracer(Tracer):
+ """
+ Tracer that is able to symbolically trace models from the library. To do that, it uses the HFProxy instead of the
+ regular PyTorch torch.fx.Proxy.
+ """
+
+ # Feature flag for proxying accesses to buffer values
+ proxy_buffer_attributes: bool = True
+ allow_insert_stateless_mods: bool = True
+ _TORCH_METHODS_TO_PATCH = [
+ "arange",
+ "zeros",
+ "ones",
+ "full",
+ "full_like",
+ "eye",
+ "empty",
+ "tensor",
+ "clamp",
+ "finfo",
+ ]
+ supported_archs = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel)
+
+ def __init__(self, autowrap_modules=(math,), autowrap_functions=()):
+ super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions)
+
+ if not is_torch_fx_available():
+ raise ImportError(
+ f"Found an incompatible version of torch. Found version {get_torch_version()}, but only version "
+ f"{TORCH_FX_REQUIRED_VERSION} is supported."
+ )
+
+ def _generate_dummy_input(
+ self, model: PreTrainedModel, input_name: str, shape: List[int], input_names: List[str]
+ ) -> Dict[str, torch.Tensor]:
+ """Generates dummy input for model inference recording."""
+ # Retrieving the model class, either from the "class_for_deserialization" attribute if the model was restored
+ # from pickle, or from the "__class__" attribute in the general case.
+ model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__
+ device = model.device
+ inputs_dict = {}
+
+        # When tracing a model with a KV cache, we simply need to ensure that the KV cache length is larger than one to
+        # correctly pass certain control flows (example: https://github.com/huggingface/transformers/blob/5c8d941d66734811d2ef6f57f15b44f7fb7a98c4/src/transformers/modeling_attn_mask_utils.py#L162).
+        # After tracing, the model can still be used with arbitrary lengths different from the one used during tracing.
+ kv_cache_length = 5
+
+ if input_name in ["labels", "start_positions", "end_positions"]:
+ batch_size = shape[0]
+ if model_class_name in [
+ *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES),
+ *get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES),
+ *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES),
+ *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
+ *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES),
+ ]:
+ inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device)
+ elif model_class_name in [
+ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES),
+ *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES),
+ "XLNetForQuestionAnswering",
+ ]:
+ inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
+ inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
+ elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES):
+ if not hasattr(model.config, "problem_type") or model.config.problem_type is None:
+ raise ValueError(
+ "Could not retrieve the problem type for the sequence classification task, please set "
+ 'model.config.problem_type to one of the following values: "regression", '
+ '"single_label_classification", or "multi_label_classification".'
+ )
+
+ if model.config.problem_type == "regression":
+ labels_shape = (batch_size, model.config.num_labels)
+ labels_dtype = torch.float32
+ elif model.config.problem_type == "single_label_classification":
+ labels_shape = (batch_size,)
+ labels_dtype = torch.long
+ elif model.config.problem_type == "multi_label_classification":
+ labels_shape = (batch_size, model.config.num_labels)
+ labels_dtype = torch.float32
+ else:
+ raise ValueError(
+ 'Expected model.config.problem_type to be either: "regression", "single_label_classification"'
+ f', or "multi_label_classification", but "{model.config.problem_type}" was provided.'
+ )
+ inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device)
+
+ elif model_class_name in [
+ *get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES),
+ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES),
+ *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES),
+ *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES),
+ *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES),
+ *get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES),
+ "GPT2DoubleHeadsModel",
+ "PeftModelForCausalLM",
+ "PeftModelForSeq2SeqLM",
+ ]:
+ inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device)
+ elif model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]:
+ inputs_dict["labels"] = torch.zeros(shape, dtype=torch.float32, device=device)
+ else:
+ raise NotImplementedError(
+ f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet."
+ )
+ elif "pixel_values" in input_name:
+ batch_size = shape[0]
+ image_size = getattr(model.config, "image_size", None)
+ if image_size is None:
+ if hasattr(model.config, "vision_config"):
+ image_size = model.config.vision_config.image_size
+ elif hasattr(model.config, "encoder"):
+ image_size = model.config.encoder.image_size
+ else:
+ image_size = (_generate_random_int(), _generate_random_int())
+
+            # If num_channels is not in the config, use an arbitrary value.
+ num_channels = getattr(model.config, "num_channels", 3)
+ if not isinstance(image_size, collections.abc.Iterable):
+ image_size = (image_size, image_size)
+ height, width = image_size
+ inputs_dict[input_name] = torch.zeros(
+ batch_size, num_channels, height, width, dtype=torch.float32, device=device
+ )
+ elif "bbox" in input_name:
+ inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device)
+ elif "input_features" in input_name:
+ inputs_dict[input_name] = torch.zeros(
+ *shape, model.config.input_feat_per_channel, dtype=torch.float, device=device
+ )
+ elif "visual_feats" in input_name:
+ inputs_dict[input_name] = torch.zeros(
+ shape
+ + [
+ model.config.visual_feat_dim,
+ ],
+ dtype=torch.float,
+ device=device,
+ )
+ elif "visual_pos" in input_name:
+ inputs_dict[input_name] = torch.zeros(
+ shape
+ + [
+ model.config.visual_pos_dim,
+ ],
+ dtype=torch.float,
+ device=device,
+ )
+ elif "inputs" in input_name:
+ inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device)
+ elif "input_values" in input_name:
+ batch_size, _ = shape
+            # Generating a big sequence length for audio inputs.
+ seq_length = _generate_random_int(low=10000, high=20000)
+ inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device)
+ elif "mask" in input_name:
+ if "past_key_values" in input_names:
+ mask_shape = [shape[0], shape[1] + kv_cache_length]
+ else:
+ mask_shape = shape
+
+ inputs_dict[input_name] = torch.zeros(mask_shape, dtype=torch.long, device=device)
+ elif "ids" in input_name:
+ inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device)
+ elif "past_key_values" in input_name:
+ if model.config.model_type not in _FX_SUPPORTED_MODELS_WITH_KV_CACHE:
+ raise NotImplementedError(
+ f"Symbolic trace with past_key_values input is not supported yet for the model {model.config.model_type}. Please open an issue or a PR in Transformers repository if you would like to see the support added."
+ )
+ num_heads = model.config.num_attention_heads
+ head_dim = model.config.hidden_size // model.config.num_attention_heads
+
+ cache_shape = (shape[0], num_heads, kv_cache_length, head_dim)
+ pkv = tuple(
+ (
+ torch.rand(cache_shape, dtype=torch.float, device=device),
+ torch.rand(cache_shape, dtype=torch.float, device=device),
+ )
+ for i in range(model.config.num_hidden_layers)
+ )
+ inputs_dict[input_name] = pkv
+ else:
+ shape_with_hidden_size = shape + [model.config.hidden_size]
+ inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device)
+
+ return inputs_dict
+
+ def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None):
+ rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn)
+
+ if kind == "placeholder" and target in self.meta_args:
+ rv.install_metadata(self.meta_args[target])
+ return rv
+
+ if target in self.orig_fns:
+ # NOTE: tensor constructors in PyTorch define the `device` argument as
+ # *kwargs-only*. That is why this works. If you add methods to
+ # _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only,
+ # this will break and you will likely see issues where we cannot infer
+ # the size of the output.
+ if "device" in kwargs:
+ kwargs["device"] = "meta"
+
+ try:
+ args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas)
+ kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas)
+
+ if kind == "call_function":
+ meta_target = _MANUAL_META_OVERRIDES.get(target, target)
+ meta_out = meta_target(*args_metas, **kwargs_metas)
+ if isinstance(meta_out, torch.Tensor):
+ meta_out = meta_out.to(device="meta")
+ elif kind == "call_method":
+ method = getattr(args_metas[0].__class__, target)
+ meta_target = _MANUAL_META_OVERRIDES.get(method, method)
+ meta_out = meta_target(*args_metas, **kwargs_metas)
+ elif kind == "call_module":
+ if not hasattr(self, "orig_forward"):
+ raise AttributeError(f"{self} does not have an attribute called orig_forward")
+ self._disable_module_getattr = True
+ try:
+ mod = self.root.get_submodule(target)
+ mod_type = type(mod)
+ if mod_type in _MANUAL_META_OVERRIDES:
+ meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas)
+ else:
+ meta_out = self.orig_forward(*args_metas, **kwargs_metas)
+ finally:
+ self._disable_module_getattr = False
+ elif kind == "get_attr":
+ self._disable_module_getattr = True
+ try:
+ attr_itr = self.root
+ atoms = target.split(".")
+ for atom in atoms:
+ attr_itr = getattr(attr_itr, atom)
+ if isinstance(attr_itr, torch.Tensor):
+ meta_out = attr_itr.to(device="meta")
+ else:
+ meta_out = attr_itr
+ finally:
+ self._disable_module_getattr = False
+ else:
+ return rv
+
+ if not isinstance(rv, Proxy):
+ raise ValueError("Don't support composite output yet")
+ rv.install_metadata(meta_out)
+ except Exception as e:
+ if _IS_IN_DEBUG_MODE:
+ warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}")
+
+ return rv
+
+ # Replaced by .getattr from PyTorch 1.13
+ def _module_getattr(self, attr, attr_val, parameter_proxy_cache):
+ if getattr(self, "_disable_module_getattr", False):
+ return attr_val
+ else:
+
+ def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache):
+ for n, p in collection_to_search:
+ if attr_val is p:
+ if n not in parameter_proxy_cache:
+ kwargs = {}
+ if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters:
+ kwargs["proxy_factory_fn"] = (
+ None
+ if not self.param_shapes_constant
+ else lambda node: ParameterProxy(self, node, n, attr_val)
+ )
+ val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type]
+ parameter_proxy_cache[n] = val_proxy
+ return parameter_proxy_cache[n]
+ return None
+
+ if isinstance(attr_val, torch.nn.Parameter):
+ maybe_parameter_proxy = maybe_get_proxy_for_attr(
+ attr_val, self.root.named_parameters(), parameter_proxy_cache
+ )
+ if maybe_parameter_proxy is not None:
+ return maybe_parameter_proxy
+
+ if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):
+ maybe_buffer_proxy = maybe_get_proxy_for_attr(
+ attr_val, self.root.named_buffers(), parameter_proxy_cache
+ )
+ if maybe_buffer_proxy is not None:
+ return maybe_buffer_proxy
+
+ return attr_val
+
+ # Needed for PyTorch 1.13+
+ def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]):
+ return self._module_getattr(attr, attr_val, parameter_proxy_cache)
+
+ def call_module(self, m, forward, args, kwargs):
+ self.orig_forward = forward
+ return super().call_module(m, forward, args, kwargs)
+
+ def proxy(self, node):
+ return HFProxy(node, self)
+
+ def trace(
+ self,
+ root: Union[torch.nn.Module, Callable[..., Any]],
+ concrete_args: Optional[Dict[str, Any]] = None,
+ dummy_inputs: Optional[Dict[str, Any]] = None,
+ complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True,
+ ) -> Graph:
+ """
+ Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a
+ `torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from
+ the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a
+ `torch.nn.Module` instance to use as the root and add embedded constants to.
+
+ Args:
+ root (`torch.nn.Module` or `Callable`):
+                Either a `torch.nn.Module` or a function to be traced through. If `root` is not a
+ [`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail.
+            concrete_args (`Dict[str, Any]`, *optional*):
+                Concrete arguments that should not be treated as Proxies.
+ dummy_inputs (`Dict[str, Any]`, *optional*):
+ The dummy inputs needed to handle data-dependent control-flow if `root` is not a
+ [`~transformers.PreTrainedModel`]. It can also be used when `root` is a
+ [`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs.
+ complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`):
+ If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in
+ `dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing.
+
+ Returns:
+ `torch.fx.Graph`:
+ A FX `torch.fx.Graph` representing the semantics of the passed-in `root`.
+
+ """
+ sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root)
+
+ if concrete_args is None:
+ concrete_args = {}
+
+ if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs:
+ for param in sig.parameters.values():
+ if param.name in dummy_inputs:
+ continue
+ if param.default is inspect.Parameter.empty:
+ raise ValueError(f"You need to specify a default value for the parameter {param.name}.")
+ concrete_args.update(
+ {
+ p.name: p.default
+ for p in sig.parameters.values()
+ if (p.name not in dummy_inputs and p.name not in concrete_args)
+ }
+ )
+
+ input_names = sig.parameters.keys() - concrete_args.keys()
+
+ # Creating a random input shape to generate dummy inputs.
+ batch_size = _generate_random_int()
+ sequence_length = _generate_random_int()
+ shape = [batch_size, sequence_length]
+
+ if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
+ num_choices = _generate_random_int(low=2, high=5)
+ shape.insert(1, num_choices)
+
+ inputs = dict(dummy_inputs) if dummy_inputs is not None else {}
+ for input_name in input_names:
+ if input_name in inputs:
+ continue
+ # We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to
+ # be able to use HFTracer._generate_dummy_input.
+ if isinstance(root, self.supported_archs) or type(root).__qualname__.startswith(
+ ("_deserialize_graph_module", "_CodeOnlyModule")
+ ):
+ inputs.update(self._generate_dummy_input(root, input_name, shape, input_names=input_names))
+ else:
+ raise RuntimeError(
+ f"Could not generate input named {input_name} for because root is not a"
+ " transformers.PreTrainedModel."
+ )
+
+ concrete_metas = {
+ input_name: input_.to("meta") if isinstance(input_, torch.Tensor) else input_
+ for input_name, input_ in inputs.items()
+ }
+ for param in sig.parameters.values():
+ if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names:
+ concrete_metas[f"**{param.name}"] = {}
+ self.meta_args = concrete_metas
+ self.patched_torch_methods = {
+ target: _gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH
+ }
+ self.orig_fns = set()
+
+ for name, (wrapper, orig) in self.patched_torch_methods.items():
+ setattr(torch, name, wrapper)
+ self.orig_fns.add(orig)
+
+ try:
+ self.graph = super().trace(root, concrete_args=concrete_args)
+ finally:
+ for name, (_, orig) in self.patched_torch_methods.items():
+ setattr(torch, name, orig)
+
+ # This is necessary because concrete args are added as input to the traced module since
+ # https://github.com/pytorch/pytorch/pull/55888.
+ for node in self.graph.nodes:
+ if node.op == "placeholder":
+ # Removing default values for inputs as the forward pass will fail with them.
+ if node.target in input_names:
+ node.args = ()
+ # Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor].
+ # It cannot infer on the attributes and methods the input should have, and fails.
+ node.type = torch.Tensor
+ # It is a concrete arg so it is not used and should be removed.
+ else:
+ to_visit = [node]
+ to_delete = collections.OrderedDict()
+ while to_visit:
+ n = to_visit.pop(0)
+ to_delete[n] = None
+ to_visit += list(n.users.keys())
+
+ for user in reversed(to_delete.keys()):
+ self.graph.erase_node(user)
+
+            # TODO: solve the GraphModule creation issue.
+            # Without this, the return type annotation "Tuple" causes code execution to fail.
+ if node.op == "output":
+ node.type = None
+
+ return self.graph
+
+ def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool:
+ """
+        Whether the module was instantiated with Proxies. If that is the case, such a module cannot be a leaf module
+ because its attributes are input-dependent.
+ """
+ return any(isinstance(attr, Proxy) for attr in mod.__dict__.values())
+
+ def _insert_module_as_submodule(self, mod: nn.Module) -> str:
+ """
+        Helper method which tries to insert a module that was not declared as a submodule.
+ """
+ # If one of the module attributes is a Proxy, it means that its instantiation is input-dependent.
+ # It is not possible to insert such modules, those should be traced through.
+ if self._stateless_mod_instanciation_depends_on_proxies(mod):
+ return ""
+ idx = 0
+ mod_name = mod.__class__.__name__.lower()
+ path = f"{mod_name}_{idx}"
+ already_inserted = False
+ while hasattr(self.root, path):
+ if getattr(self.root, path) is mod:
+ already_inserted = True
+ break
+            idx += 1
+            path = f"{mod_name}_{idx}"
+
+ # No need to add multiple instances of the same module.
+ if not already_inserted:
+ self.root.add_module(path, mod)
+ return path
+
+ def path_of_module(self, mod: nn.Module) -> str:
+ """
+ Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has
+ a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the
+ string "foo.bar".
+
+ Args:
+            mod (`nn.Module`): The `Module` to retrieve the qualified name for.
+ """
+ try:
+ return super().path_of_module(mod)
+ except NameError as e:
+ if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0:
+ path = self._insert_module_as_submodule(mod)
+ return path
+ raise e
+
+ def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
+ return (not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module(
+ m, module_qualified_name
+ )
+
+ @compatibility(is_backward_compatible=True)
+ def keys(self, obj: "Proxy") -> Any:
+ """Called when a proxy object is has the keys() method called.
+ This is what happens when ** is called on a proxy. This should return an iterator if ** is supposed to work in
+ your custom tracer.
+ """
+ attribute = HFAttribute(obj, "keys")()
+ if obj.node.target == "**kwargs":
+ return attribute._metadata
+ return attribute
+
+
+def get_concrete_args(model: nn.Module, input_names: List[str]):
+ sig = inspect.signature(model.forward)
+
+ if not (set(input_names) <= set(sig.parameters.keys())):
+ formatted_input_names = input_names[0] if len(input_names) == 1 else ", ".join(input_names)
+ formatted_allowed_input_names = ", ".join(sig.parameters.keys())
+ raise ValueError(
+ f"The model does not have input(s) named: {formatted_input_names}, expected a subset of the following:"
+ f" {formatted_allowed_input_names}"
+ )
+
+ return {p.name: p.default for p in sig.parameters.values() if p.name not in input_names}
+
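+# Illustrative sketch (hypothetical signature): for a model whose forward is
+# `forward(self, input_ids, attention_mask=None, token_type_ids=None)`, calling
+# `get_concrete_args(model, ["input_ids"])` would return
+# `{"attention_mask": None, "token_type_ids": None}`, i.e. the defaults of every input that will
+# not be traced symbolically.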
+
+def is_model_supported(model: PreTrainedModel):
+ return model.__class__.__name__ in _SUPPORTED_MODELS
+
+
+def check_if_model_is_supported(model: PreTrainedModel):
+ if not is_model_supported(model):
+ supported_model_names = ", ".join(_SUPPORTED_MODELS)
+ raise NotImplementedError(
+ f"Model {model.__class__.__name__} is not supported yet, supported models: {supported_model_names}"
+ )
+
+
+def symbolic_trace(
+ model: PreTrainedModel,
+ input_names: Optional[List[str]] = None,
+ disable_check: bool = False,
+ tracer_cls: Type[HFTracer] = HFTracer,
+) -> GraphModule:
+ """
+ Performs symbolic tracing on the model.
+
+ Args:
+ model ([`PretrainedModel`]):
+ The model to trace.
+ input_names (`List[str]`, *optional*):
+            The names of the inputs of the traced model. If unset, `model.dummy_inputs.keys()` is used instead.
+ disable_check (`bool`, *optional*, defaults to `False`):
+            If `True`, no check is done before trying to trace the model. This is mostly useful for debugging purposes.
+ tracer_cls (`Type[HFTracer]`, *optional*, defaults to `HFTracer`):
+ The tracer class to use for instantiating the tracer. If unset, `HFTracer` is used instead.
+
+ Returns:
+ `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model.
+
+ Example:
+
+ ```python
+ from transformers.utils.fx import symbolic_trace
+
+ traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"])
+ ```
+ """
+ if input_names is None:
+ input_names = model.dummy_inputs.keys()
+
+ input_names = list(input_names)
+ concrete_args = get_concrete_args(model, input_names)
+
+ if not disable_check:
+ check_if_model_is_supported(model)
+
+ # Tracing.
+ tracer = tracer_cls()
+ traced_graph = tracer.trace(model, concrete_args=concrete_args)
+ traced = torch.fx.GraphModule(model, traced_graph)
+
+ traced.config = model.config
+ # The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus
+ # _generate_dummy_input, where the model class is needed.
+ traced.class_for_deserialization = model.__class__
+ traced.device = model.device
+
+ return traced
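+
+# A minimal end-to-end sketch (the checkpoint name is only an example and must be available
+# locally or downloadable):
+#
+#   from transformers import BertModel
+#   from transformers.utils.fx import symbolic_trace
+#
+#   model = BertModel.from_pretrained("bert-base-uncased")
+#   traced = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"])
+#   print(traced.graph)  # the torch.fx graph recorded by HFTracer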
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/import_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/utils/import_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3835831e88a44ef39a8b40ed19772b261884af50
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/utils/import_utils.py
@@ -0,0 +1,1503 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Import utilities: Utilities related to imports and our lazy inits.
+"""
+
+import importlib.metadata
+import importlib.util
+import json
+import os
+import shutil
+import subprocess
+import sys
+import warnings
+from collections import OrderedDict
+from functools import lru_cache
+from itertools import chain
+from types import ModuleType
+from typing import Any, Tuple, Union
+
+from packaging import version
+
+from . import logging
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+# TODO: This doesn't work for all packages (`bs4`, `faiss`, etc.). Talk to Sylvain to see how to handle it better.
+def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]:
+ # Check if the package spec exists and grab its version to avoid importing a local directory
+ package_exists = importlib.util.find_spec(pkg_name) is not None
+ package_version = "N/A"
+ if package_exists:
+ try:
+ # Primary method to get the package version
+ package_version = importlib.metadata.version(pkg_name)
+ except importlib.metadata.PackageNotFoundError:
+ # Fallback method: Only for "torch" and versions containing "dev"
+ if pkg_name == "torch":
+ try:
+ package = importlib.import_module(pkg_name)
+ temp_version = getattr(package, "__version__", "N/A")
+ # Check if the version contains "dev"
+ if "dev" in temp_version:
+ package_version = temp_version
+ package_exists = True
+ else:
+ package_exists = False
+ except ImportError:
+ # If the package can't be imported, it's not available
+ package_exists = False
+ else:
+ # For packages other than "torch", don't attempt the fallback and set as not available
+ package_exists = False
+ logger.debug(f"Detected {pkg_name} version: {package_version}")
+ if return_version:
+ return package_exists, package_version
+ else:
+ return package_exists
+
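+# Illustrative examples (results depend on the environment): `_is_package_available("torch")`
+# returns a plain boolean, while `_is_package_available("torch", return_version=True)` returns an
+# `(available, version)` tuple such as `(True, "2.1.0")`.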
+
+ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
+ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
+
+USE_TF = os.environ.get("USE_TF", "AUTO").upper()
+USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
+USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
+
+# Try to run a native pytorch job in an environment with TorchXLA installed by setting this value to 0.
+USE_TORCH_XLA = os.environ.get("USE_TORCH_XLA", "1").upper()
+
+FORCE_TF_AVAILABLE = os.environ.get("FORCE_TF_AVAILABLE", "AUTO").upper()
+
+# `transformers` requires `torch>=1.11` but this variable is exposed publicly, and we can't simply remove it.
+# This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs.
+TORCH_FX_REQUIRED_VERSION = version.parse("1.10")
+
+ACCELERATE_MIN_VERSION = "0.21.0"
+FSDP_MIN_VERSION = "1.12.0"
+
+
+_accelerate_available, _accelerate_version = _is_package_available("accelerate", return_version=True)
+_apex_available = _is_package_available("apex")
+_aqlm_available = _is_package_available("aqlm")
+_bitsandbytes_available = _is_package_available("bitsandbytes")
+_galore_torch_available = _is_package_available("galore_torch")
+# `importlib.metadata.version` doesn't work with `bs4` but `beautifulsoup4`. For `importlib.util.find_spec`, reversed.
+_bs4_available = importlib.util.find_spec("bs4") is not None
+_coloredlogs_available = _is_package_available("coloredlogs")
+# `importlib.metadata.version` doesn't work with `opencv-python-headless`.
+_cv2_available = importlib.util.find_spec("cv2") is not None
+_datasets_available = _is_package_available("datasets")
+_decord_available = importlib.util.find_spec("decord") is not None
+_detectron2_available = _is_package_available("detectron2")
+# We need to check both `faiss` and `faiss-cpu`.
+_faiss_available = importlib.util.find_spec("faiss") is not None
+try:
+ _faiss_version = importlib.metadata.version("faiss")
+ logger.debug(f"Successfully imported faiss version {_faiss_version}")
+except importlib.metadata.PackageNotFoundError:
+ try:
+ _faiss_version = importlib.metadata.version("faiss-cpu")
+ logger.debug(f"Successfully imported faiss version {_faiss_version}")
+ except importlib.metadata.PackageNotFoundError:
+ _faiss_available = False
+_ftfy_available = _is_package_available("ftfy")
+_g2p_en_available = _is_package_available("g2p_en")
+_ipex_available, _ipex_version = _is_package_available("intel_extension_for_pytorch", return_version=True)
+_jieba_available = _is_package_available("jieba")
+_jinja_available = _is_package_available("jinja2")
+_kenlm_available = _is_package_available("kenlm")
+_keras_nlp_available = _is_package_available("keras_nlp")
+_levenshtein_available = _is_package_available("Levenshtein")
+_librosa_available = _is_package_available("librosa")
+_natten_available = _is_package_available("natten")
+_nltk_available = _is_package_available("nltk")
+_onnx_available = _is_package_available("onnx")
+_openai_available = _is_package_available("openai")
+_optimum_available = _is_package_available("optimum")
+_auto_gptq_available = _is_package_available("auto_gptq")
+# `importlib.metadata.version` doesn't work with `awq`
+_auto_awq_available = importlib.util.find_spec("awq") is not None
+_quanto_available = _is_package_available("quanto")
+_pandas_available = _is_package_available("pandas")
+_peft_available = _is_package_available("peft")
+_phonemizer_available = _is_package_available("phonemizer")
+_psutil_available = _is_package_available("psutil")
+_py3nvml_available = _is_package_available("py3nvml")
+_pyctcdecode_available = _is_package_available("pyctcdecode")
+_pytesseract_available = _is_package_available("pytesseract")
+_pytest_available = _is_package_available("pytest")
+_pytorch_quantization_available = _is_package_available("pytorch_quantization")
+_rjieba_available = _is_package_available("rjieba")
+_sacremoses_available = _is_package_available("sacremoses")
+_safetensors_available = _is_package_available("safetensors")
+_scipy_available = _is_package_available("scipy")
+_sentencepiece_available = _is_package_available("sentencepiece")
+_is_seqio_available = _is_package_available("seqio")
+_sklearn_available = importlib.util.find_spec("sklearn") is not None
+if _sklearn_available:
+ try:
+ importlib.metadata.version("scikit-learn")
+ except importlib.metadata.PackageNotFoundError:
+ _sklearn_available = False
+_smdistributed_available = importlib.util.find_spec("smdistributed") is not None
+_soundfile_available = _is_package_available("soundfile")
+_spacy_available = _is_package_available("spacy")
+_sudachipy_available, _sudachipy_version = _is_package_available("sudachipy", return_version=True)
+_tensorflow_probability_available = _is_package_available("tensorflow_probability")
+_tensorflow_text_available = _is_package_available("tensorflow_text")
+_tf2onnx_available = _is_package_available("tf2onnx")
+_timm_available = _is_package_available("timm")
+_tokenizers_available = _is_package_available("tokenizers")
+_torchaudio_available = _is_package_available("torchaudio")
+_torchdistx_available = _is_package_available("torchdistx")
+_torchvision_available = _is_package_available("torchvision")
+_mlx_available = _is_package_available("mlx")
+
+
+_torch_version = "N/A"
+_torch_available = False
+if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
+ _torch_available, _torch_version = _is_package_available("torch", return_version=True)
+else:
+ logger.info("Disabling PyTorch because USE_TF is set")
+ _torch_available = False
+
+
+_tf_version = "N/A"
+_tf_available = False
+if FORCE_TF_AVAILABLE in ENV_VARS_TRUE_VALUES:
+ _tf_available = True
+else:
+ if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
+ # Note: _is_package_available("tensorflow") fails for tensorflow-cpu. Please test any changes to the line below
+ # with tensorflow-cpu to make sure it still works!
+ _tf_available = importlib.util.find_spec("tensorflow") is not None
+ if _tf_available:
+ candidates = (
+ "tensorflow",
+ "tensorflow-cpu",
+ "tensorflow-gpu",
+ "tf-nightly",
+ "tf-nightly-cpu",
+ "tf-nightly-gpu",
+ "tf-nightly-rocm",
+ "intel-tensorflow",
+ "intel-tensorflow-avx512",
+ "tensorflow-rocm",
+ "tensorflow-macos",
+ "tensorflow-aarch64",
+ )
+ _tf_version = None
+ # For the metadata, we have to look for both tensorflow and tensorflow-cpu
+ for pkg in candidates:
+ try:
+ _tf_version = importlib.metadata.version(pkg)
+ break
+ except importlib.metadata.PackageNotFoundError:
+ pass
+ _tf_available = _tf_version is not None
+ if _tf_available:
+ if version.parse(_tf_version) < version.parse("2"):
+ logger.info(
+ f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum."
+ )
+ _tf_available = False
+ else:
+ logger.info("Disabling Tensorflow because USE_TORCH is set")
+
+
+_essentia_available = importlib.util.find_spec("essentia") is not None
+try:
+ _essentia_version = importlib.metadata.version("essentia")
+ logger.debug(f"Successfully imported essentia version {_essentia_version}")
+except importlib.metadata.PackageNotFoundError:
+    _essentia_version = False
+    _essentia_available = False
+
+
+_pretty_midi_available = importlib.util.find_spec("pretty_midi") is not None
+try:
+ _pretty_midi_version = importlib.metadata.version("pretty_midi")
+ logger.debug(f"Successfully imported pretty_midi version {_pretty_midi_version}")
+except importlib.metadata.PackageNotFoundError:
+ _pretty_midi_available = False
+
+
+ccl_version = "N/A"
+_is_ccl_available = (
+ importlib.util.find_spec("torch_ccl") is not None
+ or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
+)
+try:
+ ccl_version = importlib.metadata.version("oneccl_bind_pt")
+ logger.debug(f"Detected oneccl_bind_pt version {ccl_version}")
+except importlib.metadata.PackageNotFoundError:
+ _is_ccl_available = False
+
+
+_flax_available = False
+if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
+ _flax_available, _flax_version = _is_package_available("flax", return_version=True)
+ if _flax_available:
+ _jax_available, _jax_version = _is_package_available("jax", return_version=True)
+ if _jax_available:
+ logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
+ else:
+ _flax_available = _jax_available = False
+ _jax_version = _flax_version = "N/A"
+
+
+_torch_fx_available = False
+if _torch_available:
+ torch_version = version.parse(_torch_version)
+ _torch_fx_available = (torch_version.major, torch_version.minor) >= (
+ TORCH_FX_REQUIRED_VERSION.major,
+ TORCH_FX_REQUIRED_VERSION.minor,
+ )
+
+
+_torch_xla_available = False
+if USE_TORCH_XLA in ENV_VARS_TRUE_VALUES:
+ _torch_xla_available, _torch_xla_version = _is_package_available("torch_xla", return_version=True)
+ if _torch_xla_available:
+ logger.info(f"Torch XLA version {_torch_xla_version} available.")
+
+
+def is_kenlm_available():
+ return _kenlm_available
+
+
+def is_cv2_available():
+ return _cv2_available
+
+
+def is_torch_available():
+ return _torch_available
+
+
+def get_torch_version():
+ return _torch_version
+
+
+def is_torch_sdpa_available():
+ if not is_torch_available():
+ return False
+ elif _torch_version == "N/A":
+ return False
+
+ # NOTE: We require torch>=2.1 (and not torch>=2.0) to use SDPA in Transformers for two reasons:
+ # - Allow the global use of the `scale` argument introduced in https://github.com/pytorch/pytorch/pull/95259
+ # - Memory-efficient attention supports arbitrary attention_mask: https://github.com/pytorch/pytorch/pull/104310
+ # NOTE: We require torch>=2.1.1 to avoid a numerical issue in SDPA with non-contiguous inputs: https://github.com/pytorch/pytorch/issues/112577
+ return version.parse(_torch_version) >= version.parse("2.1.1")
+
+
+def is_torchvision_available():
+ return _torchvision_available
+
+
+def is_galore_torch_available():
+ return _galore_torch_available
+
+
+def is_pyctcdecode_available():
+ return _pyctcdecode_available
+
+
+def is_librosa_available():
+ return _librosa_available
+
+
+def is_essentia_available():
+ return _essentia_available
+
+
+def is_pretty_midi_available():
+ return _pretty_midi_available
+
+
+def is_torch_cuda_available():
+ if is_torch_available():
+ import torch
+
+ return torch.cuda.is_available()
+ else:
+ return False
+
+
+def is_mamba_ssm_available():
+ if is_torch_available():
+ import torch
+
+ if not torch.cuda.is_available():
+ return False
+ else:
+ return _is_package_available("mamba_ssm")
+ return False
+
+
+def is_causal_conv1d_available():
+ if is_torch_available():
+ import torch
+
+ if not torch.cuda.is_available():
+ return False
+ return _is_package_available("causal_conv1d")
+ return False
+
+
+def is_torch_mps_available():
+ if is_torch_available():
+ import torch
+
+ if hasattr(torch.backends, "mps"):
+ return torch.backends.mps.is_available()
+ return False
+
+
+def is_torch_bf16_gpu_available():
+ if not is_torch_available():
+ return False
+
+ import torch
+
+ return torch.cuda.is_available() and torch.cuda.is_bf16_supported()
+
+
+def is_torch_bf16_cpu_available():
+ if not is_torch_available():
+ return False
+
+ import torch
+
+ try:
+ # multiple levels of AttributeError depending on the pytorch version so do them all in one check
+ _ = torch.cpu.amp.autocast
+ except AttributeError:
+ return False
+
+ return True
+
+
+def is_torch_bf16_available():
+ # the original bf16 check was for gpu only, but later a cpu/bf16 combo has emerged so this util
+ # has become ambiguous and therefore deprecated
+ warnings.warn(
+ "The util is_torch_bf16_available is deprecated, please use is_torch_bf16_gpu_available "
+ "or is_torch_bf16_cpu_available instead according to whether it's used with cpu or gpu",
+ FutureWarning,
+ )
+ return is_torch_bf16_gpu_available()
+
+
+@lru_cache()
+def is_torch_fp16_available_on_device(device):
+ if not is_torch_available():
+ return False
+
+ import torch
+
+ try:
+ x = torch.zeros(2, 2, dtype=torch.float16).to(device)
+ _ = x @ x
+
+        # At this moment, let's be strict about the check: verify that `LayerNorm` is also supported on the device,
+        # because many models use this layer.
+ batch, sentence_length, embedding_dim = 3, 4, 5
+ embedding = torch.randn(batch, sentence_length, embedding_dim, dtype=torch.float16, device=device)
+ layer_norm = torch.nn.LayerNorm(embedding_dim, dtype=torch.float16, device=device)
+ _ = layer_norm(embedding)
+
+ except: # noqa: E722
+ # TODO: more precise exception matching, if possible.
+ # most backends should return `RuntimeError` however this is not guaranteed.
+ return False
+
+ return True
+
+
+@lru_cache()
+def is_torch_bf16_available_on_device(device):
+ if not is_torch_available():
+ return False
+
+ import torch
+
+ if device == "cuda":
+ return is_torch_bf16_gpu_available()
+
+ try:
+ x = torch.zeros(2, 2, dtype=torch.bfloat16).to(device)
+ _ = x @ x
+ except: # noqa: E722
+ # TODO: more precise exception matching, if possible.
+ # most backends should return `RuntimeError` however this is not guaranteed.
+ return False
+
+ return True
+
+
+def is_torch_tf32_available():
+ if not is_torch_available():
+ return False
+
+ import torch
+
+ if not torch.cuda.is_available() or torch.version.cuda is None:
+ return False
+ if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
+ return False
+ if int(torch.version.cuda.split(".")[0]) < 11:
+ return False
+ if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"):
+ return False
+
+ return True
+
+
+def is_torch_fx_available():
+ return _torch_fx_available
+
+
+def is_peft_available():
+ return _peft_available
+
+
+def is_bs4_available():
+ return _bs4_available
+
+
+def is_tf_available():
+ return _tf_available
+
+
+def is_coloredlogs_available():
+ return _coloredlogs_available
+
+
+def is_tf2onnx_available():
+ return _tf2onnx_available
+
+
+def is_onnx_available():
+ return _onnx_available
+
+
+def is_openai_available():
+ return _openai_available
+
+
+def is_flax_available():
+ return _flax_available
+
+
+def is_ftfy_available():
+ return _ftfy_available
+
+
+def is_g2p_en_available():
+ return _g2p_en_available
+
+
+@lru_cache()
+def is_torch_tpu_available(check_device=True):
+ "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
+ warnings.warn(
+ "`is_torch_tpu_available` is deprecated and will be removed in 4.41.0. "
+ "Please use the `is_torch_xla_available` instead.",
+ FutureWarning,
+ )
+
+ if not _torch_available:
+ return False
+ if importlib.util.find_spec("torch_xla") is not None:
+ if check_device:
+ # We need to check if `xla_device` can be found, will raise a RuntimeError if not
+ try:
+ import torch_xla.core.xla_model as xm
+
+ _ = xm.xla_device()
+ return True
+ except RuntimeError:
+ return False
+ return True
+ return False
+
+
+@lru_cache
+def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False):
+ """
+ Check if `torch_xla` is available. To train a native PyTorch job in an environment with torch_xla installed, set
+ the USE_TORCH_XLA environment variable to false.
+ """
+ assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true."
+
+ if not _torch_xla_available:
+ return False
+
+ import torch_xla
+
+ if check_is_gpu:
+ return torch_xla.runtime.device_type() in ["GPU", "CUDA"]
+ elif check_is_tpu:
+ return torch_xla.runtime.device_type() == "TPU"
+
+ return True
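+
+# Illustrative sketch (not part of the library): when XLA is available, the usual way to obtain
+# a device handle is through torch_xla, e.g.
+#
+#     >>> if is_torch_xla_available(check_is_tpu=True):
+#     ...     import torch_xla.core.xla_model as xm
+#     ...     device = xm.xla_device()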
+
+
+@lru_cache()
+def is_torch_neuroncore_available(check_device=True):
+ if importlib.util.find_spec("torch_neuronx") is not None:
+ return is_torch_xla_available()
+ return False
+
+
+@lru_cache()
+def is_torch_npu_available(check_device=False):
+ "Checks if `torch_npu` is installed and potentially if a NPU is in the environment"
+ if not _torch_available or importlib.util.find_spec("torch_npu") is None:
+ return False
+
+ import torch
+ import torch_npu # noqa: F401
+
+ if check_device:
+ try:
+ # Will raise a RuntimeError if no NPU is found
+ _ = torch.npu.device_count()
+ return torch.npu.is_available()
+ except RuntimeError:
+ return False
+ return hasattr(torch, "npu") and torch.npu.is_available()
+
+
+def is_torchdynamo_available():
+ if not is_torch_available():
+ return False
+ try:
+ import torch._dynamo as dynamo # noqa: F401
+
+ return True
+ except Exception:
+ return False
+
+
+def is_torch_compile_available():
+ if not is_torch_available():
+ return False
+
+ import torch
+
+ # We don't do any version check here to support nightlies marked as 1.14. Ultimately this needs to check the
+ # version against 2.0, but let's do it later.
+ return hasattr(torch, "compile")
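+
+# Illustrative sketch (not part of the library): callers typically guard `torch.compile` behind
+# this check so the same code still runs on older PyTorch versions; `model` below is a placeholder
+# for any torch.nn.Module.
+#
+#     >>> if is_torch_compile_available():
+#     ...     model = torch.compile(model)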
+
+
+def is_torchdynamo_compiling():
+ if not is_torch_available():
+ return False
+ try:
+ import torch._dynamo as dynamo # noqa: F401
+
+ return dynamo.is_compiling()
+ except Exception:
+ return False
+
+
+def is_torch_tensorrt_fx_available():
+ if importlib.util.find_spec("torch_tensorrt") is None:
+ return False
+ return importlib.util.find_spec("torch_tensorrt.fx") is not None
+
+
+def is_datasets_available():
+ return _datasets_available
+
+
+def is_detectron2_available():
+ return _detectron2_available
+
+
+def is_rjieba_available():
+ return _rjieba_available
+
+
+def is_psutil_available():
+ return _psutil_available
+
+
+def is_py3nvml_available():
+ return _py3nvml_available
+
+
+def is_sacremoses_available():
+ return _sacremoses_available
+
+
+def is_apex_available():
+ return _apex_available
+
+
+def is_aqlm_available():
+ return _aqlm_available
+
+
+def is_ninja_available():
+ r"""
+ Code comes from *torch.utils.cpp_extension.is_ninja_available()*. Returns `True` if the
+ [ninja](https://ninja-build.org/) build system is available on the system, `False` otherwise.
+ """
+ try:
+ subprocess.check_output("ninja --version".split())
+ except Exception:
+ return False
+ else:
+ return True
+
+
+def is_ipex_available():
+ def get_major_and_minor_from_version(full_version):
+ return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
+
+ if not is_torch_available() or not _ipex_available:
+ return False
+
+ torch_major_and_minor = get_major_and_minor_from_version(_torch_version)
+ ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version)
+ if torch_major_and_minor != ipex_major_and_minor:
+ logger.warning(
+ f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*,"
+ f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again."
+ )
+ return False
+ return True
+
+
+@lru_cache
+def is_torch_xpu_available(check_device=False):
+ "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment"
+ if not is_ipex_available():
+ return False
+
+ import intel_extension_for_pytorch # noqa: F401
+ import torch
+
+ if check_device:
+ try:
+ # Will raise a RuntimeError if no XPU is found
+ _ = torch.xpu.device_count()
+ return torch.xpu.is_available()
+ except RuntimeError:
+ return False
+ return hasattr(torch, "xpu") and torch.xpu.is_available()
+
+
+def is_bitsandbytes_available():
+ if not is_torch_available():
+ return False
+
+ # bitsandbytes throws an error if cuda is not available
+ # let's avoid that by adding a simple check
+ import torch
+
+ return _bitsandbytes_available and torch.cuda.is_available()
+
+
+def is_flash_attn_2_available():
+ if not is_torch_available():
+ return False
+
+ if not _is_package_available("flash_attn"):
+ return False
+
+ # Let's add an extra check to see if cuda is available
+ import torch
+
+ if not torch.cuda.is_available():
+ return False
+
+ if torch.version.cuda:
+ return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0")
+ elif torch.version.hip:
+ # TODO: Bump the requirement to 2.1.0 once released in https://github.com/ROCmSoftwarePlatform/flash-attention
+ return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.0.4")
+ else:
+ return False
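+
+# Illustrative sketch (assumption, not part of the library): in recent Transformers versions a caller
+# can request the flash-attention backend when this check passes; "some-model-id" below is a placeholder.
+#
+#     >>> import torch
+#     >>> from transformers import AutoModelForCausalLM
+#     >>> if is_flash_attn_2_available():
+#     ...     model = AutoModelForCausalLM.from_pretrained(
+#     ...         "some-model-id", attn_implementation="flash_attention_2", torch_dtype=torch.float16
+#     ...     )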
+
+
+def is_flash_attn_greater_or_equal_2_10():
+ if not _is_package_available("flash_attn"):
+ return False
+
+ return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0")
+
+
+def is_torchdistx_available():
+ return _torchdistx_available
+
+
+def is_faiss_available():
+ return _faiss_available
+
+
+def is_scipy_available():
+ return _scipy_available
+
+
+def is_sklearn_available():
+ return _sklearn_available
+
+
+def is_sentencepiece_available():
+ return _sentencepiece_available
+
+
+def is_seqio_available():
+ return _is_seqio_available
+
+
+def is_protobuf_available():
+ if importlib.util.find_spec("google") is None:
+ return False
+ return importlib.util.find_spec("google.protobuf") is not None
+
+
+def is_accelerate_available(min_version: str = ACCELERATE_MIN_VERSION):
+ if min_version is not None:
+ return _accelerate_available and version.parse(_accelerate_version) >= version.parse(min_version)
+ return _accelerate_available
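+
+# Illustrative sketch (not part of the library): the optional `min_version` argument lets callers gate
+# features on a specific accelerate release; "0.26.0" below is just an example version string.
+#
+#     >>> if is_accelerate_available("0.26.0"):
+#     ...     pass  # safe to rely on features introduced in that release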
+
+
+def is_fsdp_available(min_version: str = FSDP_MIN_VERSION):
+ return is_torch_available() and version.parse(_torch_version) >= version.parse(min_version)
+
+
+def is_optimum_available():
+ return _optimum_available
+
+
+def is_auto_awq_available():
+ return _auto_awq_available
+
+
+def is_quanto_available():
+ return _quanto_available
+
+
+def is_auto_gptq_available():
+ return _auto_gptq_available
+
+
+def is_levenshtein_available():
+ return _levenshtein_available
+
+
+def is_optimum_neuron_available():
+ return _optimum_available and _is_package_available("optimum.neuron")
+
+
+def is_safetensors_available():
+ return _safetensors_available
+
+
+def is_tokenizers_available():
+ return _tokenizers_available
+
+
+@lru_cache
+def is_vision_available():
+ _pil_available = importlib.util.find_spec("PIL") is not None
+ if _pil_available:
+ try:
+ package_version = importlib.metadata.version("Pillow")
+ except importlib.metadata.PackageNotFoundError:
+ try:
+ package_version = importlib.metadata.version("Pillow-SIMD")
+ except importlib.metadata.PackageNotFoundError:
+ return False
+ logger.debug(f"Detected PIL version {package_version}")
+ return _pil_available
+
+
+def is_pytesseract_available():
+ return _pytesseract_available
+
+
+def is_pytest_available():
+ return _pytest_available
+
+
+def is_spacy_available():
+ return _spacy_available
+
+
+def is_tensorflow_text_available():
+ return is_tf_available() and _tensorflow_text_available
+
+
+def is_keras_nlp_available():
+ return is_tensorflow_text_available() and _keras_nlp_available
+
+
+def is_in_notebook():
+ try:
+ # Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py
+ get_ipython = sys.modules["IPython"].get_ipython
+ if "IPKernelApp" not in get_ipython().config:
+ raise ImportError("console")
+ if "VSCODE_PID" in os.environ:
+ raise ImportError("vscode")
+ if "DATABRICKS_RUNTIME_VERSION" in os.environ and os.environ["DATABRICKS_RUNTIME_VERSION"] < "11.0":
+ # Databricks Runtime 11.0 and above uses IPython kernel by default so it should be compatible with Jupyter notebook
+ # https://docs.microsoft.com/en-us/azure/databricks/notebooks/ipython-kernel
+ raise ImportError("databricks")
+
+ return importlib.util.find_spec("IPython") is not None
+ except (AttributeError, ImportError, KeyError):
+ return False
+
+
+def is_pytorch_quantization_available():
+ return _pytorch_quantization_available
+
+
+def is_tensorflow_probability_available():
+ return _tensorflow_probability_available
+
+
+def is_pandas_available():
+ return _pandas_available
+
+
+def is_sagemaker_dp_enabled():
+ # Get the sagemaker specific env variable.
+ sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
+ try:
+ # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
+ sagemaker_params = json.loads(sagemaker_params)
+ if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False):
+ return False
+ except json.JSONDecodeError:
+ return False
+ # Lastly, check if the `smdistributed` module is present.
+ return _smdistributed_available
+
+
+def is_sagemaker_mp_enabled():
+ # Get the sagemaker specific mp parameters from smp_options variable.
+ smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
+ try:
+ # Parse it and check the field "partitions" is included, it is required for model parallel.
+ smp_options = json.loads(smp_options)
+ if "partitions" not in smp_options:
+ return False
+ except json.JSONDecodeError:
+ return False
+
+ # Get the sagemaker specific framework parameters from mpi_options variable.
+ mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
+ try:
+ # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
+ mpi_options = json.loads(mpi_options)
+ if not mpi_options.get("sagemaker_mpi_enabled", False):
+ return False
+ except json.JSONDecodeError:
+ return False
+ # Lastly, check if the `smdistributed` module is present.
+ return _smdistributed_available
+
+
+def is_training_run_on_sagemaker():
+ return "SAGEMAKER_JOB_NAME" in os.environ
+
+
+def is_soundfile_availble():
+ return _soundfile_available
+
+
+def is_timm_available():
+ return _timm_available
+
+
+def is_natten_available():
+ return _natten_available
+
+
+def is_nltk_available():
+ return _nltk_available
+
+
+def is_torchaudio_available():
+ return _torchaudio_available
+
+
+def is_speech_available():
+ # For now this depends on torchaudio but the exact dependency might evolve in the future.
+ return _torchaudio_available
+
+
+def is_phonemizer_available():
+ return _phonemizer_available
+
+
+def torch_only_method(fn):
+ def wrapper(*args, **kwargs):
+ if not _torch_available:
+ raise ImportError(
+ "You need to install pytorch to use this method or class, "
+ "or activate it with environment variables USE_TORCH=1 and USE_TF=0."
+ )
+ else:
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+
+def is_ccl_available():
+ return _is_ccl_available
+
+
+def is_decord_available():
+ return _decord_available
+
+
+def is_sudachi_available():
+ return _sudachipy_available
+
+
+def get_sudachi_version():
+ return _sudachipy_version
+
+
+def is_sudachi_projection_available():
+ if not is_sudachi_available():
+ return False
+
+ # NOTE: We require sudachipy>=0.6.8 to use projection option in sudachi_kwargs for the constructor of BertJapaneseTokenizer.
+ # - `projection` option is not supported in sudachipy<0.6.8, see https://github.com/WorksApplications/sudachi.rs/issues/230
+ return version.parse(_sudachipy_version) >= version.parse("0.6.8")
+
+
+def is_jumanpp_available():
+ return (importlib.util.find_spec("rhoknp") is not None) and (shutil.which("jumanpp") is not None)
+
+
+def is_cython_available():
+ return importlib.util.find_spec("pyximport") is not None
+
+
+def is_jieba_available():
+ return _jieba_available
+
+
+def is_jinja_available():
+ return _jinja_available
+
+
+def is_mlx_available():
+ return _mlx_available
+
+
+# docstyle-ignore
+CV2_IMPORT_ERROR = """
+{0} requires the OpenCV library but it was not found in your environment. You can install it with:
+```
+pip install opencv-python
+```
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+DATASETS_IMPORT_ERROR = """
+{0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with:
+```
+pip install datasets
+```
+In a notebook or a colab, you can install it by executing a cell with
+```
+!pip install datasets
+```
+then restarting your kernel.
+
+Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current
+working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or
+that python file if that's the case. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+TOKENIZERS_IMPORT_ERROR = """
+{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with:
+```
+pip install tokenizers
+```
+In a notebook or a colab, you can install it by executing a cell with
+```
+!pip install tokenizers
+```
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+SENTENCEPIECE_IMPORT_ERROR = """
+{0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the
+installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PROTOBUF_IMPORT_ERROR = """
+{0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the
+installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+FAISS_IMPORT_ERROR = """
+{0} requires the faiss library but it was not found in your environment. Checkout the instructions on the
+installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PYTORCH_IMPORT_ERROR = """
+{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the
+installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+TORCHVISION_IMPORT_ERROR = """
+{0} requires the Torchvision library but it was not found in your environment. Checkout the instructions on the
+installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+PYTORCH_IMPORT_ERROR_WITH_TF = """
+{0} requires the PyTorch library but it was not found in your environment.
+However, we were able to find a TensorFlow installation. TensorFlow classes begin
+with "TF", but are otherwise identically named to our PyTorch classes. This
+means that the TF equivalent of the class you tried to import would be "TF{0}".
+If you want to use TensorFlow, please use TF classes instead!
+
+If you really do want to use PyTorch please go to
+https://pytorch.org/get-started/locally/ and follow the instructions that
+match your environment.
+"""
+
+# docstyle-ignore
+TF_IMPORT_ERROR_WITH_PYTORCH = """
+{0} requires the TensorFlow library but it was not found in your environment.
+However, we were able to find a PyTorch installation. PyTorch classes do not begin
+with "TF", but are otherwise identically named to our TF classes.
+If you want to use PyTorch, please use those classes instead!
+
+If you really do want to use TensorFlow, please follow the instructions on the
+installation page https://www.tensorflow.org/install that match your environment.
+"""
+
+# docstyle-ignore
+BS4_IMPORT_ERROR = """
+{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip:
+`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+SKLEARN_IMPORT_ERROR = """
+{0} requires the scikit-learn library but it was not found in your environment. You can install it with:
+```
+pip install -U scikit-learn
+```
+In a notebook or a colab, you can install it by executing a cell with
+```
+!pip install -U scikit-learn
+```
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+TENSORFLOW_IMPORT_ERROR = """
+{0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the
+installation page: https://www.tensorflow.org/install and follow the ones that match your environment.
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+DETECTRON2_IMPORT_ERROR = """
+{0} requires the detectron2 library but it was not found in your environment. Checkout the instructions on the
+installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+FLAX_IMPORT_ERROR = """
+{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
+installation page: https://github.com/google/flax and follow the ones that match your environment.
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+FTFY_IMPORT_ERROR = """
+{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the
+installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones
+that match your environment. Please note that you may need to restart your runtime after installation.
+"""
+
+LEVENSHTEIN_IMPORT_ERROR = """
+{0} requires the python-Levenshtein library but it was not found in your environment. You can install it with pip: `pip
+install python-Levenshtein`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+G2P_EN_IMPORT_ERROR = """
+{0} requires the g2p-en library but it was not found in your environment. You can install it with pip:
+`pip install g2p-en`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+PYTORCH_QUANTIZATION_IMPORT_ERROR = """
+{0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip:
+`pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com`
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+TENSORFLOW_PROBABILITY_IMPORT_ERROR = """
+{0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as
+explained here: https://github.com/tensorflow/probability. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+TENSORFLOW_TEXT_IMPORT_ERROR = """
+{0} requires the tensorflow_text library but it was not found in your environment. You can install it with pip as
+explained here: https://www.tensorflow.org/text/guide/tf_text_intro.
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PANDAS_IMPORT_ERROR = """
+{0} requires the pandas library but it was not found in your environment. You can install it with pip as
+explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html.
+Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PHONEMIZER_IMPORT_ERROR = """
+{0} requires the phonemizer library but it was not found in your environment. You can install it with pip:
+`pip install phonemizer`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+SACREMOSES_IMPORT_ERROR = """
+{0} requires the sacremoses library but it was not found in your environment. You can install it with pip:
+`pip install sacremoses`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+SCIPY_IMPORT_ERROR = """
+{0} requires the scipy library but it was not found in your environment. You can install it with pip:
+`pip install scipy`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+SPEECH_IMPORT_ERROR = """
+{0} requires the torchaudio library but it was not found in your environment. You can install it with pip:
+`pip install torchaudio`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+TIMM_IMPORT_ERROR = """
+{0} requires the timm library but it was not found in your environment. You can install it with pip:
+`pip install timm`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+NATTEN_IMPORT_ERROR = """
+{0} requires the natten library but it was not found in your environment. You can install it by referring to:
+shi-labs.com/natten. You can also install it with pip (may take longer to build):
+`pip install natten`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+NLTK_IMPORT_ERROR = """
+{0} requires the NLTK library but it was not found in your environment. You can install it by referring to:
+https://www.nltk.org/install.html. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+VISION_IMPORT_ERROR = """
+{0} requires the PIL library but it was not found in your environment. You can install it with pip:
+`pip install pillow`. Please note that you may need to restart your runtime after installation.
+"""
+
+
+# docstyle-ignore
+PYTESSERACT_IMPORT_ERROR = """
+{0} requires the PyTesseract library but it was not found in your environment. You can install it with pip:
+`pip install pytesseract`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+PYCTCDECODE_IMPORT_ERROR = """
+{0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip:
+`pip install pyctcdecode`. Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+ACCELERATE_IMPORT_ERROR = """
+{0} requires the accelerate library >= {ACCELERATE_MIN_VERSION} but it was not found in your environment.
+You can install or update it with pip: `pip install --upgrade accelerate`. Please note that you may need to restart your
+runtime after installation.
+"""
+
+# docstyle-ignore
+CCL_IMPORT_ERROR = """
+{0} requires the torch ccl library but it was not found in your environment. You can install it with pip:
+`pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable`
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+ESSENTIA_IMPORT_ERROR = """
+{0} requires the essentia library but it was not found in your environment. You can install it with pip:
+`pip install essentia==2.1b6.dev1034`
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+LIBROSA_IMPORT_ERROR = """
+{0} requires the librosa library but it was not found in your environment. You can install it with pip:
+`pip install librosa`
+Please note that you may need to restart your runtime after installation.
+"""
+
+# docstyle-ignore
+PRETTY_MIDI_IMPORT_ERROR = """
+{0} requires the pretty_midi library but it was not found in your environment. You can install it with pip:
+`pip install pretty_midi`
+Please note that you may need to restart your runtime after installation.
+"""
+
+DECORD_IMPORT_ERROR = """
+{0} requires the decord library but it was not found in your environment. You can install it with pip: `pip install
+decord`. Please note that you may need to restart your runtime after installation.
+"""
+
+CYTHON_IMPORT_ERROR = """
+{0} requires the Cython library but it was not found in your environment. You can install it with pip: `pip install
+Cython`. Please note that you may need to restart your runtime after installation.
+"""
+
+JIEBA_IMPORT_ERROR = """
+{0} requires the jieba library but it was not found in your environment. You can install it with pip: `pip install
+jieba`. Please note that you may need to restart your runtime after installation.
+"""
+
+PEFT_IMPORT_ERROR = """
+{0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install
+peft`. Please note that you may need to restart your runtime after installation.
+"""
+
+JINJA_IMPORT_ERROR = """
+{0} requires the jinja library but it was not found in your environment. You can install it with pip: `pip install
+jinja2`. Please note that you may need to restart your runtime after installation.
+"""
+
+BACKENDS_MAPPING = OrderedDict(
+ [
+ ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)),
+ ("cv2", (is_cv2_available, CV2_IMPORT_ERROR)),
+ ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)),
+ ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)),
+ ("essentia", (is_essentia_available, ESSENTIA_IMPORT_ERROR)),
+ ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)),
+ ("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
+ ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)),
+ ("g2p_en", (is_g2p_en_available, G2P_EN_IMPORT_ERROR)),
+ ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)),
+ ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)),
+ ("pretty_midi", (is_pretty_midi_available, PRETTY_MIDI_IMPORT_ERROR)),
+ ("levenshtein", (is_levenshtein_available, LEVENSHTEIN_IMPORT_ERROR)),
+ ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)),
+ ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)),
+ ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)),
+ ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)),
+ ("sacremoses", (is_sacremoses_available, SACREMOSES_IMPORT_ERROR)),
+ ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)),
+ ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
+ ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)),
+ ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)),
+ ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)),
+ ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)),
+ ("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)),
+ ("timm", (is_timm_available, TIMM_IMPORT_ERROR)),
+ ("natten", (is_natten_available, NATTEN_IMPORT_ERROR)),
+ ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)),
+ ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)),
+ ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
+ ("torchvision", (is_torchvision_available, TORCHVISION_IMPORT_ERROR)),
+ ("vision", (is_vision_available, VISION_IMPORT_ERROR)),
+ ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
+ ("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)),
+ ("oneccl_bind_pt", (is_ccl_available, CCL_IMPORT_ERROR)),
+ ("decord", (is_decord_available, DECORD_IMPORT_ERROR)),
+ ("cython", (is_cython_available, CYTHON_IMPORT_ERROR)),
+ ("jieba", (is_jieba_available, JIEBA_IMPORT_ERROR)),
+ ("peft", (is_peft_available, PEFT_IMPORT_ERROR)),
+ ("jinja", (is_jinja_available, JINJA_IMPORT_ERROR)),
+ ]
+)
+
+
+def requires_backends(obj, backends):
+ if not isinstance(backends, (list, tuple)):
+ backends = [backends]
+
+ name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
+
+ # Raise an error for users who might not realize that classes without "TF" are torch-only
+ if "torch" in backends and "tf" not in backends and not is_torch_available() and is_tf_available():
+ raise ImportError(PYTORCH_IMPORT_ERROR_WITH_TF.format(name))
+
+ # Raise the inverse error for PyTorch users trying to load TF classes
+ if "tf" in backends and "torch" not in backends and is_torch_available() and not is_tf_available():
+ raise ImportError(TF_IMPORT_ERROR_WITH_PYTORCH.format(name))
+
+ checks = (BACKENDS_MAPPING[backend] for backend in backends)
+ failed = [msg.format(name) for available, msg in checks if not available()]
+ if failed:
+ raise ImportError("".join(failed))
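+
+# Illustrative sketch (not part of the library): a hypothetical helper that needs both torch and vision
+# could guard itself with `requires_backends` so users get the install hints defined above, e.g.
+#
+#     >>> def extract_features(images):
+#     ...     requires_backends(extract_features, ["torch", "vision"])
+#     ...     # ... actual implementation here ...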
+
+
+class DummyObject(type):
+ """
+ Metaclass for the dummy objects. Any class inheriting from it will raise the ImportError generated by
+ `requires_backends` each time a user tries to access any method of that class.
+ """
+
+ def __getattribute__(cls, key):
+ if key.startswith("_") and key != "_from_config":
+ return super().__getattribute__(key)
+ requires_backends(cls, cls._backends)
+
+
+def is_torch_fx_proxy(x):
+ if is_torch_fx_available():
+ import torch.fx
+
+ return isinstance(x, torch.fx.Proxy)
+ return False
+
+
+class _LazyModule(ModuleType):
+ """
+ Module class that surfaces all objects but only performs associated imports when the objects are requested.
+ """
+
+ # Very heavily inspired by optuna.integration._IntegrationModule
+ # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
+ def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
+ super().__init__(name)
+ self._modules = set(import_structure.keys())
+ self._class_to_module = {}
+ for key, values in import_structure.items():
+ for value in values:
+ self._class_to_module[value] = key
+ # Needed for autocompletion in an IDE
+ self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
+ self.__file__ = module_file
+ self.__spec__ = module_spec
+ self.__path__ = [os.path.dirname(module_file)]
+ self._objects = {} if extra_objects is None else extra_objects
+ self._name = name
+ self._import_structure = import_structure
+
+ # Needed for autocompletion in an IDE
+ def __dir__(self):
+ result = super().__dir__()
+ # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
+ # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
+ for attr in self.__all__:
+ if attr not in result:
+ result.append(attr)
+ return result
+
+ def __getattr__(self, name: str) -> Any:
+ if name in self._objects:
+ return self._objects[name]
+ if name in self._modules:
+ value = self._get_module(name)
+ elif name in self._class_to_module.keys():
+ module = self._get_module(self._class_to_module[name])
+ value = getattr(module, name)
+ else:
+ raise AttributeError(f"module {self.__name__} has no attribute {name}")
+
+ setattr(self, name, value)
+ return value
+
+ def _get_module(self, module_name: str):
+ try:
+ return importlib.import_module("." + module_name, self.__name__)
+ except Exception as e:
+ raise RuntimeError(
+ f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
+ f" traceback):\n{e}"
+ ) from e
+
+ def __reduce__(self):
+ return (self.__class__, (self._name, self.__file__, self._import_structure))
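+
+# Illustrative sketch (assumption about typical usage, not copied from this file): a package
+# `__init__.py` can expose its public API lazily by replacing its own module object, roughly as
+# below, where `_import_structure` is assumed to be defined in that `__init__.py`.
+#
+#     >>> import sys
+#     >>> sys.modules[__name__] = _LazyModule(
+#     ...     __name__, globals()["__file__"], _import_structure, module_spec=__spec__
+#     ... )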
+
+
+class OptionalDependencyNotAvailable(BaseException):
+ """Internally used error class for signalling an optional dependency was not found."""
+
+
+def direct_transformers_import(path: str, file="__init__.py") -> ModuleType:
+ """Imports transformers directly
+
+ Args:
+ path (`str`): The path to the source file
+ file (`str`, optional): The file to join with the path. Defaults to "__init__.py".
+
+ Returns:
+ `ModuleType`: The resulting imported module
+ """
+ name = "transformers"
+ location = os.path.join(path, file)
+ spec = importlib.util.spec_from_file_location(name, location, submodule_search_locations=[path])
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+ module = sys.modules[name]
+ return module
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/logging.py b/env-llmeval/lib/python3.10/site-packages/transformers/utils/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..3471e5ab66c62d0763472824cc19ec582639b00d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/utils/logging.py
@@ -0,0 +1,396 @@
+# coding=utf-8
+# Copyright 2020 Optuna, Hugging Face
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Logging utilities."""
+
+
+import functools
+import logging
+import os
+import sys
+import threading
+from logging import (
+ CRITICAL, # NOQA
+ DEBUG, # NOQA
+ ERROR, # NOQA
+ FATAL, # NOQA
+ INFO, # NOQA
+ NOTSET, # NOQA
+ WARN, # NOQA
+ WARNING, # NOQA
+)
+from logging import captureWarnings as _captureWarnings
+from typing import Optional
+
+import huggingface_hub.utils as hf_hub_utils
+from tqdm import auto as tqdm_lib
+
+
+_lock = threading.Lock()
+_default_handler: Optional[logging.Handler] = None
+
+log_levels = {
+ "detail": logging.DEBUG, # will also print filename and line number
+ "debug": logging.DEBUG,
+ "info": logging.INFO,
+ "warning": logging.WARNING,
+ "error": logging.ERROR,
+ "critical": logging.CRITICAL,
+}
+
+_default_log_level = logging.WARNING
+
+_tqdm_active = not hf_hub_utils.are_progress_bars_disabled()
+
+
+def _get_default_logging_level():
+ """
+ If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level. If it
+ is not, fall back to `_default_log_level`.
+ """
+ env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
+ if env_level_str:
+ if env_level_str in log_levels:
+ return log_levels[env_level_str]
+ else:
+ logging.getLogger().warning(
+ f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
+ f"has to be one of: { ', '.join(log_levels.keys()) }"
+ )
+ return _default_log_level
+
+
+def _get_library_name() -> str:
+ return __name__.split(".")[0]
+
+
+def _get_library_root_logger() -> logging.Logger:
+ return logging.getLogger(_get_library_name())
+
+
+def _configure_library_root_logger() -> None:
+ global _default_handler
+
+ with _lock:
+ if _default_handler:
+ # This library has already configured the library root logger.
+ return
+ _default_handler = logging.StreamHandler() # Set sys.stderr as stream.
+ # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176
+ if sys.stderr is None:
+ sys.stderr = open(os.devnull, "w")
+
+ _default_handler.flush = sys.stderr.flush
+
+ # Apply our default configuration to the library root logger.
+ library_root_logger = _get_library_root_logger()
+ library_root_logger.addHandler(_default_handler)
+ library_root_logger.setLevel(_get_default_logging_level())
+ # if logging level is debug, we add pathname and lineno to formatter for easy debugging
+ if os.getenv("TRANSFORMERS_VERBOSITY", None) == "detail":
+ formatter = logging.Formatter("[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s")
+ _default_handler.setFormatter(formatter)
+
+ library_root_logger.propagate = False
+
+
+def _reset_library_root_logger() -> None:
+ global _default_handler
+
+ with _lock:
+ if not _default_handler:
+ return
+
+ library_root_logger = _get_library_root_logger()
+ library_root_logger.removeHandler(_default_handler)
+ library_root_logger.setLevel(logging.NOTSET)
+ _default_handler = None
+
+
+def get_log_levels_dict():
+ return log_levels
+
+
+def captureWarnings(capture):
+ """
+ Calls the `captureWarnings` method from the logging library to enable management of the warnings emitted by the
+ `warnings` library.
+
+ Read more about this method here:
+ https://docs.python.org/3/library/logging.html#integration-with-the-warnings-module
+
+ All warnings will be logged through the `py.warnings` logger.
+
+ Careful: this method also adds a handler to this logger if it does not already have one, and updates the logging
+ level of that logger to match the library's root logger.
+ """
+ logger = get_logger("py.warnings")
+
+ if not logger.handlers:
+ logger.addHandler(_default_handler)
+
+ logger.setLevel(_get_library_root_logger().level)
+
+ _captureWarnings(capture)
+
+
+def get_logger(name: Optional[str] = None) -> logging.Logger:
+ """
+ Return a logger with the specified name.
+
+ This function is not supposed to be directly accessed unless you are writing a custom transformers module.
+ """
+
+ if name is None:
+ name = _get_library_name()
+
+ _configure_library_root_logger()
+ return logging.getLogger(name)
+
+
+def get_verbosity() -> int:
+ """
+ Return the current level for the 🤗 Transformers root logger as an int.
+
+ Returns:
+ `int`: The logging level.
+
+
+
+ 🤗 Transformers has the following logging levels:
+
+ - 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
+ - 40: `transformers.logging.ERROR`
+ - 30: `transformers.logging.WARNING` or `transformers.logging.WARN`
+ - 20: `transformers.logging.INFO`
+ - 10: `transformers.logging.DEBUG`
+
+ """
+
+ _configure_library_root_logger()
+ return _get_library_root_logger().getEffectiveLevel()
+
+
+def set_verbosity(verbosity: int) -> None:
+ """
+ Set the verbosity level for the 🤗 Transformers root logger.
+
+ Args:
+ verbosity (`int`):
+ Logging level, e.g., one of:
+
+ - `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
+ - `transformers.logging.ERROR`
+ - `transformers.logging.WARNING` or `transformers.logging.WARN`
+ - `transformers.logging.INFO`
+ - `transformers.logging.DEBUG`
+ """
+
+ _configure_library_root_logger()
+ _get_library_root_logger().setLevel(verbosity)
+
+
+def set_verbosity_info():
+ """Set the verbosity to the `INFO` level."""
+ return set_verbosity(INFO)
+
+
+def set_verbosity_warning():
+ """Set the verbosity to the `WARNING` level."""
+ return set_verbosity(WARNING)
+
+
+def set_verbosity_debug():
+ """Set the verbosity to the `DEBUG` level."""
+ return set_verbosity(DEBUG)
+
+
+def set_verbosity_error():
+ """Set the verbosity to the `ERROR` level."""
+ return set_verbosity(ERROR)
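+
+# Illustrative sketch (not part of the library): verbosity can be controlled either through these helpers
+# or through the TRANSFORMERS_VERBOSITY environment variable read at import time, e.g.
+#
+#     >>> set_verbosity_info()    # show INFO and above
+#     >>> set_verbosity_error()   # silence everything below ERROR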
+
+
+def disable_default_handler() -> None:
+ """Disable the default handler of the HuggingFace Transformers's root logger."""
+
+ _configure_library_root_logger()
+
+ assert _default_handler is not None
+ _get_library_root_logger().removeHandler(_default_handler)
+
+
+def enable_default_handler() -> None:
+ """Enable the default handler of the HuggingFace Transformers's root logger."""
+
+ _configure_library_root_logger()
+
+ assert _default_handler is not None
+ _get_library_root_logger().addHandler(_default_handler)
+
+
+def add_handler(handler: logging.Handler) -> None:
+ """adds a handler to the HuggingFace Transformers's root logger."""
+
+ _configure_library_root_logger()
+
+ assert handler is not None
+ _get_library_root_logger().addHandler(handler)
+
+
+def remove_handler(handler: logging.Handler) -> None:
+ """removes given handler from the HuggingFace Transformers's root logger."""
+
+ _configure_library_root_logger()
+
+ assert handler is not None and handler in _get_library_root_logger().handlers
+ _get_library_root_logger().removeHandler(handler)
+
+
+def disable_propagation() -> None:
+ """
+ Disable propagation of the library log outputs. Note that log propagation is disabled by default.
+ """
+
+ _configure_library_root_logger()
+ _get_library_root_logger().propagate = False
+
+
+def enable_propagation() -> None:
+ """
+ Enable propagation of the library log outputs. Please disable the HuggingFace Transformers default handler to
+ prevent double logging if the root logger has been configured.
+ """
+
+ _configure_library_root_logger()
+ _get_library_root_logger().propagate = True
+
+
+def enable_explicit_format() -> None:
+ """
+ Enable explicit formatting for every HuggingFace Transformers logger. The explicit formatter is as follows:
+ ```
+ [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
+ ```
+ All handlers currently bound to the root logger are affected by this method.
+ """
+ handlers = _get_library_root_logger().handlers
+
+ for handler in handlers:
+ formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
+ handler.setFormatter(formatter)
+
+
+def reset_format() -> None:
+ """
+ Resets the formatting for HuggingFace Transformers loggers.
+
+ All handlers currently bound to the root logger are affected by this method.
+ """
+ handlers = _get_library_root_logger().handlers
+
+ for handler in handlers:
+ handler.setFormatter(None)
+
+
+def warning_advice(self, *args, **kwargs):
+ """
+ This method is identical to `logger.warning()`, but if the env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this
+ warning will not be printed.
+ """
+ no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
+ if no_advisory_warnings:
+ return
+ self.warning(*args, **kwargs)
+
+
+logging.Logger.warning_advice = warning_advice
+
+
+@functools.lru_cache(None)
+def warning_once(self, *args, **kwargs):
+ """
+ This method is identical to `logger.warning()`, but will emit the warning with the same message only once.
+
+ Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache.
+ The assumption here is that all warning messages are unique across the code. If they aren't, then we need to switch
+ to another type of cache that includes the caller frame information in the hashing function.
+ """
+ self.warning(*args, **kwargs)
+
+
+logging.Logger.warning_once = warning_once
+
+
+class EmptyTqdm:
+ """Dummy tqdm which doesn't do anything."""
+
+ def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
+ self._iterator = args[0] if args else None
+
+ def __iter__(self):
+ return iter(self._iterator)
+
+ def __getattr__(self, _):
+ """Return empty function."""
+
+ def empty_fn(*args, **kwargs): # pylint: disable=unused-argument
+ return
+
+ return empty_fn
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type_, value, traceback):
+ return
+
+
+class _tqdm_cls:
+ def __call__(self, *args, **kwargs):
+ if _tqdm_active:
+ return tqdm_lib.tqdm(*args, **kwargs)
+ else:
+ return EmptyTqdm(*args, **kwargs)
+
+ def set_lock(self, *args, **kwargs):
+ self._lock = None
+ if _tqdm_active:
+ return tqdm_lib.tqdm.set_lock(*args, **kwargs)
+
+ def get_lock(self):
+ if _tqdm_active:
+ return tqdm_lib.tqdm.get_lock()
+
+
+tqdm = _tqdm_cls()
+
+
+def is_progress_bar_enabled() -> bool:
+ """Return a boolean indicating whether tqdm progress bars are enabled."""
+ global _tqdm_active
+ return bool(_tqdm_active)
+
+
+def enable_progress_bar():
+ """Enable tqdm progress bar."""
+ global _tqdm_active
+ _tqdm_active = True
+ hf_hub_utils.enable_progress_bars()
+
+
+def disable_progress_bar():
+ """Disable tqdm progress bar."""
+ global _tqdm_active
+ _tqdm_active = False
+ hf_hub_utils.disable_progress_bars()
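+
+# Illustrative sketch (not part of the library): progress bars can be toggled globally, which also
+# forwards the setting to huggingface_hub, e.g.
+#
+#     >>> disable_progress_bar()   # e.g. to keep CI logs clean
+#     >>> enable_progress_bar()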
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/utils/peft_utils.py b/env-llmeval/lib/python3.10/site-packages/transformers/utils/peft_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2078f1ae960955b7c615819f25081978fdab2563
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/utils/peft_utils.py
@@ -0,0 +1,124 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+import os
+from typing import Dict, Optional, Union
+
+from packaging import version
+
+from .hub import cached_file
+from .import_utils import is_peft_available
+
+
+ADAPTER_CONFIG_NAME = "adapter_config.json"
+ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
+ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
+
+
+def find_adapter_config_file(
+ model_id: str,
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
+ force_download: bool = False,
+ resume_download: bool = False,
+ proxies: Optional[Dict[str, str]] = None,
+ token: Optional[Union[bool, str]] = None,
+ revision: Optional[str] = None,
+ local_files_only: bool = False,
+ subfolder: str = "",
+ _commit_hash: Optional[str] = None,
+) -> Optional[str]:
+ r"""
+ Simply checks whether the model stored on the Hub or locally is an adapter model. Returns the path of the adapter
+ config file if it is, and None otherwise.
+
+ Args:
+ model_id (`str`):
+ The identifier of the model to look for, can be either a local path or an id to the repository on the Hub.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
+ cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force to (re-)download the configuration files and override the cached versions if they
+ exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete an incompletely received file. Attempts to resume the download if such a file exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+
+
+
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/"`.
+
+
+
+ local_files_only (`bool`, *optional*, defaults to `False`):
+ If `True`, will only try to load the adapter configuration from local files.
+ subfolder (`str`, *optional*, defaults to `""`):
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
+ specify the folder name here.
+ """
+ adapter_cached_filename = None
+ if model_id is None:
+ return None
+ elif os.path.isdir(model_id):
+ list_remote_files = os.listdir(model_id)
+ if ADAPTER_CONFIG_NAME in list_remote_files:
+ adapter_cached_filename = os.path.join(model_id, ADAPTER_CONFIG_NAME)
+ else:
+ adapter_cached_filename = cached_file(
+ model_id,
+ ADAPTER_CONFIG_NAME,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ resume_download=resume_download,
+ proxies=proxies,
+ token=token,
+ revision=revision,
+ local_files_only=local_files_only,
+ subfolder=subfolder,
+ _commit_hash=_commit_hash,
+ _raise_exceptions_for_gated_repo=False,
+ _raise_exceptions_for_missing_entries=False,
+ _raise_exceptions_for_connection_errors=False,
+ )
+
+ return adapter_cached_filename
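+
+# Illustrative sketch (not part of the library): "some-user/some-adapter" below is a placeholder repo
+# id; a caller can distinguish adapter repos from full model repos like this:
+#
+#     >>> adapter_config = find_adapter_config_file("some-user/some-adapter")
+#     >>> if adapter_config is not None:
+#     ...     pass  # treat the checkpoint as a PEFT adapter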
+
+
+def check_peft_version(min_version: str) -> None:
+ r"""
+ Checks if the version of PEFT is compatible.
+
+ Args:
+ min_version (`str`):
+ The minimum version of PEFT to check against.
+ """
+ if not is_peft_available():
+ raise ValueError("PEFT is not installed. Please install it with `pip install peft`")
+
+ is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version)
+
+ if not is_peft_version_compatible:
+ raise ValueError(
+ f"The version of PEFT you are using is not compatible, please use a version that is greater"
+ f" than {min_version}"
+ )