diff --git a/ckpts/universal/global_step40/zero/21.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/21.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..46a548b73b8aa9a59475bc0a8a21ac0239c8cdf6
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/21.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8547783fa200231df4ac185143a6a692505610ef5ee7af0073050092b6f47af
+size 9293
diff --git a/ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f3190b40273edd5bc6fc17739e4c3d92dd1f02f5
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa4fefe14b4d25115363f7ae1cd0a7430037049d07491d23560465434c470798
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e0fa6643c10c9db93252131aa0b64eb855b84918
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39549c344344b6cfc5fefe5749c589d1bef3772bd20e34b059f503f92bf1bd06
+size 33555627
diff --git a/venv/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ddd05874d9e42aeea8e59702e2a8dbe2ba3ab02
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2659c210fcdd0c952102b1d3230c5a7f000bc5f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__init__.py b/venv/lib/python3.10/site-packages/transformers/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa5d95a85b538171ec9cf4fa16e892df1efdef6b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/commands/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from argparse import ArgumentParser
+
+
+class BaseTransformersCLICommand(ABC):
+ @staticmethod
+ @abstractmethod
+ def register_subcommand(parser: ArgumentParser):
+ raise NotImplementedError()
+
+ @abstractmethod
+ def run(self):
+ raise NotImplementedError()
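+
+# A minimal sketch of a concrete command (hypothetical, for illustration only;
+# not part of the original file):
+#
+# class HelloCommand(BaseTransformersCLICommand):
+#     @staticmethod
+#     def register_subcommand(parser: ArgumentParser):
+#         hello_parser = parser.add_parser("hello")
+#         hello_parser.set_defaults(func=lambda args: HelloCommand())
+#
+#     def run(self):
+#         print("Hello from transformers-cli")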
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3278adcaa79c68d503ecc0de0969f9a5a4726071
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35bffce63b494c33eb491f7ed1abcc44125bc3c4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c74c5a815f0a3ec92bdd986a1087cfb91d979eeb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd1c1b5f2164e69631dcbb08c065ef387634ac0d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b00694cfbd77b245e708f8851b04eceac8db3cf5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e54d9c87afa13732ac22082ef0982b1aeac8780
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d906e0c6fbbc2fb071e27f902f1166a9e923b674
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca310736d021f0e2d048f20f64f8fdf3b5be4821
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df2da1b79be9a49d2744f5ecffc624de090ead50
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f84ce213582a9923c8fbd8c14eb6a58570297b46
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5b5dbedbcb9345b56d573d6243691a97270b7a1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66bc74ec20c0d22551417be7ea6f2d27a9021a54
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a54cb0c9408ba17d1dcdd2bc372576e333a5107
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/add_new_model.py b/venv/lib/python3.10/site-packages/transformers/commands/add_new_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..87949827d9f8844f931375f21fcc06df51acb155
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/commands/add_new_model.py
@@ -0,0 +1,259 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import shutil
+import warnings
+from argparse import ArgumentParser, Namespace
+from pathlib import Path
+from typing import List
+
+from ..utils import logging
+from . import BaseTransformersCLICommand
+
+
+try:
+ from cookiecutter.main import cookiecutter
+
+ _has_cookiecutter = True
+except ImportError:
+ _has_cookiecutter = False
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+def add_new_model_command_factory(args: Namespace):
+ return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
+
+
+class AddNewModelCommand(BaseTransformersCLICommand):
+ @staticmethod
+ def register_subcommand(parser: ArgumentParser):
+ add_new_model_parser = parser.add_parser("add-new-model")
+ add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
+ add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
+ add_new_model_parser.add_argument(
+ "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
+ )
+ add_new_model_parser.set_defaults(func=add_new_model_command_factory)
+
+ def __init__(self, testing: bool, testing_file: str, path=None, *args):
+ self._testing = testing
+ self._testing_file = testing_file
+ self._path = path
+
+ def run(self):
+ warnings.warn(
+ "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
+ "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
+ "checks, you should use `transformers-cli add-new-model-like` instead."
+ )
+ if not _has_cookiecutter:
+ raise ImportError(
+ "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
+ "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
+ )
+ # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
+ directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
+ if len(directories) > 0:
+ raise ValueError(
+ "Several directories starting with `cookiecutter-template-` in current working directory. "
+ "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
+ "change your working directory."
+ )
+
+ path_to_transformer_root = (
+ Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
+ )
+ path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
+
+ # Execute cookiecutter
+ if not self._testing:
+ cookiecutter(str(path_to_cookiecutter))
+ else:
+ with open(self._testing_file, "r") as configuration_file:
+ testing_configuration = json.load(configuration_file)
+
+ cookiecutter(
+ str(path_to_cookiecutter if self._path is None else self._path),
+ no_input=True,
+ extra_context=testing_configuration,
+ )
+
+ directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
+
+ # Retrieve configuration
+ with open(directory + "/configuration.json", "r") as configuration_file:
+ configuration = json.load(configuration_file)
+
+ lowercase_model_name = configuration["lowercase_modelname"]
+ generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
+ os.remove(f"{directory}/configuration.json")
+
+ output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
+ output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
+ output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
+
+ model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
+ os.makedirs(model_dir, exist_ok=True)
+ os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
+
+ # Tests require submodules as they have parent imports
+ with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
+ pass
+
+ shutil.move(
+ f"{directory}/__init__.py",
+ f"{model_dir}/__init__.py",
+ )
+ shutil.move(
+ f"{directory}/configuration_{lowercase_model_name}.py",
+ f"{model_dir}/configuration_{lowercase_model_name}.py",
+ )
+
+ def remove_copy_lines(path):
+ with open(path, "r") as f:
+ lines = f.readlines()
+ with open(path, "w") as f:
+ for line in lines:
+ if "# Copied from transformers." not in line:
+ f.write(line)
+
+ if output_pytorch:
+ if not self._testing:
+ remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
+
+ shutil.move(
+ f"{directory}/modeling_{lowercase_model_name}.py",
+ f"{model_dir}/modeling_{lowercase_model_name}.py",
+ )
+
+ shutil.move(
+ f"{directory}/test_modeling_{lowercase_model_name}.py",
+ f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
+ )
+ else:
+ os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
+ os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
+
+ if output_tensorflow:
+ if not self._testing:
+ remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+
+ shutil.move(
+ f"{directory}/modeling_tf_{lowercase_model_name}.py",
+ f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
+ )
+
+ shutil.move(
+ f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
+ f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
+ )
+ else:
+ os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+ os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
+
+ if output_flax:
+ if not self._testing:
+ remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+
+ shutil.move(
+ f"{directory}/modeling_flax_{lowercase_model_name}.py",
+ f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
+ )
+
+ shutil.move(
+ f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
+ f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
+ )
+ else:
+ os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+ os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
+
+ shutil.move(
+ f"{directory}/{lowercase_model_name}.md",
+ f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
+ )
+
+ shutil.move(
+ f"{directory}/tokenization_{lowercase_model_name}.py",
+ f"{model_dir}/tokenization_{lowercase_model_name}.py",
+ )
+
+ shutil.move(
+ f"{directory}/tokenization_fast_{lowercase_model_name}.py",
+ f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
+ )
+
+ from os import fdopen, remove
+ from shutil import copymode, move
+ from tempfile import mkstemp
+
+ def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
+ # Create temp file
+ fh, abs_path = mkstemp()
+ line_found = False
+ with fdopen(fh, "w") as new_file:
+ with open(original_file) as old_file:
+ for line in old_file:
+ new_file.write(line)
+ if line_to_copy_below in line:
+ line_found = True
+ for line_to_copy in lines_to_copy:
+ new_file.write(line_to_copy)
+
+ if not line_found:
+ raise ValueError(f"Line {line_to_copy_below} was not found in file.")
+
+ # Copy the file permissions from the old file to the new file
+ copymode(original_file, abs_path)
+ # Remove original file
+ remove(original_file)
+ # Move new file
+ move(abs_path, original_file)
+
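+ # For example (hypothetical call): replace("f.py", "# anchor", ["new line\n"])
+ # rewrites f.py in place, inserting "new line" right after the first line that
+ # contains "# anchor", while preserving the original file's permissions.
+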
+ def skip_units(line):
+ return (
+ ("generating PyTorch" in line and not output_pytorch)
+ or ("generating TensorFlow" in line and not output_tensorflow)
+ or ("generating Flax" in line and not output_flax)
+ )
+
+ def replace_in_files(path_to_datafile):
+ with open(path_to_datafile) as datafile:
+ lines_to_copy = []
+ skip_file = False
+ skip_snippet = False
+ for line in datafile:
+ if "# To replace in: " in line and "##" not in line:
+ file_to_replace_in = line.split('"')[1]
+ skip_file = skip_units(line)
+ elif "# Below: " in line and "##" not in line:
+ line_to_copy_below = line.split('"')[1]
+ skip_snippet = skip_units(line)
+ elif "# End." in line and "##" not in line:
+ if not skip_file and not skip_snippet:
+ replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
+
+ lines_to_copy = []
+ elif "# Replace with" in line and "##" not in line:
+ lines_to_copy = []
+ elif "##" not in line:
+ lines_to_copy.append(line)
+
+ remove(path_to_datafile)
+
+ replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
+ os.rmdir(directory)
diff --git a/venv/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py b/venv/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py
new file mode 100644
index 0000000000000000000000000000000000000000..626e8373192a6c40993e5471e85335318e2b7ffd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py
@@ -0,0 +1,1713 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import difflib
+import json
+import os
+import re
+from argparse import ArgumentParser, Namespace
+from dataclasses import dataclass
+from datetime import date
+from itertools import chain
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional, Pattern, Tuple, Union
+
+import yaml
+
+from ..models import auto as auto_module
+from ..models.auto.configuration_auto import model_type_to_module_name
+from ..utils import is_flax_available, is_tf_available, is_torch_available, logging
+from . import BaseTransformersCLICommand
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+CURRENT_YEAR = date.today().year
+TRANSFORMERS_PATH = Path(__file__).parent.parent
+REPO_PATH = TRANSFORMERS_PATH.parent.parent
+
+
+@dataclass
+class ModelPatterns:
+ """
+ Holds the basic information about a new model for the add-new-model-like command.
+
+ Args:
+ model_name (`str`): The model name.
+ checkpoint (`str`): The checkpoint to use for doc examples.
+ model_type (`str`, *optional*):
+ The model type, the identifier used internally in the library like `bert` or `xlm-roberta`. Will default to
+ `model_name` lowercased with spaces replaced with minuses (-).
+ model_lower_cased (`str`, *optional*):
+ The lowercased version of the model name, to use for the module name or function names. Will default to
+ `model_name` lowercased with spaces and minuses replaced with underscores.
+ model_camel_cased (`str`, *optional*):
+ The camel-cased version of the model name, to use for the class names. Will default to `model_name`
+ camel-cased (with spaces and minuses both considered as word separators).
+ model_upper_cased (`str`, *optional*):
+ The uppercased version of the model name, to use for the constant names. Will default to `model_name`
+ uppercased with spaces and minuses replaced with underscores.
+ config_class (`str`, *optional*):
+ The configuration class associated with this model. Will default to `"{model_camel_cased}Config"`.
+ tokenizer_class (`str`, *optional*):
+ The tokenizer class associated with this model (leave to `None` for models that don't use a tokenizer).
+ image_processor_class (`str`, *optional*):
+ The image processor class associated with this model (leave to `None` for models that don't use an image
+ processor).
+ feature_extractor_class (`str`, *optional*):
+ The feature extractor class associated with this model (leave to `None` for models that don't use a feature
+ extractor).
+ processor_class (`str`, *optional*):
+ The processor class associated with this model (leave to `None` for models that don't use a processor).
+ """
+
+ model_name: str
+ checkpoint: str
+ model_type: Optional[str] = None
+ model_lower_cased: Optional[str] = None
+ model_camel_cased: Optional[str] = None
+ model_upper_cased: Optional[str] = None
+ config_class: Optional[str] = None
+ tokenizer_class: Optional[str] = None
+ image_processor_class: Optional[str] = None
+ feature_extractor_class: Optional[str] = None
+ processor_class: Optional[str] = None
+
+ def __post_init__(self):
+ if self.model_type is None:
+ self.model_type = self.model_name.lower().replace(" ", "-")
+ if self.model_lower_cased is None:
+ self.model_lower_cased = self.model_name.lower().replace(" ", "_").replace("-", "_")
+ if self.model_camel_cased is None:
+ # Split the model name on - and space
+ words = self.model_name.split(" ")
+ words = list(chain(*[w.split("-") for w in words]))
+ # Make sure each word is capitalized
+ words = [w[0].upper() + w[1:] for w in words]
+ self.model_camel_cased = "".join(words)
+ if self.model_upper_cased is None:
+ self.model_upper_cased = self.model_name.upper().replace(" ", "_").replace("-", "_")
+ if self.config_class is None:
+ self.config_class = f"{self.model_camel_cased}Config"
+
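+# A quick sketch of the derived defaults (hypothetical model name, not part of
+# the original file):
+#   >>> p = ModelPatterns("My Model", checkpoint="my-org/my-model")
+#   >>> p.model_type, p.model_lower_cased, p.model_camel_cased
+#   ('my-model', 'my_model', 'MyModel')
+#   >>> p.model_upper_cased, p.config_class
+#   ('MY_MODEL', 'MyModelConfig')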
+
+ATTRIBUTE_TO_PLACEHOLDER = {
+ "config_class": "[CONFIG_CLASS]",
+ "tokenizer_class": "[TOKENIZER_CLASS]",
+ "image_processor_class": "[IMAGE_PROCESSOR_CLASS]",
+ "feature_extractor_class": "[FEATURE_EXTRACTOR_CLASS]",
+ "processor_class": "[PROCESSOR_CLASS]",
+ "checkpoint": "[CHECKPOINT]",
+ "model_type": "[MODEL_TYPE]",
+ "model_upper_cased": "[MODEL_UPPER_CASED]",
+ "model_camel_cased": "[MODEL_CAMELCASED]",
+ "model_lower_cased": "[MODEL_LOWER_CASED]",
+ "model_name": "[MODEL_NAME]",
+}
+
+
+def is_empty_line(line: str) -> bool:
+ """
+ Determines whether a line is empty or not.
+ """
+ return len(line) == 0 or line.isspace()
+
+
+def find_indent(line: str) -> int:
+ """
+ Returns the number of spaces that start a line indent.
+ """
+ search = re.search(r"^(\s*)(?:\S|$)", line)
+ if search is None:
+ return 0
+ return len(search.groups()[0])
+
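+# For example (hypothetical inputs): find_indent("    x = 1") returns 4,
+# find_indent("x = 1") returns 0, and is_empty_line("   ") returns True.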
+
+def parse_module_content(content: str) -> List[str]:
+ """
+ Parse the content of a module in the list of objects it defines.
+
+ Args:
+ content (`str`): The content to parse
+
+ Returns:
+ `List[str]`: The list of objects defined in the module.
+ """
+ objects = []
+ current_object = []
+ lines = content.split("\n")
+ # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
+ end_markers = [")", "]", "}", '"""']
+
+ for line in lines:
+ # End of an object
+ is_valid_object = len(current_object) > 0
+ if is_valid_object and len(current_object) == 1:
+ is_valid_object = not current_object[0].startswith("# Copied from")
+ if not is_empty_line(line) and find_indent(line) == 0 and is_valid_object:
+ # Closing parts should be included in current object
+ if line in end_markers:
+ current_object.append(line)
+ objects.append("\n".join(current_object))
+ current_object = []
+ else:
+ objects.append("\n".join(current_object))
+ current_object = [line]
+ else:
+ current_object.append(line)
+
+ # Add last object
+ if len(current_object) > 0:
+ objects.append("\n".join(current_object))
+
+ return objects
+
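+# For example (hypothetical input), parsing "import os\n\n\ndef f():\n    pass"
+# yields two objects: the import statement (with its trailing blank lines) and
+# the function definition.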
+
+def extract_block(content: str, indent_level: int = 0) -> str:
+ """Return the first block in `content` with the indent level `indent_level`.
+
+ The first line in `content` should be indented at `indent_level` level, otherwise an error will be thrown.
+
+ This method will immediately stop the search when a (non-empty) line with indent level less than `indent_level` is
+ encountered.
+
+ Args:
+ content (`str`): The content to parse
+ indent_level (`int`, *optional*, defaults to 0): The indent level of the block to search for.
+
+ Returns:
+ `str`: The first block in `content` with the indent level `indent_level`.
+ """
+ current_object = []
+ lines = content.split("\n")
+ # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
+ end_markers = [")", "]", "}", '"""']
+
+ for idx, line in enumerate(lines):
+ if idx == 0 and indent_level > 0 and not is_empty_line(line) and find_indent(line) != indent_level:
+ raise ValueError(
+ f"When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. Got "
+ f"{find_indent(line)} instead."
+ )
+
+ if find_indent(line) < indent_level and not is_empty_line(line):
+ break
+
+ # End of an object
+ is_valid_object = len(current_object) > 0
+ if (
+ not is_empty_line(line)
+ and not line.endswith(":")
+ and find_indent(line) == indent_level
+ and is_valid_object
+ ):
+ # Closing parts should be included in current object
+ if line.lstrip() in end_markers:
+ current_object.append(line)
+ return "\n".join(current_object)
+ else:
+ current_object.append(line)
+
+ # Add last object
+ if len(current_object) > 0:
+ return "\n".join(current_object)
+
+
+def add_content_to_text(
+ text: str,
+ content: str,
+ add_after: Optional[Union[str, Pattern]] = None,
+ add_before: Optional[Union[str, Pattern]] = None,
+ exact_match: bool = False,
+) -> str:
+ """
+ A utility to add some content inside a given text.
+
+ Args:
+ text (`str`): The text in which we want to insert some content.
+ content (`str`): The content to add.
+ add_after (`str` or `Pattern`):
+ The pattern to test on each line of `text`; the new content is added after the first line matching it.
+ add_before (`str` or `Pattern`):
+ The pattern to test on each line of `text`; the new content is added before the first line matching it.
+ exact_match (`bool`, *optional*, defaults to `False`):
+ When `exact_match=True`, a line is considered a match with `add_after` or `add_before` only if it is exactly
+ equal to it; otherwise, a line matches if `add_after`/`add_before` is present anywhere in it.
+
+
+
+ The arguments `add_after` and `add_before` are mutually exclusive, and exactly one of them must be provided.
+
+
+
+ Returns:
+ `str`: The text with the new content added if a match was found.
+ """
+ if add_after is None and add_before is None:
+ raise ValueError("You need to pass either `add_after` or `add_before`")
+ if add_after is not None and add_before is not None:
+ raise ValueError("You can't pass both `add_after` or `add_before`")
+ pattern = add_after if add_before is None else add_before
+
+ def this_is_the_line(line):
+ if isinstance(pattern, Pattern):
+ return pattern.search(line) is not None
+ elif exact_match:
+ return pattern == line
+ else:
+ return pattern in line
+
+ new_lines = []
+ for line in text.split("\n"):
+ if this_is_the_line(line):
+ if add_before is not None:
+ new_lines.append(content)
+ new_lines.append(line)
+ if add_after is not None:
+ new_lines.append(content)
+ else:
+ new_lines.append(line)
+
+ return "\n".join(new_lines)
+
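+# A minimal sketch (hypothetical inputs, not part of the original file):
+#   >>> add_content_to_text("a\nb\nc", "NEW", add_after="b")
+#   'a\nb\nNEW\nc'
+#   >>> add_content_to_text("a\nb\nc", "NEW", add_before="b")
+#   'a\nNEW\nb\nc'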
+
+def add_content_to_file(
+ file_name: Union[str, os.PathLike],
+ content: str,
+ add_after: Optional[Union[str, Pattern]] = None,
+ add_before: Optional[Union[str, Pattern]] = None,
+ exact_match: bool = False,
+):
+ """
+ A utility to add some content inside a given file.
+
+ Args:
+ file_name (`str` or `os.PathLike`): The name of the file in which we want to insert some content.
+ content (`str`): The content to add.
+ add_after (`str` or `Pattern`):
+ The pattern to test on each line of the file; the new content is added after the first line matching it.
+ add_before (`str` or `Pattern`):
+ The pattern to test on each line of the file; the new content is added before the first line matching it.
+ exact_match (`bool`, *optional*, defaults to `False`):
+ When `exact_match=True`, a line is considered a match with `add_after` or `add_before` only if it is exactly
+ equal to it; otherwise, a line matches if `add_after`/`add_before` is present anywhere in it.
+
+
+
+ The arguments `add_after` and `add_before` are mutually exclusive, and exactly one of them must be provided.
+
+
+ """
+ with open(file_name, "r", encoding="utf-8") as f:
+ old_content = f.read()
+
+ new_content = add_content_to_text(
+ old_content, content, add_after=add_after, add_before=add_before, exact_match=exact_match
+ )
+
+ with open(file_name, "w", encoding="utf-8") as f:
+ f.write(new_content)
+
+
+def replace_model_patterns(
+ text: str, old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns
+) -> Tuple[str, str]:
+ """
+ Replace all patterns present in a given text.
+
+ Args:
+ text (`str`): The text to treat.
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+
+ Returns:
+ `Tuple[str, str]`: A tuple of the treated text and the replacements actually performed in it.
+ """
+ # The order is crucially important as we will check and replace in that order. For instance the config class name
+ # probably contains the camel-cased model name, so it has to be treated first.
+ attributes_to_check = ["config_class"]
+ # Add relevant preprocessing classes
+ for attr in ["tokenizer_class", "image_processor_class", "feature_extractor_class", "processor_class"]:
+ if getattr(old_model_patterns, attr) is not None and getattr(new_model_patterns, attr) is not None:
+ attributes_to_check.append(attr)
+
+ # Special cases for checkpoint and model_type
+ if old_model_patterns.checkpoint not in [old_model_patterns.model_type, old_model_patterns.model_lower_cased]:
+ attributes_to_check.append("checkpoint")
+ if old_model_patterns.model_type != old_model_patterns.model_lower_cased:
+ attributes_to_check.append("model_type")
+ else:
+ text = re.sub(
+ rf'(\s*)model_type = "{old_model_patterns.model_type}"',
+ r'\1model_type = "[MODEL_TYPE]"',
+ text,
+ )
+
+ # Special case when the model camel cased and upper cased names are the same for the old model (like for GPT2) but
+ # not the new one. We can't just do a plain replacement over the whole text and need a special regex instead.
+ if old_model_patterns.model_upper_cased == old_model_patterns.model_camel_cased:
+ old_model_value = old_model_patterns.model_upper_cased
+ if re.search(rf"{old_model_value}_[A-Z_]*[^A-Z_]", text) is not None:
+ text = re.sub(rf"{old_model_value}([A-Z_]*)([^a-zA-Z_])", r"[MODEL_UPPER_CASED]\1\2", text)
+ else:
+ attributes_to_check.append("model_upper_cased")
+
+ attributes_to_check.extend(["model_camel_cased", "model_lower_cased", "model_name"])
+
+ # Now let's replace every other attribute by their placeholder
+ for attr in attributes_to_check:
+ text = text.replace(getattr(old_model_patterns, attr), ATTRIBUTE_TO_PLACEHOLDER[attr])
+
+ # Finally we can replace the placeholders by the new values.
+ replacements = []
+ for attr, placeholder in ATTRIBUTE_TO_PLACEHOLDER.items():
+ if placeholder in text:
+ replacements.append((getattr(old_model_patterns, attr), getattr(new_model_patterns, attr)))
+ text = text.replace(placeholder, getattr(new_model_patterns, attr))
+
+ # If we have two inconsistent replacements, we don't return anything (ex: GPT2->GPT_NEW and GPT2->GPTNew)
+ old_replacement_values = [old for old, new in replacements]
+ if len(set(old_replacement_values)) != len(old_replacement_values):
+ return text, ""
+
+ replacements = simplify_replacements(replacements)
+ replacements = [f"{old}->{new}" for old, new in replacements]
+ return text, ",".join(replacements)
+
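+# For instance (hypothetical patterns: `bert` holding BERT-style values and
+# `my_model` holding "My Model" values):
+#   replace_model_patterns("class BertModel(BertPreTrainedModel):", bert, my_model)
+#   -> ('class MyModelModel(MyModelPreTrainedModel):', 'Bert->MyModel')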
+
+def simplify_replacements(replacements):
+ """
+ Simplify a list of replacement patterns to make sure there are no needless ones.
+
+ For instance in the sequence "Bert->BertNew, BertConfig->BertNewConfig, bert->bert_new", the replacement
+ "BertConfig->BertNewConfig" is implied by "Bert->BertNew" so not needed.
+
+ Args:
+ replacements (`List[Tuple[str, str]]`): List of patterns (old, new)
+
+ Returns:
+ `List[Tuple[str, str]]`: The list of patterns simplified.
+ """
+ if len(replacements) <= 1:
+ # Nothing to simplify
+ return replacements
+
+ # Next let's sort replacements by length as a replacement can only "imply" another replacement if it's shorter.
+ replacements.sort(key=lambda x: len(x[0]))
+
+ idx = 0
+ while idx < len(replacements):
+ old, new = replacements[idx]
+ # Loop through all replacements after
+ j = idx + 1
+ while j < len(replacements):
+ old_2, new_2 = replacements[j]
+ # If the replacement is implied by the current one, we can drop it.
+ if old_2.replace(old, new) == new_2:
+ replacements.pop(j)
+ else:
+ j += 1
+ idx += 1
+
+ return replacements
+
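+# Reusing the docstring example:
+#   simplify_replacements([("Bert", "BertNew"), ("BertConfig", "BertNewConfig"), ("bert", "bert_new")])
+#   -> [("Bert", "BertNew"), ("bert", "bert_new")]
+# since "BertConfig"->"BertNewConfig" is implied by the shorter "Bert"->"BertNew".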
+
+def get_module_from_file(module_file: Union[str, os.PathLike]) -> str:
+ """
+ Returns the module name corresponding to a module file.
+ """
+ full_module_path = Path(module_file).absolute()
+ module_parts = full_module_path.with_suffix("").parts
+
+ # Find the first part named transformers, starting from the end.
+ idx = len(module_parts) - 1
+ while idx >= 0 and module_parts[idx] != "transformers":
+ idx -= 1
+ if idx < 0:
+ raise ValueError(f"{module_file} is not a transformers module.")
+
+ return ".".join(module_parts[idx:])
+
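+# For example (hypothetical path):
+#   get_module_from_file("/repo/src/transformers/models/bert/modeling_bert.py")
+#   -> "transformers.models.bert.modeling_bert"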
+
+SPECIAL_PATTERNS = {
+ "_CHECKPOINT_FOR_DOC =": "checkpoint",
+ "_CONFIG_FOR_DOC =": "config_class",
+ "_TOKENIZER_FOR_DOC =": "tokenizer_class",
+ "_IMAGE_PROCESSOR_FOR_DOC =": "image_processor_class",
+ "_FEAT_EXTRACTOR_FOR_DOC =": "feature_extractor_class",
+ "_PROCESSOR_FOR_DOC =": "processor_class",
+}
+
+
+_re_class_func = re.compile(r"^(?:class|def)\s+([^\s:\(]+)\s*(?:\(|\:)", flags=re.MULTILINE)
+
+
+def remove_attributes(obj, target_attr):
+ """Remove `target_attr` in `obj`."""
+ lines = obj.split(os.linesep)
+
+ target_idx = None
+ for idx, line in enumerate(lines):
+ # search for assignment
+ if line.lstrip().startswith(f"{target_attr} = "):
+ target_idx = idx
+ break
+ # search for function/method definition
+ elif line.lstrip().startswith(f"def {target_attr}("):
+ target_idx = idx
+ break
+
+ # target not found
+ if target_idx is None:
+ return obj
+
+ line = lines[target_idx]
+ indent_level = find_indent(line)
+ # forward pass to find the ending of the block (including empty lines)
+ parsed = extract_block("\n".join(lines[target_idx:]), indent_level)
+ num_lines = len(parsed.split("\n"))
+ for idx in range(num_lines):
+ lines[target_idx + idx] = None
+
+ # backward pass to find comments or decorator
+ for idx in range(target_idx - 1, -1, -1):
+ line = lines[idx]
+ if (line.lstrip().startswith("#") or line.lstrip().startswith("@")) and find_indent(line) == indent_level:
+ lines[idx] = None
+ else:
+ break
+
+ new_obj = os.linesep.join([x for x in lines if x is not None])
+
+ return new_obj
+
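+# A minimal sketch (hypothetical input): calling remove_attributes on a class
+# body with target_attr="foo" removes the `foo = ...` (or `def foo(...)`) block
+# together with any comment or decorator lines sitting directly above it at the
+# same indent level.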
+
+def duplicate_module(
+ module_file: Union[str, os.PathLike],
+ old_model_patterns: ModelPatterns,
+ new_model_patterns: ModelPatterns,
+ dest_file: Optional[str] = None,
+ add_copied_from: bool = True,
+ attrs_to_remove: List[str] = None,
+):
+ """
+ Create a new module from an existing one, adapting all function and class names from the old patterns to the new ones.
+
+ Args:
+ module_file (`str` or `os.PathLike`): Path to the module to duplicate.
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ dest_file (`str` or `os.PathLike`, *optional*): Path to the new module.
+ add_copied_from (`bool`, *optional*, defaults to `True`):
+ Whether or not to add `# Copied from` statements in the duplicated module.
+ attrs_to_remove (`List[str]`, *optional*):
+ The names of the attributes to remove from the duplicated module, if any.
+ """
+ if dest_file is None:
+ dest_file = str(module_file).replace(
+ old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
+ )
+
+ with open(module_file, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ content = re.sub(r"# Copyright (\d+)\s", f"# Copyright {CURRENT_YEAR} ", content)
+ objects = parse_module_content(content)
+
+ # Loop and treat all objects
+ new_objects = []
+ for obj in objects:
+ special_pattern = False
+ for pattern, attr in SPECIAL_PATTERNS.items():
+ if pattern in obj:
+ obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))
+ new_objects.append(obj)
+ special_pattern = True
+ break
+
+ if special_pattern:
+ continue
+
+ # Regular classes functions
+ old_obj = obj
+ obj, replacement = replace_model_patterns(obj, old_model_patterns, new_model_patterns)
+ has_copied_from = re.search(r"^#\s+Copied from", obj, flags=re.MULTILINE) is not None
+ if add_copied_from and not has_copied_from and _re_class_func.search(obj) is not None and len(replacement) > 0:
+ # Copied from statement must be added just before the class/function definition, which may not be the
+ # first line because of decorators.
+ module_name = get_module_from_file(module_file)
+ old_object_name = _re_class_func.search(old_obj).groups()[0]
+ obj = add_content_to_text(
+ obj, f"# Copied from {module_name}.{old_object_name} with {replacement}", add_before=_re_class_func
+ )
+ # In all cases, we remove the indented "Copied from" statements on methods.
+ obj = re.sub("\n[ ]+# Copied from [^\n]*\n", "\n", obj)
+
+ new_objects.append(obj)
+
+ content = "\n".join(new_objects)
+ # Remove some attributes that we don't want to copy to the new file(s)
+ if attrs_to_remove is not None:
+ for attr in attrs_to_remove:
+ content = remove_attributes(content, target_attr=attr)
+
+ with open(dest_file, "w", encoding="utf-8") as f:
+ f.write(content)
+
+
+def filter_framework_files(
+ files: List[Union[str, os.PathLike]], frameworks: Optional[List[str]] = None
+) -> List[Union[str, os.PathLike]]:
+ """
+ Filter a list of files to only keep the ones corresponding to a list of frameworks.
+
+ Args:
+ files (`List[Union[str, os.PathLike]]`): The list of files to filter.
+ frameworks (`List[str]`, *optional*): The list of allowed frameworks.
+
+ Returns:
+ `List[Union[str, os.PathLike]]`: The list of filtered files.
+ """
+ if frameworks is None:
+ frameworks = get_default_frameworks()
+
+ framework_to_file = {}
+ others = []
+ for f in files:
+ parts = Path(f).name.split("_")
+ if "modeling" not in parts:
+ others.append(f)
+ continue
+ if "tf" in parts:
+ framework_to_file["tf"] = f
+ elif "flax" in parts:
+ framework_to_file["flax"] = f
+ else:
+ framework_to_file["pt"] = f
+
+ return [framework_to_file[f] for f in frameworks if f in framework_to_file] + others
+
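+# A quick sketch (hypothetical file names): with frameworks=["pt"],
+#   ["modeling_bert.py", "modeling_tf_bert.py", "configuration_bert.py"]
+# filters down to ["modeling_bert.py", "configuration_bert.py"]; files whose
+# names don't contain "modeling" are always kept.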
+
+def get_model_files(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, Union[Path, List[Path]]]:
+ """
+ Retrieves all the files associated with a model.
+
+ Args:
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
+ frameworks (`List[str]`, *optional*):
+ If passed, will only keep the model files corresponding to the passed frameworks.
+
+ Returns:
+ `Dict[str, Union[Path, List[Path]]]`: A dictionary with the following keys:
+ - **doc_file** -- The documentation file for the model.
+ - **model_files** -- All the files in the model module.
+ - **module_name** -- The name of the module corresponding to the model type.
+ - **test_files** -- The test files for the model.
+ """
+ module_name = model_type_to_module_name(model_type)
+
+ model_module = TRANSFORMERS_PATH / "models" / module_name
+ model_files = list(model_module.glob("*.py"))
+ model_files = filter_framework_files(model_files, frameworks=frameworks)
+
+ doc_file = REPO_PATH / "docs" / "source" / "en" / "model_doc" / f"{model_type}.md"
+
+ # Basic pattern for test files
+ test_files = [
+ f"test_modeling_{module_name}.py",
+ f"test_modeling_tf_{module_name}.py",
+ f"test_modeling_flax_{module_name}.py",
+ f"test_tokenization_{module_name}.py",
+ f"test_image_processing_{module_name}.py",
+ f"test_feature_extraction_{module_name}.py",
+ f"test_processor_{module_name}.py",
+ ]
+ test_files = filter_framework_files(test_files, frameworks=frameworks)
+ # Add the test directory
+ test_files = [REPO_PATH / "tests" / "models" / module_name / f for f in test_files]
+ # Filter by existing files
+ test_files = [f for f in test_files if f.exists()]
+
+ return {"doc_file": doc_file, "model_files": model_files, "module_name": module_name, "test_files": test_files}
+
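+# For example (hypothetical call), get_model_files("bert", frameworks=["pt"])
+# would return the doc file docs/source/en/model_doc/bert.md, the PyTorch and
+# non-modeling .py files under models/bert, the module name "bert", and the
+# existing PyTorch test files under tests/models/bert.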
+
+_re_checkpoint_for_doc = re.compile(r"^_CHECKPOINT_FOR_DOC\s+=\s+(\S*)\s*$", flags=re.MULTILINE)
+
+
+def find_base_model_checkpoint(
+ model_type: str, model_files: Optional[Dict[str, Union[Path, List[Path]]]] = None
+) -> str:
+ """
+ Finds the model checkpoint used in the docstrings for a given model.
+
+ Args:
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
+ model_files (`Dict[str, Union[Path, List[Path]]]`, *optional*):
+ The files associated with `model_type`. Can be passed to speed up the function; otherwise they will be computed.
+
+ Returns:
+ `str`: The checkpoint used.
+ """
+ if model_files is None:
+ model_files = get_model_files(model_type)
+ module_files = model_files["model_files"]
+ for fname in module_files:
+ if "modeling" not in str(fname):
+ continue
+
+ with open(fname, "r", encoding="utf-8") as f:
+ content = f.read()
+ if _re_checkpoint_for_doc.search(content) is not None:
+ checkpoint = _re_checkpoint_for_doc.search(content).groups()[0]
+ # Remove quotes
+ checkpoint = checkpoint.replace('"', "")
+ checkpoint = checkpoint.replace("'", "")
+ return checkpoint
+
+ # TODO: Find some kind of fallback if there is no _CHECKPOINT_FOR_DOC in any of the modeling files.
+ return ""
+
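+# For example (hypothetical content), if a modeling file contains the line
+# `_CHECKPOINT_FOR_DOC = "bert-base-uncased"`, find_base_model_checkpoint("bert")
+# returns "bert-base-uncased" with the quotes stripped.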
+
+def get_default_frameworks():
+ """
+ Returns the list of frameworks (PyTorch, TensorFlow, Flax) that are installed in the environment.
+ """
+ frameworks = []
+ if is_torch_available():
+ frameworks.append("pt")
+ if is_tf_available():
+ frameworks.append("tf")
+ if is_flax_available():
+ frameworks.append("flax")
+ return frameworks
+
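+# For example, in an environment where only PyTorch is installed,
+# get_default_frameworks() returns ["pt"].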
+
+_re_model_mapping = re.compile("MODEL_([A-Z_]*)MAPPING_NAMES")
+
+
+def retrieve_model_classes(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, List[str]]:
+ """
+ Retrieve the model classes associated with a given model.
+
+ Args:
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
+ frameworks (`List[str]`, *optional*):
+ The frameworks to look for. Will default to `["pt", "tf", "flax"]`, passing a smaller list will restrict
+ the classes returned.
+
+ Returns:
+ `Dict[str, List[str]]`: A dictionary with one key per framework and the list of model classes associated with
+ that framework as values.
+ """
+ if frameworks is None:
+ frameworks = get_default_frameworks()
+
+ modules = {
+ "pt": auto_module.modeling_auto if is_torch_available() else None,
+ "tf": auto_module.modeling_tf_auto if is_tf_available() else None,
+ "flax": auto_module.modeling_flax_auto if is_flax_available() else None,
+ }
+
+ model_classes = {}
+ for framework in frameworks:
+ new_model_classes = []
+ if modules[framework] is None:
+ raise ValueError(f"You selected {framework} in the frameworks, but it is not installed.")
+ model_mappings = [attr for attr in dir(modules[framework]) if _re_model_mapping.search(attr) is not None]
+ for model_mapping_name in model_mappings:
+ model_mapping = getattr(modules[framework], model_mapping_name)
+ if model_type in model_mapping:
+ new_model_classes.append(model_mapping[model_type])
+
+ if len(new_model_classes) > 0:
+ # Remove duplicates
+ model_classes[framework] = list(set(new_model_classes))
+
+ return model_classes
+
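+# For example (hypothetical output), retrieve_model_classes("bert", frameworks=["pt"])
+# might return {"pt": ["BertModel", "BertForMaskedLM", ...]} -- one deduplicated
+# list of model classes per requested framework.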
+
+def retrieve_info_for_model(model_type, frameworks: Optional[List[str]] = None):
+ """
+ Retrieves all the information from a given model_type.
+
+ Args:
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
+ frameworks (`List[str]`, *optional*):
+ If passed, will only keep the info corresponding to the passed frameworks.
+
+ Returns:
+ `Dict`: A dictionary with the following keys:
+ - **frameworks** (`List[str]`): The list of frameworks that back this model type.
+ - **model_classes** (`Dict[str, List[str]]`): The model classes implemented for that model type.
+ - **model_files** (`Dict[str, Union[Path, List[Path]]]`): The files associated with that model type.
+ - **model_patterns** (`ModelPatterns`): The various patterns for the model.
+ """
+ if model_type not in auto_module.MODEL_NAMES_MAPPING:
+ raise ValueError(f"{model_type} is not a valid model type.")
+
+ model_name = auto_module.MODEL_NAMES_MAPPING[model_type]
+ config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type]
+ if model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES:
+ tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type]
+ tokenizer_class = tokenizer_classes[0] if tokenizer_classes[0] is not None else tokenizer_classes[1]
+ else:
+ tokenizer_class = None
+ image_processor_class = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None)
+ feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None)
+ processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None)
+
+ model_files = get_model_files(model_type, frameworks=frameworks)
+ model_camel_cased = config_class.replace("Config", "")
+
+ available_frameworks = []
+ for fname in model_files["model_files"]:
+ if "modeling_tf" in str(fname):
+ available_frameworks.append("tf")
+ elif "modeling_flax" in str(fname):
+ available_frameworks.append("flax")
+ elif "modeling" in str(fname):
+ available_frameworks.append("pt")
+
+ if frameworks is None:
+ frameworks = get_default_frameworks()
+
+ frameworks = [f for f in frameworks if f in available_frameworks]
+
+ model_classes = retrieve_model_classes(model_type, frameworks=frameworks)
+
+ model_upper_cased = model_camel_cased.upper()
+ model_patterns = ModelPatterns(
+ model_name,
+ checkpoint=find_base_model_checkpoint(model_type, model_files=model_files),
+ model_type=model_type,
+ model_camel_cased=model_camel_cased,
+ model_lower_cased=model_files["module_name"],
+ model_upper_cased=model_upper_cased,
+ config_class=config_class,
+ tokenizer_class=tokenizer_class,
+ image_processor_class=image_processor_class,
+ feature_extractor_class=feature_extractor_class,
+ processor_class=processor_class,
+ )
+
+ return {
+ "frameworks": frameworks,
+ "model_classes": model_classes,
+ "model_files": model_files,
+ "model_patterns": model_patterns,
+ }
+
+
+def clean_frameworks_in_init(
+ init_file: Union[str, os.PathLike], frameworks: Optional[List[str]] = None, keep_processing: bool = True
+):
+ """
+ Removes from an init file all the import lines that belong to frameworks outside a given list, and optionally the
+ lines that concern tokenizers/feature extractors/image processors/processors.
+
+ Args:
+ init_file (`str` or `os.PathLike`): The path to the init to treat.
+ frameworks (`List[str]`, *optional*):
+ If passed, this will remove all imports that are guarded by a framework not in this list.
+ keep_processing (`bool`, *optional*, defaults to `True`):
+ Whether or not to keep the preprocessing (tokenizer, feature extractor, image processor, processor) imports
+ in the init.
+ """
+ if frameworks is None:
+ frameworks = get_default_frameworks()
+
+ names = {"pt": "torch"}
+ to_remove = [names.get(f, f) for f in ["pt", "tf", "flax"] if f not in frameworks]
+ if not keep_processing:
+ to_remove.extend(["sentencepiece", "tokenizers", "vision"])
+
+ if len(to_remove) == 0:
+ # Nothing to do
+ return
+
+ remove_pattern = "|".join(to_remove)
+ re_conditional_imports = re.compile(rf"^\s*if not is_({remove_pattern})_available\(\):\s*$")
+ re_try = re.compile(r"\s*try:")
+ re_else = re.compile(r"\s*else:")
+ re_is_xxx_available = re.compile(rf"is_({remove_pattern})_available")
+
+ with open(init_file, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ lines = content.split("\n")
+ new_lines = []
+ idx = 0
+ while idx < len(lines):
+ # Conditional imports in try-except-else blocks
+ if (re_conditional_imports.search(lines[idx]) is not None) and (re_try.search(lines[idx - 1]) is not None):
+ # Remove the preceding `try:`
+ new_lines.pop()
+ idx += 1
+ # Iterate until `else:`
+ while is_empty_line(lines[idx]) or re_else.search(lines[idx]) is None:
+ idx += 1
+ idx += 1
+ indent = find_indent(lines[idx])
+ while find_indent(lines[idx]) >= indent or is_empty_line(lines[idx]):
+ idx += 1
+ # Remove the import from utils
+ elif re_is_xxx_available.search(lines[idx]) is not None:
+ line = lines[idx]
+ for framework in to_remove:
+ line = line.replace(f", is_{framework}_available", "")
+ line = line.replace(f"is_{framework}_available, ", "")
+ line = line.replace(f"is_{framework}_available,", "")
+ line = line.replace(f"is_{framework}_available", "")
+
+ if len(line.strip()) > 0:
+ new_lines.append(line)
+ idx += 1
+ # Otherwise we keep the line, except if it's a tokenizer import and we don't want to keep it.
+ elif keep_processing or (
+ re.search(r'^\s*"(tokenization|processing|feature_extraction|image_processing)', lines[idx]) is None
+ and re.search(r"^\s*from .(tokenization|processing|feature_extraction|image_processing)", lines[idx])
+ is None
+ ):
+ new_lines.append(lines[idx])
+ idx += 1
+ else:
+ idx += 1
+
+ with open(init_file, "w", encoding="utf-8") as f:
+ f.write("\n".join(new_lines))
+
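+# Sketch of the effect (hypothetical init content): with frameworks=["pt"], an
+# entire `try: / if not is_tf_available(): / ... / else: <tf imports>` block is
+# dropped, and `is_tf_available` is scrubbed from the `from ...utils import` line.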
+
+def add_model_to_main_init(
+ old_model_patterns: ModelPatterns,
+ new_model_patterns: ModelPatterns,
+ frameworks: Optional[List[str]] = None,
+ with_processing: bool = True,
+):
+ """
+ Add a model to the main init of Transformers.
+
+ Args:
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ frameworks (`List[str]`, *optional*):
+ If specified, only the models implemented in those frameworks will be added.
+ with_processing (`bool`, *optional*, defaults to `True`):
+ Whether the tokenizer/feature extractor/processor of the model should also be added to the init or not.
+ """
+ with open(TRANSFORMERS_PATH / "__init__.py", "r", encoding="utf-8") as f:
+ content = f.read()
+
+ lines = content.split("\n")
+ idx = 0
+ new_lines = []
+ framework = None
+ while idx < len(lines):
+ new_framework = False
+ if not is_empty_line(lines[idx]) and find_indent(lines[idx]) == 0:
+ framework = None
+ elif lines[idx].lstrip().startswith("if not is_torch_available"):
+ framework = "pt"
+ new_framework = True
+ elif lines[idx].lstrip().startswith("if not is_tf_available"):
+ framework = "tf"
+ new_framework = True
+ elif lines[idx].lstrip().startswith("if not is_flax_available"):
+ framework = "flax"
+ new_framework = True
+
+ if new_framework:
+ # For a new framework, we need to skip until the else: block to get where the imports are.
+ while lines[idx].strip() != "else:":
+ new_lines.append(lines[idx])
+ idx += 1
+
+ # Skip if we are in a framework not wanted.
+ if framework is not None and frameworks is not None and framework not in frameworks:
+ new_lines.append(lines[idx])
+ idx += 1
+ elif re.search(rf'models.{old_model_patterns.model_lower_cased}( |")', lines[idx]) is not None:
+ block = [lines[idx]]
+ indent = find_indent(lines[idx])
+ idx += 1
+ while find_indent(lines[idx]) > indent:
+ block.append(lines[idx])
+ idx += 1
+ if lines[idx].strip() in [")", "]", "],"]:
+ block.append(lines[idx])
+ idx += 1
+ block = "\n".join(block)
+ new_lines.append(block)
+
+ add_block = True
+ if not with_processing:
+ processing_classes = [
+ old_model_patterns.tokenizer_class,
+ old_model_patterns.image_processor_class,
+ old_model_patterns.feature_extractor_class,
+ old_model_patterns.processor_class,
+ ]
+ # Only keep the ones that are not None
+ processing_classes = [c for c in processing_classes if c is not None]
+ for processing_class in processing_classes:
+ block = block.replace(f' "{processing_class}",', "")
+ block = block.replace(f', "{processing_class}"', "")
+ block = block.replace(f" {processing_class},", "")
+ block = block.replace(f", {processing_class}", "")
+
+ if processing_class in block:
+ add_block = False
+ if add_block:
+ new_lines.append(replace_model_patterns(block, old_model_patterns, new_model_patterns)[0])
+ else:
+ new_lines.append(lines[idx])
+ idx += 1
+
+ with open(TRANSFORMERS_PATH / "__init__.py", "w", encoding="utf-8") as f:
+ f.write("\n".join(new_lines))
+
+
+def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns):
+ """
+ Add a tokenizer to the relevant mappings in the auto module.
+
+ Args:
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ """
+ if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None:
+ return
+
+ with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "r", encoding="utf-8") as f:
+ content = f.read()
+
+ lines = content.split("\n")
+ idx = 0
+ # First we get to the TOKENIZER_MAPPING_NAMES block.
+ while not lines[idx].startswith(" TOKENIZER_MAPPING_NAMES = OrderedDict("):
+ idx += 1
+ idx += 1
+
+ # That block ends right before this line:
+ while not lines[idx].startswith("TOKENIZER_MAPPING = _LazyAutoMapping"):
+ # Either the whole tokenizer block is defined on one line, in which case it ends with ","
+ if lines[idx].endswith(","):
+ block = lines[idx]
+ # Otherwise it takes several lines until we get to a "),"
+ else:
+ block = []
+ while not lines[idx].startswith(" ),"):
+ block.append(lines[idx])
+ idx += 1
+ block = "\n".join(block)
+ idx += 1
+
+ # If we find the model type and tokenizer class in that block, we have the old model tokenizer block
+ if f'"{old_model_patterns.model_type}"' in block and old_model_patterns.tokenizer_class in block:
+ break
+
+ new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)
+ new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)
+
+ new_lines = lines[:idx] + [new_block] + lines[idx:]
+ with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "w", encoding="utf-8") as f:
+ f.write("\n".join(new_lines))
+
+
+AUTO_CLASSES_PATTERNS = {
+ "configuration_auto.py": [
+ ' ("{model_type}", "{model_name}"),',
+ ' ("{model_type}", "{config_class}"),',
+ ' ("{model_type}", "{pretrained_archive_map}"),',
+ ],
+ "feature_extraction_auto.py": [' ("{model_type}", "{feature_extractor_class}"),'],
+ "image_processing_auto.py": [' ("{model_type}", "{image_processor_class}"),'],
+ "modeling_auto.py": [' ("{model_type}", "{any_pt_class}"),'],
+ "modeling_tf_auto.py": [' ("{model_type}", "{any_tf_class}"),'],
+ "modeling_flax_auto.py": [' ("{model_type}", "{any_flax_class}"),'],
+ "processing_auto.py": [' ("{model_type}", "{processor_class}"),'],
+}
+
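+# For instance (hypothetical model type "my-model"), the configuration_auto.py
+# patterns expand to lines like ("my-model", "MyModelConfig"), which
+# add_model_to_auto_classes below inserts right after the old model's lines.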
+
+def add_model_to_auto_classes(
+ old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, model_classes: Dict[str, List[str]]
+):
+ """
+ Add a model to the relevant mappings in the auto module.
+
+ Args:
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ model_classes (`Dict[str, List[str]]`): A dictionary framework to list of model classes implemented.
+ """
+ for filename in AUTO_CLASSES_PATTERNS:
+ # Extend patterns with all model classes if necessary
+ new_patterns = []
+ for pattern in AUTO_CLASSES_PATTERNS[filename]:
+ if re.search("any_([a-z]*)_class", pattern) is not None:
+ framework = re.search("any_([a-z]*)_class", pattern).groups()[0]
+ if framework in model_classes:
+ new_patterns.extend(
+ [
+ pattern.replace("{" + f"any_{framework}_class" + "}", cls)
+ for cls in model_classes[framework]
+ ]
+ )
+ elif "{config_class}" in pattern:
+ new_patterns.append(pattern.replace("{config_class}", old_model_patterns.config_class))
+ elif "{image_processor_class}" in pattern:
+ if (
+ old_model_patterns.image_processor_class is not None
+ and new_model_patterns.image_processor_class is not None
+ ):
+ new_patterns.append(
+ pattern.replace("{image_processor_class}", old_model_patterns.image_processor_class)
+ )
+ elif "{feature_extractor_class}" in pattern:
+ if (
+ old_model_patterns.feature_extractor_class is not None
+ and new_model_patterns.feature_extractor_class is not None
+ ):
+ new_patterns.append(
+ pattern.replace("{feature_extractor_class}", old_model_patterns.feature_extractor_class)
+ )
+ elif "{processor_class}" in pattern:
+ if old_model_patterns.processor_class is not None and new_model_patterns.processor_class is not None:
+ new_patterns.append(pattern.replace("{processor_class}", old_model_patterns.processor_class))
+ else:
+ new_patterns.append(pattern)
+
+ # Loop through all patterns.
+ for pattern in new_patterns:
+ full_name = TRANSFORMERS_PATH / "models" / "auto" / filename
+ old_model_line = pattern
+ new_model_line = pattern
+ for attr in ["model_type", "model_name"]:
+ old_model_line = old_model_line.replace("{" + attr + "}", getattr(old_model_patterns, attr))
+ new_model_line = new_model_line.replace("{" + attr + "}", getattr(new_model_patterns, attr))
+ new_model_line = new_model_line.replace(
+ old_model_patterns.model_camel_cased, new_model_patterns.model_camel_cased
+ )
+
+ add_content_to_file(full_name, new_model_line, add_after=old_model_line)
+
+ # Tokenizers require special handling
+ insert_tokenizer_in_auto_module(old_model_patterns, new_model_patterns)
+
+
+DOC_OVERVIEW_TEMPLATE = """## Overview
+
+The {model_name} model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
+<INSERT SHORT SUMMARY HERE>
+
+The abstract from the paper is the following:
+
+*<INSERT PAPER ABSTRACT HERE>*
+
+Tips:
+
+<INSERT TIPS ABOUT MODEL HERE>
+
+This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
+The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).
+
+"""
+
+
+def duplicate_doc_file(
+ doc_file: Union[str, os.PathLike],
+ old_model_patterns: ModelPatterns,
+ new_model_patterns: ModelPatterns,
+ dest_file: Optional[Union[str, os.PathLike]] = None,
+ frameworks: Optional[List[str]] = None,
+):
+ """
+ Duplicate a documentation file and adapt it for a new model.
+
+ Args:
+ doc_file (`str` or `os.PathLike`): Path to the doc file to duplicate.
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
+ dest_file (`str` or `os.PathLike`, *optional*): Path to the new doc file.
+ Will default to a file named `{new_model_patterns.model_type}.md` in the same folder as `doc_file`.
+ frameworks (`List[str]`, *optional*):
+ If passed, will only keep the model classes corresponding to this list of frameworks in the new doc file.
+ """
+ with open(doc_file, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ content = re.sub(r"