Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step40/zero/21.input_layernorm.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__init__.py +27 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/commands/add_new_model.py +259 -0
- venv/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py +1713 -0
- venv/lib/python3.10/site-packages/transformers/commands/convert.py +165 -0
- venv/lib/python3.10/site-packages/transformers/commands/download.py +56 -0
- venv/lib/python3.10/site-packages/transformers/commands/env.py +143 -0
- venv/lib/python3.10/site-packages/transformers/commands/lfs.py +226 -0
- venv/lib/python3.10/site-packages/transformers/commands/pt_to_tf.py +425 -0
- venv/lib/python3.10/site-packages/transformers/commands/run.py +110 -0
- venv/lib/python3.10/site-packages/transformers/commands/serving.py +228 -0
- venv/lib/python3.10/site-packages/transformers/commands/train.py +158 -0
- venv/lib/python3.10/site-packages/transformers/commands/transformers_cli.py +59 -0
- venv/lib/python3.10/site-packages/transformers/commands/user.py +197 -0
- venv/lib/python3.10/site-packages/transformers/generation/__init__.py +310 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/transformers/generation/beam_constraints.py +521 -0
- venv/lib/python3.10/site-packages/transformers/generation/beam_search.py +1005 -0
- venv/lib/python3.10/site-packages/transformers/generation/candidate_generator.py +425 -0
- venv/lib/python3.10/site-packages/transformers/generation/configuration_utils.py +1092 -0
- venv/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py +544 -0
ckpts/universal/global_step40/zero/21.input_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8547783fa200231df4ac185143a6a692505610ef5ee7af0073050092b6f47af
+size 9293
ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa4fefe14b4d25115363f7ae1cd0a7430037049d07491d23560465434c470798
+size 33555612
ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39549c344344b6cfc5fefe5749c589d1bef3772bd20e34b059f503f92bf1bd06
+size 33555627
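The three checkpoint tensors above are tracked with Git LFS, so the diff only adds three-line pointer files (a spec version line, an `oid sha256:` line, and a `size` line in bytes); the binary payloads themselves live in LFS storage. As a minimal sketch assuming only the Python standard library (the helper name `read_lfs_pointer` is hypothetical and not part of this commit), the pointer fields can be read back like this:

from pathlib import Path


def read_lfs_pointer(path):
    # Parse a Git LFS pointer file into a dict such as
    # {"version": "...", "oid": "sha256:...", "size": "9293"}.
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, value = line.split(" ", 1)
            fields[key] = value.strip()
    return fields


# For example, the first pointer above should report a size of 9293 bytes.
info = read_lfs_pointer("ckpts/universal/global_step40/zero/21.input_layernorm.weight/fp32.pt")
print(info["oid"], info["size"])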
venv/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc
ADDED
Binary file (8.65 kB)
venv/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc
ADDED
Binary file (23.7 kB)
venv/lib/python3.10/site-packages/transformers/commands/__init__.py
ADDED
@@ -0,0 +1,27 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from argparse import ArgumentParser
+
+
+class BaseTransformersCLICommand(ABC):
+    @staticmethod
+    @abstractmethod
+    def register_subcommand(parser: ArgumentParser):
+        raise NotImplementedError()
+
+    @abstractmethod
+    def run(self):
+        raise NotImplementedError()
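The `commands/__init__.py` added above only defines the abstract interface shared by every `transformers-cli` subcommand: `register_subcommand` attaches an argparse sub-parser and `run` performs the work. As a hedged illustration of that contract (the `hello` command below is hypothetical and not part of this diff), a concrete subcommand would look like the sketch that follows; `add_new_model.py`, added next, implements the same pattern for real:

from argparse import ArgumentParser, Namespace

from transformers.commands import BaseTransformersCLICommand


def hello_command_factory(args: Namespace):
    return HelloCommand(args.name)


class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Attach a sub-parser for `hello` and route the parsed args to the factory above.
        hello_parser = parser.add_parser("hello")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=hello_command_factory)

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")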
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (828 Bytes)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc
ADDED
Binary file (7.11 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc
ADDED
Binary file (48 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc
ADDED
Binary file (4.64 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc
ADDED
Binary file (2.03 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc
ADDED
Binary file (4.13 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc
ADDED
Binary file (7.28 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc
ADDED
Binary file (12.8 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc
ADDED
Binary file (3.45 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc
ADDED
Binary file (6.84 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc
ADDED
Binary file (4.67 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc
ADDED
Binary file (1.37 kB)
venv/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc
ADDED
Binary file (7.31 kB)
venv/lib/python3.10/site-packages/transformers/commands/add_new_model.py
ADDED
@@ -0,0 +1,259 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import shutil
+import warnings
+from argparse import ArgumentParser, Namespace
+from pathlib import Path
+from typing import List
+
+from ..utils import logging
+from . import BaseTransformersCLICommand
+
+
+try:
+    from cookiecutter.main import cookiecutter
+
+    _has_cookiecutter = True
+except ImportError:
+    _has_cookiecutter = False
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+def add_new_model_command_factory(args: Namespace):
+    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
+
+
+class AddNewModelCommand(BaseTransformersCLICommand):
+    @staticmethod
+    def register_subcommand(parser: ArgumentParser):
+        add_new_model_parser = parser.add_parser("add-new-model")
+        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
+        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
+        add_new_model_parser.add_argument(
+            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
+        )
+        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
+
+    def __init__(self, testing: bool, testing_file: str, path=None, *args):
+        self._testing = testing
+        self._testing_file = testing_file
+        self._path = path
+
+    def run(self):
+        warnings.warn(
+            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
+            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
+            "checks, you should use `transformers-cli add-new-model-like` instead."
+        )
+        if not _has_cookiecutter:
+            raise ImportError(
+                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
+                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
+            )
+        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
+        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
+        if len(directories) > 0:
+            raise ValueError(
+                "Several directories starting with `cookiecutter-template-` in current working directory. "
+                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
+                "change your working directory."
+            )
+
+        path_to_transformer_root = (
+            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
+        )
+        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
+
+        # Execute cookiecutter
+        if not self._testing:
+            cookiecutter(str(path_to_cookiecutter))
+        else:
+            with open(self._testing_file, "r") as configuration_file:
+                testing_configuration = json.load(configuration_file)
+
+            cookiecutter(
+                str(path_to_cookiecutter if self._path is None else self._path),
+                no_input=True,
+                extra_context=testing_configuration,
+            )
+
+        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
+
+        # Retrieve configuration
+        with open(directory + "/configuration.json", "r") as configuration_file:
+            configuration = json.load(configuration_file)
+
+        lowercase_model_name = configuration["lowercase_modelname"]
+        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
+        os.remove(f"{directory}/configuration.json")
+
+        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
+        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
+        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
+
+        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
+        os.makedirs(model_dir, exist_ok=True)
+        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
+
+        # Tests require submodules as they have parent imports
+        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
+            pass
+
+        shutil.move(
+            f"{directory}/__init__.py",
+            f"{model_dir}/__init__.py",
+        )
+        shutil.move(
+            f"{directory}/configuration_{lowercase_model_name}.py",
+            f"{model_dir}/configuration_{lowercase_model_name}.py",
+        )
+
+        def remove_copy_lines(path):
+            with open(path, "r") as f:
+                lines = f.readlines()
+            with open(path, "w") as f:
+                for line in lines:
+                    if "# Copied from transformers." not in line:
+                        f.write(line)
+
+        if output_pytorch:
+            if not self._testing:
+                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
+
+            shutil.move(
+                f"{directory}/modeling_{lowercase_model_name}.py",
+                f"{model_dir}/modeling_{lowercase_model_name}.py",
+            )
+
+            shutil.move(
+                f"{directory}/test_modeling_{lowercase_model_name}.py",
+                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
+            )
+        else:
+            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
+            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
+
+        if output_tensorflow:
+            if not self._testing:
+                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+
+            shutil.move(
+                f"{directory}/modeling_tf_{lowercase_model_name}.py",
+                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
+            )
+
+            shutil.move(
+                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
+                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
+            )
+        else:
+            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
+
+        if output_flax:
+            if not self._testing:
+                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+
+            shutil.move(
+                f"{directory}/modeling_flax_{lowercase_model_name}.py",
+                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
+            )
+
+            shutil.move(
+                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
+                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
+            )
+        else:
+            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
+
+        shutil.move(
+            f"{directory}/{lowercase_model_name}.md",
+            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
+        )
+
+        shutil.move(
+            f"{directory}/tokenization_{lowercase_model_name}.py",
+            f"{model_dir}/tokenization_{lowercase_model_name}.py",
+        )
+
+        shutil.move(
+            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
+            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
+        )
+
+        from os import fdopen, remove
+        from shutil import copymode, move
+        from tempfile import mkstemp
+
+        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
+            # Create temp file
+            fh, abs_path = mkstemp()
+            line_found = False
+            with fdopen(fh, "w") as new_file:
+                with open(original_file) as old_file:
+                    for line in old_file:
+                        new_file.write(line)
+                        if line_to_copy_below in line:
+                            line_found = True
+                            for line_to_copy in lines_to_copy:
+                                new_file.write(line_to_copy)
+
+            if not line_found:
+                raise ValueError(f"Line {line_to_copy_below} was not found in file.")
+
+            # Copy the file permissions from the old file to the new file
+            copymode(original_file, abs_path)
+            # Remove original file
+            remove(original_file)
+            # Move new file
+            move(abs_path, original_file)
+
+        def skip_units(line):
+            return (
+                ("generating PyTorch" in line and not output_pytorch)
+                or ("generating TensorFlow" in line and not output_tensorflow)
+                or ("generating Flax" in line and not output_flax)
+            )
+
+        def replace_in_files(path_to_datafile):
+            with open(path_to_datafile) as datafile:
+                lines_to_copy = []
+                skip_file = False
+                skip_snippet = False
+                for line in datafile:
+                    if "# To replace in: " in line and "##" not in line:
+                        file_to_replace_in = line.split('"')[1]
+                        skip_file = skip_units(line)
+                    elif "# Below: " in line and "##" not in line:
+                        line_to_copy_below = line.split('"')[1]
+                        skip_snippet = skip_units(line)
+                    elif "# End." in line and "##" not in line:
+                        if not skip_file and not skip_snippet:
+                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
+
+                        lines_to_copy = []
+                    elif "# Replace with" in line and "##" not in line:
+                        lines_to_copy = []
+                    elif "##" not in line:
+                        lines_to_copy.append(line)
+
+            remove(path_to_datafile)
+
+        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
+        os.rmdir(directory)
venv/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py
ADDED
@@ -0,0 +1,1713 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import difflib
|
16 |
+
import json
|
17 |
+
import os
|
18 |
+
import re
|
19 |
+
from argparse import ArgumentParser, Namespace
|
20 |
+
from dataclasses import dataclass
|
21 |
+
from datetime import date
|
22 |
+
from itertools import chain
|
23 |
+
from pathlib import Path
|
24 |
+
from typing import Any, Callable, Dict, List, Optional, Pattern, Tuple, Union
|
25 |
+
|
26 |
+
import yaml
|
27 |
+
|
28 |
+
from ..models import auto as auto_module
|
29 |
+
from ..models.auto.configuration_auto import model_type_to_module_name
|
30 |
+
from ..utils import is_flax_available, is_tf_available, is_torch_available, logging
|
31 |
+
from . import BaseTransformersCLICommand
|
32 |
+
|
33 |
+
|
34 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
35 |
+
|
36 |
+
|
37 |
+
CURRENT_YEAR = date.today().year
|
38 |
+
TRANSFORMERS_PATH = Path(__file__).parent.parent
|
39 |
+
REPO_PATH = TRANSFORMERS_PATH.parent.parent
|
40 |
+
|
41 |
+
|
42 |
+
@dataclass
|
43 |
+
class ModelPatterns:
|
44 |
+
"""
|
45 |
+
Holds the basic information about a new model for the add-new-model-like command.
|
46 |
+
|
47 |
+
Args:
|
48 |
+
model_name (`str`): The model name.
|
49 |
+
checkpoint (`str`): The checkpoint to use for doc examples.
|
50 |
+
model_type (`str`, *optional*):
|
51 |
+
The model type, the identifier used internally in the library like `bert` or `xlm-roberta`. Will default to
|
52 |
+
`model_name` lowercased with spaces replaced with minuses (-).
|
53 |
+
model_lower_cased (`str`, *optional*):
|
54 |
+
The lowercased version of the model name, to use for the module name or function names. Will default to
|
55 |
+
`model_name` lowercased with spaces and minuses replaced with underscores.
|
56 |
+
model_camel_cased (`str`, *optional*):
|
57 |
+
The camel-cased version of the model name, to use for the class names. Will default to `model_name`
|
58 |
+
camel-cased (with spaces and minuses both considered as word separators.
|
59 |
+
model_upper_cased (`str`, *optional*):
|
60 |
+
The uppercased version of the model name, to use for the constant names. Will default to `model_name`
|
61 |
+
uppercased with spaces and minuses replaced with underscores.
|
62 |
+
config_class (`str`, *optional*):
|
63 |
+
The tokenizer class associated with this model. Will default to `"{model_camel_cased}Config"`.
|
64 |
+
tokenizer_class (`str`, *optional*):
|
65 |
+
The tokenizer class associated with this model (leave to `None` for models that don't use a tokenizer).
|
66 |
+
image_processor_class (`str`, *optional*):
|
67 |
+
The image processor class associated with this model (leave to `None` for models that don't use an image
|
68 |
+
processor).
|
69 |
+
feature_extractor_class (`str`, *optional*):
|
70 |
+
The feature extractor class associated with this model (leave to `None` for models that don't use a feature
|
71 |
+
extractor).
|
72 |
+
processor_class (`str`, *optional*):
|
73 |
+
The processor class associated with this model (leave to `None` for models that don't use a processor).
|
74 |
+
"""
|
75 |
+
|
76 |
+
model_name: str
|
77 |
+
checkpoint: str
|
78 |
+
model_type: Optional[str] = None
|
79 |
+
model_lower_cased: Optional[str] = None
|
80 |
+
model_camel_cased: Optional[str] = None
|
81 |
+
model_upper_cased: Optional[str] = None
|
82 |
+
config_class: Optional[str] = None
|
83 |
+
tokenizer_class: Optional[str] = None
|
84 |
+
image_processor_class: Optional[str] = None
|
85 |
+
feature_extractor_class: Optional[str] = None
|
86 |
+
processor_class: Optional[str] = None
|
87 |
+
|
88 |
+
def __post_init__(self):
|
89 |
+
if self.model_type is None:
|
90 |
+
self.model_type = self.model_name.lower().replace(" ", "-")
|
91 |
+
if self.model_lower_cased is None:
|
92 |
+
self.model_lower_cased = self.model_name.lower().replace(" ", "_").replace("-", "_")
|
93 |
+
if self.model_camel_cased is None:
|
94 |
+
# Split the model name on - and space
|
95 |
+
words = self.model_name.split(" ")
|
96 |
+
words = list(chain(*[w.split("-") for w in words]))
|
97 |
+
# Make sure each word is capitalized
|
98 |
+
words = [w[0].upper() + w[1:] for w in words]
|
99 |
+
self.model_camel_cased = "".join(words)
|
100 |
+
if self.model_upper_cased is None:
|
101 |
+
self.model_upper_cased = self.model_name.upper().replace(" ", "_").replace("-", "_")
|
102 |
+
if self.config_class is None:
|
103 |
+
self.config_class = f"{self.model_camel_cased}Config"
|
104 |
+
|
105 |
+
|
106 |
+
ATTRIBUTE_TO_PLACEHOLDER = {
|
107 |
+
"config_class": "[CONFIG_CLASS]",
|
108 |
+
"tokenizer_class": "[TOKENIZER_CLASS]",
|
109 |
+
"image_processor_class": "[IMAGE_PROCESSOR_CLASS]",
|
110 |
+
"feature_extractor_class": "[FEATURE_EXTRACTOR_CLASS]",
|
111 |
+
"processor_class": "[PROCESSOR_CLASS]",
|
112 |
+
"checkpoint": "[CHECKPOINT]",
|
113 |
+
"model_type": "[MODEL_TYPE]",
|
114 |
+
"model_upper_cased": "[MODEL_UPPER_CASED]",
|
115 |
+
"model_camel_cased": "[MODEL_CAMELCASED]",
|
116 |
+
"model_lower_cased": "[MODEL_LOWER_CASED]",
|
117 |
+
"model_name": "[MODEL_NAME]",
|
118 |
+
}
|
119 |
+
|
120 |
+
|
121 |
+
def is_empty_line(line: str) -> bool:
|
122 |
+
"""
|
123 |
+
Determines whether a line is empty or not.
|
124 |
+
"""
|
125 |
+
return len(line) == 0 or line.isspace()
|
126 |
+
|
127 |
+
|
128 |
+
def find_indent(line: str) -> int:
|
129 |
+
"""
|
130 |
+
Returns the number of spaces that start a line indent.
|
131 |
+
"""
|
132 |
+
search = re.search(r"^(\s*)(?:\S|$)", line)
|
133 |
+
if search is None:
|
134 |
+
return 0
|
135 |
+
return len(search.groups()[0])
|
136 |
+
|
137 |
+
|
138 |
+
def parse_module_content(content: str) -> List[str]:
|
139 |
+
"""
|
140 |
+
Parse the content of a module in the list of objects it defines.
|
141 |
+
|
142 |
+
Args:
|
143 |
+
content (`str`): The content to parse
|
144 |
+
|
145 |
+
Returns:
|
146 |
+
`List[str]`: The list of objects defined in the module.
|
147 |
+
"""
|
148 |
+
objects = []
|
149 |
+
current_object = []
|
150 |
+
lines = content.split("\n")
|
151 |
+
# Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
|
152 |
+
end_markers = [")", "]", "}", '"""']
|
153 |
+
|
154 |
+
for line in lines:
|
155 |
+
# End of an object
|
156 |
+
is_valid_object = len(current_object) > 0
|
157 |
+
if is_valid_object and len(current_object) == 1:
|
158 |
+
is_valid_object = not current_object[0].startswith("# Copied from")
|
159 |
+
if not is_empty_line(line) and find_indent(line) == 0 and is_valid_object:
|
160 |
+
# Closing parts should be included in current object
|
161 |
+
if line in end_markers:
|
162 |
+
current_object.append(line)
|
163 |
+
objects.append("\n".join(current_object))
|
164 |
+
current_object = []
|
165 |
+
else:
|
166 |
+
objects.append("\n".join(current_object))
|
167 |
+
current_object = [line]
|
168 |
+
else:
|
169 |
+
current_object.append(line)
|
170 |
+
|
171 |
+
# Add last object
|
172 |
+
if len(current_object) > 0:
|
173 |
+
objects.append("\n".join(current_object))
|
174 |
+
|
175 |
+
return objects
|
176 |
+
|
177 |
+
|
178 |
+
def extract_block(content: str, indent_level: int = 0) -> str:
|
179 |
+
"""Return the first block in `content` with the indent level `indent_level`.
|
180 |
+
|
181 |
+
The first line in `content` should be indented at `indent_level` level, otherwise an error will be thrown.
|
182 |
+
|
183 |
+
This method will immediately stop the search when a (non-empty) line with indent level less than `indent_level` is
|
184 |
+
encountered.
|
185 |
+
|
186 |
+
Args:
|
187 |
+
content (`str`): The content to parse
|
188 |
+
indent_level (`int`, *optional*, default to 0): The indent level of the blocks to search for
|
189 |
+
|
190 |
+
Returns:
|
191 |
+
`str`: The first block in `content` with the indent level `indent_level`.
|
192 |
+
"""
|
193 |
+
current_object = []
|
194 |
+
lines = content.split("\n")
|
195 |
+
# Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
|
196 |
+
end_markers = [")", "]", "}", '"""']
|
197 |
+
|
198 |
+
for idx, line in enumerate(lines):
|
199 |
+
if idx == 0 and indent_level > 0 and not is_empty_line(line) and find_indent(line) != indent_level:
|
200 |
+
raise ValueError(
|
201 |
+
f"When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. Got "
|
202 |
+
f"{find_indent(line)} instead."
|
203 |
+
)
|
204 |
+
|
205 |
+
if find_indent(line) < indent_level and not is_empty_line(line):
|
206 |
+
break
|
207 |
+
|
208 |
+
# End of an object
|
209 |
+
is_valid_object = len(current_object) > 0
|
210 |
+
if (
|
211 |
+
not is_empty_line(line)
|
212 |
+
and not line.endswith(":")
|
213 |
+
and find_indent(line) == indent_level
|
214 |
+
and is_valid_object
|
215 |
+
):
|
216 |
+
# Closing parts should be included in current object
|
217 |
+
if line.lstrip() in end_markers:
|
218 |
+
current_object.append(line)
|
219 |
+
return "\n".join(current_object)
|
220 |
+
else:
|
221 |
+
current_object.append(line)
|
222 |
+
|
223 |
+
# Add last object
|
224 |
+
if len(current_object) > 0:
|
225 |
+
return "\n".join(current_object)
|
226 |
+
|
227 |
+
|
228 |
+
def add_content_to_text(
|
229 |
+
text: str,
|
230 |
+
content: str,
|
231 |
+
add_after: Optional[Union[str, Pattern]] = None,
|
232 |
+
add_before: Optional[Union[str, Pattern]] = None,
|
233 |
+
exact_match: bool = False,
|
234 |
+
) -> str:
|
235 |
+
"""
|
236 |
+
A utility to add some content inside a given text.
|
237 |
+
|
238 |
+
Args:
|
239 |
+
text (`str`): The text in which we want to insert some content.
|
240 |
+
content (`str`): The content to add.
|
241 |
+
add_after (`str` or `Pattern`):
|
242 |
+
The pattern to test on a line of `text`, the new content is added after the first instance matching it.
|
243 |
+
add_before (`str` or `Pattern`):
|
244 |
+
The pattern to test on a line of `text`, the new content is added before the first instance matching it.
|
245 |
+
exact_match (`bool`, *optional*, defaults to `False`):
|
246 |
+
A line is considered a match with `add_after` or `add_before` if it matches exactly when `exact_match=True`,
|
247 |
+
otherwise, if `add_after`/`add_before` is present in the line.
|
248 |
+
|
249 |
+
<Tip warning={true}>
|
250 |
+
|
251 |
+
The arguments `add_after` and `add_before` are mutually exclusive, and one exactly needs to be provided.
|
252 |
+
|
253 |
+
</Tip>
|
254 |
+
|
255 |
+
Returns:
|
256 |
+
`str`: The text with the new content added if a match was found.
|
257 |
+
"""
|
258 |
+
if add_after is None and add_before is None:
|
259 |
+
raise ValueError("You need to pass either `add_after` or `add_before`")
|
260 |
+
if add_after is not None and add_before is not None:
|
261 |
+
raise ValueError("You can't pass both `add_after` or `add_before`")
|
262 |
+
pattern = add_after if add_before is None else add_before
|
263 |
+
|
264 |
+
def this_is_the_line(line):
|
265 |
+
if isinstance(pattern, Pattern):
|
266 |
+
return pattern.search(line) is not None
|
267 |
+
elif exact_match:
|
268 |
+
return pattern == line
|
269 |
+
else:
|
270 |
+
return pattern in line
|
271 |
+
|
272 |
+
new_lines = []
|
273 |
+
for line in text.split("\n"):
|
274 |
+
if this_is_the_line(line):
|
275 |
+
if add_before is not None:
|
276 |
+
new_lines.append(content)
|
277 |
+
new_lines.append(line)
|
278 |
+
if add_after is not None:
|
279 |
+
new_lines.append(content)
|
280 |
+
else:
|
281 |
+
new_lines.append(line)
|
282 |
+
|
283 |
+
return "\n".join(new_lines)
|
284 |
+
|
285 |
+
|
286 |
+
def add_content_to_file(
|
287 |
+
file_name: Union[str, os.PathLike],
|
288 |
+
content: str,
|
289 |
+
add_after: Optional[Union[str, Pattern]] = None,
|
290 |
+
add_before: Optional[Union[str, Pattern]] = None,
|
291 |
+
exact_match: bool = False,
|
292 |
+
):
|
293 |
+
"""
|
294 |
+
A utility to add some content inside a given file.
|
295 |
+
|
296 |
+
Args:
|
297 |
+
file_name (`str` or `os.PathLike`): The name of the file in which we want to insert some content.
|
298 |
+
content (`str`): The content to add.
|
299 |
+
add_after (`str` or `Pattern`):
|
300 |
+
The pattern to test on a line of `text`, the new content is added after the first instance matching it.
|
301 |
+
add_before (`str` or `Pattern`):
|
302 |
+
The pattern to test on a line of `text`, the new content is added before the first instance matching it.
|
303 |
+
exact_match (`bool`, *optional*, defaults to `False`):
|
304 |
+
A line is considered a match with `add_after` or `add_before` if it matches exactly when `exact_match=True`,
|
305 |
+
otherwise, if `add_after`/`add_before` is present in the line.
|
306 |
+
|
307 |
+
<Tip warning={true}>
|
308 |
+
|
309 |
+
The arguments `add_after` and `add_before` are mutually exclusive, and one exactly needs to be provided.
|
310 |
+
|
311 |
+
</Tip>
|
312 |
+
"""
|
313 |
+
with open(file_name, "r", encoding="utf-8") as f:
|
314 |
+
old_content = f.read()
|
315 |
+
|
316 |
+
new_content = add_content_to_text(
|
317 |
+
old_content, content, add_after=add_after, add_before=add_before, exact_match=exact_match
|
318 |
+
)
|
319 |
+
|
320 |
+
with open(file_name, "w", encoding="utf-8") as f:
|
321 |
+
f.write(new_content)
|
322 |
+
|
323 |
+
|
324 |
+
def replace_model_patterns(
|
325 |
+
text: str, old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns
|
326 |
+
) -> Tuple[str, str]:
|
327 |
+
"""
|
328 |
+
Replace all patterns present in a given text.
|
329 |
+
|
330 |
+
Args:
|
331 |
+
text (`str`): The text to treat.
|
332 |
+
old_model_patterns (`ModelPatterns`): The patterns for the old model.
|
333 |
+
new_model_patterns (`ModelPatterns`): The patterns for the new model.
|
334 |
+
|
335 |
+
Returns:
|
336 |
+
`Tuple(str, str)`: A tuple of with the treated text and the replacement actually done in it.
|
337 |
+
"""
|
338 |
+
# The order is crucially important as we will check and replace in that order. For instance the config probably
|
339 |
+
# contains the camel-cased named, but will be treated before.
|
340 |
+
attributes_to_check = ["config_class"]
|
341 |
+
# Add relevant preprocessing classes
|
342 |
+
for attr in ["tokenizer_class", "image_processor_class", "feature_extractor_class", "processor_class"]:
|
343 |
+
if getattr(old_model_patterns, attr) is not None and getattr(new_model_patterns, attr) is not None:
|
344 |
+
attributes_to_check.append(attr)
|
345 |
+
|
346 |
+
# Special cases for checkpoint and model_type
|
347 |
+
if old_model_patterns.checkpoint not in [old_model_patterns.model_type, old_model_patterns.model_lower_cased]:
|
348 |
+
attributes_to_check.append("checkpoint")
|
349 |
+
if old_model_patterns.model_type != old_model_patterns.model_lower_cased:
|
350 |
+
attributes_to_check.append("model_type")
|
351 |
+
else:
|
352 |
+
text = re.sub(
|
353 |
+
rf'(\s*)model_type = "{old_model_patterns.model_type}"',
|
354 |
+
r'\1model_type = "[MODEL_TYPE]"',
|
355 |
+
text,
|
356 |
+
)
|
357 |
+
|
358 |
+
# Special case when the model camel cased and upper cased names are the same for the old model (like for GPT2) but
|
359 |
+
# not the new one. We can't just do a replace in all the text and will need a special regex
|
360 |
+
if old_model_patterns.model_upper_cased == old_model_patterns.model_camel_cased:
|
361 |
+
old_model_value = old_model_patterns.model_upper_cased
|
362 |
+
if re.search(rf"{old_model_value}_[A-Z_]*[^A-Z_]", text) is not None:
|
363 |
+
text = re.sub(rf"{old_model_value}([A-Z_]*)([^a-zA-Z_])", r"[MODEL_UPPER_CASED]\1\2", text)
|
364 |
+
else:
|
365 |
+
attributes_to_check.append("model_upper_cased")
|
366 |
+
|
367 |
+
attributes_to_check.extend(["model_camel_cased", "model_lower_cased", "model_name"])
|
368 |
+
|
369 |
+
# Now let's replace every other attribute by their placeholder
|
370 |
+
for attr in attributes_to_check:
|
371 |
+
text = text.replace(getattr(old_model_patterns, attr), ATTRIBUTE_TO_PLACEHOLDER[attr])
|
372 |
+
|
373 |
+
# Finally we can replace the placeholder byt the new values.
|
374 |
+
replacements = []
|
375 |
+
for attr, placeholder in ATTRIBUTE_TO_PLACEHOLDER.items():
|
376 |
+
if placeholder in text:
|
377 |
+
replacements.append((getattr(old_model_patterns, attr), getattr(new_model_patterns, attr)))
|
378 |
+
text = text.replace(placeholder, getattr(new_model_patterns, attr))
|
379 |
+
|
380 |
+
# If we have two inconsistent replacements, we don't return anything (ex: GPT2->GPT_NEW and GPT2->GPTNew)
|
381 |
+
old_replacement_values = [old for old, new in replacements]
|
382 |
+
if len(set(old_replacement_values)) != len(old_replacement_values):
|
383 |
+
return text, ""
|
384 |
+
|
385 |
+
replacements = simplify_replacements(replacements)
|
386 |
+
replacements = [f"{old}->{new}" for old, new in replacements]
|
387 |
+
return text, ",".join(replacements)
|
388 |
+
|
389 |
+
|
390 |
+
def simplify_replacements(replacements):
|
391 |
+
"""
|
392 |
+
Simplify a list of replacement patterns to make sure there are no needless ones.
|
393 |
+
|
394 |
+
For instance in the sequence "Bert->BertNew, BertConfig->BertNewConfig, bert->bert_new", the replacement
|
395 |
+
"BertConfig->BertNewConfig" is implied by "Bert->BertNew" so not needed.
|
396 |
+
|
397 |
+
Args:
|
398 |
+
replacements (`List[Tuple[str, str]]`): List of patterns (old, new)
|
399 |
+
|
400 |
+
Returns:
|
401 |
+
`List[Tuple[str, str]]`: The list of patterns simplified.
|
402 |
+
"""
|
403 |
+
if len(replacements) <= 1:
|
404 |
+
# Nothing to simplify
|
405 |
+
return replacements
|
406 |
+
|
407 |
+
# Next let's sort replacements by length as a replacement can only "imply" another replacement if it's shorter.
|
408 |
+
replacements.sort(key=lambda x: len(x[0]))
|
409 |
+
|
410 |
+
idx = 0
|
411 |
+
while idx < len(replacements):
|
412 |
+
old, new = replacements[idx]
|
413 |
+
# Loop through all replacements after
|
414 |
+
j = idx + 1
|
415 |
+
while j < len(replacements):
|
416 |
+
old_2, new_2 = replacements[j]
|
417 |
+
# If the replacement is implied by the current one, we can drop it.
|
418 |
+
if old_2.replace(old, new) == new_2:
|
419 |
+
replacements.pop(j)
|
420 |
+
else:
|
421 |
+
j += 1
|
422 |
+
idx += 1
|
423 |
+
|
424 |
+
return replacements
|
425 |
+
|
426 |
+
|
427 |
+
def get_module_from_file(module_file: Union[str, os.PathLike]) -> str:
|
428 |
+
"""
|
429 |
+
Returns the module name corresponding to a module file.
|
430 |
+
"""
|
431 |
+
full_module_path = Path(module_file).absolute()
|
432 |
+
module_parts = full_module_path.with_suffix("").parts
|
433 |
+
|
434 |
+
# Find the first part named transformers, starting from the end.
|
435 |
+
idx = len(module_parts) - 1
|
436 |
+
while idx >= 0 and module_parts[idx] != "transformers":
|
437 |
+
idx -= 1
|
438 |
+
if idx < 0:
|
439 |
+
raise ValueError(f"{module_file} is not a transformers module.")
|
440 |
+
|
441 |
+
return ".".join(module_parts[idx:])
|
442 |
+
|
443 |
+
|
444 |
+
SPECIAL_PATTERNS = {
|
445 |
+
"_CHECKPOINT_FOR_DOC =": "checkpoint",
|
446 |
+
"_CONFIG_FOR_DOC =": "config_class",
|
447 |
+
"_TOKENIZER_FOR_DOC =": "tokenizer_class",
|
448 |
+
"_IMAGE_PROCESSOR_FOR_DOC =": "image_processor_class",
|
449 |
+
"_FEAT_EXTRACTOR_FOR_DOC =": "feature_extractor_class",
|
450 |
+
"_PROCESSOR_FOR_DOC =": "processor_class",
|
451 |
+
}
|
452 |
+
|
453 |
+
|
454 |
+
_re_class_func = re.compile(r"^(?:class|def)\s+([^\s:\(]+)\s*(?:\(|\:)", flags=re.MULTILINE)
|
455 |
+
|
456 |
+
|
457 |
+
def remove_attributes(obj, target_attr):
|
458 |
+
"""Remove `target_attr` in `obj`."""
|
459 |
+
lines = obj.split(os.linesep)
|
460 |
+
|
461 |
+
target_idx = None
|
462 |
+
for idx, line in enumerate(lines):
|
463 |
+
# search for assignment
|
464 |
+
if line.lstrip().startswith(f"{target_attr} = "):
|
465 |
+
target_idx = idx
|
466 |
+
break
|
467 |
+
# search for function/method definition
|
468 |
+
elif line.lstrip().startswith(f"def {target_attr}("):
|
469 |
+
target_idx = idx
|
470 |
+
break
|
471 |
+
|
472 |
+
# target not found
|
473 |
+
if target_idx is None:
|
474 |
+
return obj
|
475 |
+
|
476 |
+
line = lines[target_idx]
|
477 |
+
indent_level = find_indent(line)
|
478 |
+
# forward pass to find the ending of the block (including empty lines)
|
479 |
+
parsed = extract_block("\n".join(lines[target_idx:]), indent_level)
|
480 |
+
num_lines = len(parsed.split("\n"))
|
481 |
+
for idx in range(num_lines):
|
482 |
+
lines[target_idx + idx] = None
|
483 |
+
|
484 |
+
# backward pass to find comments or decorator
|
485 |
+
for idx in range(target_idx - 1, -1, -1):
|
486 |
+
line = lines[idx]
|
487 |
+
if (line.lstrip().startswith("#") or line.lstrip().startswith("@")) and find_indent(line) == indent_level:
|
488 |
+
lines[idx] = None
|
489 |
+
else:
|
490 |
+
break
|
491 |
+
|
492 |
+
new_obj = os.linesep.join([x for x in lines if x is not None])
|
493 |
+
|
494 |
+
return new_obj
|
495 |
+
|
496 |
+
|
497 |
+
def duplicate_module(
|
498 |
+
module_file: Union[str, os.PathLike],
|
499 |
+
old_model_patterns: ModelPatterns,
|
500 |
+
new_model_patterns: ModelPatterns,
|
501 |
+
dest_file: Optional[str] = None,
|
502 |
+
add_copied_from: bool = True,
|
503 |
+
attrs_to_remove: List[str] = None,
|
504 |
+
):
|
505 |
+
"""
|
506 |
+
Create a new module from an existing one and adapting all function and classes names from old patterns to new ones.
|
507 |
+
|
508 |
+
Args:
|
509 |
+
module_file (`str` or `os.PathLike`): Path to the module to duplicate.
|
510 |
+
old_model_patterns (`ModelPatterns`): The patterns for the old model.
|
511 |
+
new_model_patterns (`ModelPatterns`): The patterns for the new model.
|
512 |
+
dest_file (`str` or `os.PathLike`, *optional*): Path to the new module.
|
513 |
+
add_copied_from (`bool`, *optional*, defaults to `True`):
|
514 |
+
Whether or not to add `# Copied from` statements in the duplicated module.
|
515 |
+
"""
|
516 |
+
if dest_file is None:
|
517 |
+
dest_file = str(module_file).replace(
|
518 |
+
old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
|
519 |
+
)
|
520 |
+
|
521 |
+
with open(module_file, "r", encoding="utf-8") as f:
|
522 |
+
content = f.read()
|
523 |
+
|
524 |
+
content = re.sub(r"# Copyright (\d+)\s", f"# Copyright {CURRENT_YEAR} ", content)
|
525 |
+
objects = parse_module_content(content)
|
526 |
+
|
527 |
+
# Loop and treat all objects
|
528 |
+
new_objects = []
|
529 |
+
for obj in objects:
|
530 |
+
special_pattern = False
|
531 |
+
for pattern, attr in SPECIAL_PATTERNS.items():
|
532 |
+
if pattern in obj:
|
533 |
+
obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))
|
534 |
+
new_objects.append(obj)
|
535 |
+
special_pattern = True
|
536 |
+
break
|
537 |
+
|
538 |
+
if special_pattern:
|
539 |
+
continue
|
540 |
+
|
541 |
+
# Regular classes functions
|
542 |
+
old_obj = obj
|
543 |
+
obj, replacement = replace_model_patterns(obj, old_model_patterns, new_model_patterns)
|
544 |
+
has_copied_from = re.search(r"^#\s+Copied from", obj, flags=re.MULTILINE) is not None
|
545 |
+
if add_copied_from and not has_copied_from and _re_class_func.search(obj) is not None and len(replacement) > 0:
|
546 |
+
# Copied from statement must be added just before the class/function definition, which may not be the
|
547 |
+
# first line because of decorators.
|
548 |
+
module_name = get_module_from_file(module_file)
|
549 |
+
old_object_name = _re_class_func.search(old_obj).groups()[0]
|
550 |
+
obj = add_content_to_text(
|
551 |
+
obj, f"# Copied from {module_name}.{old_object_name} with {replacement}", add_before=_re_class_func
|
552 |
+
)
|
553 |
+
# In all cases, we remove Copied from statement with indent on methods.
|
554 |
+
obj = re.sub("\n[ ]+# Copied from [^\n]*\n", "\n", obj)
|
555 |
+
|
556 |
+
new_objects.append(obj)
|
557 |
+
|
558 |
+
content = "\n".join(new_objects)
|
559 |
+
# Remove some attributes that we don't want to copy to the new file(s)
|
560 |
+
if attrs_to_remove is not None:
|
561 |
+
for attr in attrs_to_remove:
|
562 |
+
content = remove_attributes(content, target_attr=attr)
|
563 |
+
|
564 |
+
with open(dest_file, "w", encoding="utf-8") as f:
|
565 |
+
f.write(content)
|
566 |
+
|
567 |
+
|
568 |
+
def filter_framework_files(
|
569 |
+
files: List[Union[str, os.PathLike]], frameworks: Optional[List[str]] = None
|
570 |
+
) -> List[Union[str, os.PathLike]]:
|
571 |
+
"""
|
572 |
+
Filter a list of files to only keep the ones corresponding to a list of frameworks.
|
573 |
+
|
574 |
+
Args:
|
575 |
+
files (`List[Union[str, os.PathLike]]`): The list of files to filter.
|
576 |
+
frameworks (`List[str]`, *optional*): The list of allowed frameworks.
|
577 |
+
|
578 |
+
Returns:
|
579 |
+
`List[Union[str, os.PathLike]]`: The list of filtered files.
|
580 |
+
"""
|
581 |
+
if frameworks is None:
|
582 |
+
frameworks = get_default_frameworks()
|
583 |
+
|
584 |
+
framework_to_file = {}
|
585 |
+
others = []
|
586 |
+
for f in files:
|
587 |
+
parts = Path(f).name.split("_")
|
588 |
+
if "modeling" not in parts:
|
589 |
+
others.append(f)
|
590 |
+
continue
|
591 |
+
if "tf" in parts:
|
592 |
+
framework_to_file["tf"] = f
|
593 |
+
elif "flax" in parts:
|
594 |
+
framework_to_file["flax"] = f
|
595 |
+
else:
|
596 |
+
framework_to_file["pt"] = f
|
597 |
+
|
598 |
+
return [framework_to_file[f] for f in frameworks if f in framework_to_file] + others
|
599 |
+
|
600 |
+
|
601 |
+
def get_model_files(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, Union[Path, List[Path]]]:
|
602 |
+
"""
|
603 |
+
Retrieves all the files associated to a model.
|
604 |
+
|
605 |
+
Args:
|
606 |
+
model_type (`str`): A valid model type (like "bert" or "gpt2")
|
607 |
+
frameworks (`List[str]`, *optional*):
|
608 |
+
If passed, will only keep the model files corresponding to the passed frameworks.
|
609 |
+
|
610 |
+
Returns:
|
611 |
+
`Dict[str, Union[Path, List[Path]]]`: A dictionary with the following keys:
|
612 |
+
- **doc_file** -- The documentation file for the model.
|
613 |
+
- **model_files** -- All the files in the model module.
|
614 |
+
- **test_files** -- The test files for the model.
|
615 |
+
"""
|
616 |
+
module_name = model_type_to_module_name(model_type)
|
617 |
+
|
618 |
+
model_module = TRANSFORMERS_PATH / "models" / module_name
|
619 |
+
model_files = list(model_module.glob("*.py"))
|
620 |
+
model_files = filter_framework_files(model_files, frameworks=frameworks)
|
621 |
+
|
622 |
+
doc_file = REPO_PATH / "docs" / "source" / "en" / "model_doc" / f"{model_type}.md"
|
623 |
+
|
624 |
+
# Basic pattern for test files
|
625 |
+
test_files = [
|
626 |
+
f"test_modeling_{module_name}.py",
|
627 |
+
f"test_modeling_tf_{module_name}.py",
|
628 |
+
f"test_modeling_flax_{module_name}.py",
|
629 |
+
f"test_tokenization_{module_name}.py",
|
630 |
+
f"test_image_processing_{module_name}.py",
|
631 |
+
f"test_feature_extraction_{module_name}.py",
|
632 |
+
f"test_processor_{module_name}.py",
|
633 |
+
]
|
634 |
+
test_files = filter_framework_files(test_files, frameworks=frameworks)
|
635 |
+
# Add the test directory
|
636 |
+
test_files = [REPO_PATH / "tests" / "models" / module_name / f for f in test_files]
|
637 |
+
# Filter by existing files
|
638 |
+
test_files = [f for f in test_files if f.exists()]
|
639 |
+
|
640 |
+
return {"doc_file": doc_file, "model_files": model_files, "module_name": module_name, "test_files": test_files}
|
641 |
+
|
642 |
+
|
643 |
+
_re_checkpoint_for_doc = re.compile(r"^_CHECKPOINT_FOR_DOC\s+=\s+(\S*)\s*$", flags=re.MULTILINE)
|
644 |
+
|
645 |
+
|
646 |
+
def find_base_model_checkpoint(
|
647 |
+
model_type: str, model_files: Optional[Dict[str, Union[Path, List[Path]]]] = None
|
648 |
+
) -> str:
|
649 |
+
"""
|
650 |
+
Finds the model checkpoint used in the docstrings for a given model.
|
651 |
+
|
652 |
+
Args:
|
653 |
+
model_type (`str`): A valid model type (like "bert" or "gpt2")
|
654 |
+
model_files (`Dict[str, Union[Path, List[Path]]`, *optional*):
|
655 |
+
The files associated with `model_type`. Can be passed to speed up the function; otherwise it will be computed.
|
656 |
+
|
657 |
+
Returns:
|
658 |
+
`str`: The checkpoint used.
|
659 |
+
"""
|
660 |
+
if model_files is None:
|
661 |
+
model_files = get_model_files(model_type)
|
662 |
+
module_files = model_files["model_files"]
|
663 |
+
for fname in module_files:
|
664 |
+
if "modeling" not in str(fname):
|
665 |
+
continue
|
666 |
+
|
667 |
+
with open(fname, "r", encoding="utf-8") as f:
|
668 |
+
content = f.read()
|
669 |
+
if _re_checkpoint_for_doc.search(content) is not None:
|
670 |
+
checkpoint = _re_checkpoint_for_doc.search(content).groups()[0]
|
671 |
+
# Remove quotes
|
672 |
+
checkpoint = checkpoint.replace('"', "")
|
673 |
+
checkpoint = checkpoint.replace("'", "")
|
674 |
+
return checkpoint
|
675 |
+
|
676 |
+
# TODO: Find some kind of fallback if there is no _CHECKPOINT_FOR_DOC in any of the modeling file.
|
677 |
+
return ""
|
678 |
+
|
679 |
+
|
680 |
+
def get_default_frameworks():
|
681 |
+
"""
|
682 |
+
Returns the list of frameworks (PyTorch, TensorFlow, Flax) that are installed in the environment.
|
683 |
+
"""
|
684 |
+
frameworks = []
|
685 |
+
if is_torch_available():
|
686 |
+
frameworks.append("pt")
|
687 |
+
if is_tf_available():
|
688 |
+
frameworks.append("tf")
|
689 |
+
if is_flax_available():
|
690 |
+
frameworks.append("flax")
|
691 |
+
return frameworks
|
692 |
+
|
693 |
+
|
694 |
+
_re_model_mapping = re.compile("MODEL_([A-Z_]*)MAPPING_NAMES")
|
695 |
+
|
696 |
+
|
697 |
+
def retrieve_model_classes(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, List[str]]:
|
698 |
+
"""
|
699 |
+
Retrieve the model classes associated to a given model.
|
700 |
+
|
701 |
+
Args:
|
702 |
+
model_type (`str`): A valid model type (like "bert" or "gpt2")
|
703 |
+
frameworks (`List[str]`, *optional*):
|
704 |
+
The frameworks to look for. Will default to `["pt", "tf", "flax"]`, passing a smaller list will restrict
|
705 |
+
the classes returned.
|
706 |
+
|
707 |
+
Returns:
|
708 |
+
`Dict[str, List[str]]`: A dictionary with one key per framework and the list of model classes associated with
|
709 |
+
that framework as values.
|
710 |
+
"""
|
711 |
+
if frameworks is None:
|
712 |
+
frameworks = get_default_frameworks()
|
713 |
+
|
714 |
+
modules = {
|
715 |
+
"pt": auto_module.modeling_auto if is_torch_available() else None,
|
716 |
+
"tf": auto_module.modeling_tf_auto if is_tf_available() else None,
|
717 |
+
"flax": auto_module.modeling_flax_auto if is_flax_available() else None,
|
718 |
+
}
|
719 |
+
|
720 |
+
model_classes = {}
|
721 |
+
for framework in frameworks:
|
722 |
+
new_model_classes = []
|
723 |
+
if modules[framework] is None:
|
724 |
+
raise ValueError(f"You selected {framework} in the frameworks, but it is not installed.")
|
725 |
+
model_mappings = [attr for attr in dir(modules[framework]) if _re_model_mapping.search(attr) is not None]
|
726 |
+
for model_mapping_name in model_mappings:
|
727 |
+
model_mapping = getattr(modules[framework], model_mapping_name)
|
728 |
+
if model_type in model_mapping:
|
729 |
+
new_model_classes.append(model_mapping[model_type])
|
730 |
+
|
731 |
+
if len(new_model_classes) > 0:
|
732 |
+
# Remove duplicates
|
733 |
+
model_classes[framework] = list(set(new_model_classes))
|
734 |
+
|
735 |
+
return model_classes
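# Illustrative usage sketch (not part of the original module): restrict the lookup to PyTorch.
# The result looks like {"pt": ["BertModel", "BertForMaskedLM", ...]}; the exact class list
# depends on the installed version of the library.
def _example_retrieve_model_classes():
    return retrieve_model_classes("bert", frameworks=["pt"])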
|
736 |
+
|
737 |
+
|
738 |
+
def retrieve_info_for_model(model_type, frameworks: Optional[List[str]] = None):
|
739 |
+
"""
|
740 |
+
Retrieves all the information about a given model_type.
|
741 |
+
|
742 |
+
Args:
|
743 |
+
model_type (`str`): A valid model type (like "bert" or "gpt2")
|
744 |
+
frameworks (`List[str]`, *optional*):
|
745 |
+
If passed, will only keep the info corresponding to the passed frameworks.
|
746 |
+
|
747 |
+
Returns:
|
748 |
+
`Dict`: A dictionary with the following keys:
|
749 |
+
- **frameworks** (`List[str]`): The list of frameworks that back this model type.
|
750 |
+
- **model_classes** (`Dict[str, List[str]]`): The model classes implemented for that model type.
|
751 |
+
- **model_files** (`Dict[str, Union[Path, List[Path]]]`): The files associated with that model type.
|
752 |
+
- **model_patterns** (`ModelPatterns`): The various patterns for the model.
|
753 |
+
"""
|
754 |
+
if model_type not in auto_module.MODEL_NAMES_MAPPING:
|
755 |
+
raise ValueError(f"{model_type} is not a valid model type.")
|
756 |
+
|
757 |
+
model_name = auto_module.MODEL_NAMES_MAPPING[model_type]
|
758 |
+
config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type]
|
759 |
+
if model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES:
|
760 |
+
tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type]
|
761 |
+
tokenizer_class = tokenizer_classes[0] if tokenizer_classes[0] is not None else tokenizer_classes[1]
|
762 |
+
else:
|
763 |
+
tokenizer_class = None
|
764 |
+
image_processor_class = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None)
|
765 |
+
feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None)
|
766 |
+
processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None)
|
767 |
+
|
768 |
+
model_files = get_model_files(model_type, frameworks=frameworks)
|
769 |
+
model_camel_cased = config_class.replace("Config", "")
|
770 |
+
|
771 |
+
available_frameworks = []
|
772 |
+
for fname in model_files["model_files"]:
|
773 |
+
if "modeling_tf" in str(fname):
|
774 |
+
available_frameworks.append("tf")
|
775 |
+
elif "modeling_flax" in str(fname):
|
776 |
+
available_frameworks.append("flax")
|
777 |
+
elif "modeling" in str(fname):
|
778 |
+
available_frameworks.append("pt")
|
779 |
+
|
780 |
+
if frameworks is None:
|
781 |
+
frameworks = get_default_frameworks()
|
782 |
+
|
783 |
+
frameworks = [f for f in frameworks if f in available_frameworks]
|
784 |
+
|
785 |
+
model_classes = retrieve_model_classes(model_type, frameworks=frameworks)
|
786 |
+
|
787 |
+
model_upper_cased = model_camel_cased.upper()
|
788 |
+
model_patterns = ModelPatterns(
|
789 |
+
model_name,
|
790 |
+
checkpoint=find_base_model_checkpoint(model_type, model_files=model_files),
|
791 |
+
model_type=model_type,
|
792 |
+
model_camel_cased=model_camel_cased,
|
793 |
+
model_lower_cased=model_files["module_name"],
|
794 |
+
model_upper_cased=model_upper_cased,
|
795 |
+
config_class=config_class,
|
796 |
+
tokenizer_class=tokenizer_class,
|
797 |
+
image_processor_class=image_processor_class,
|
798 |
+
feature_extractor_class=feature_extractor_class,
|
799 |
+
processor_class=processor_class,
|
800 |
+
)
|
801 |
+
|
802 |
+
return {
|
803 |
+
"frameworks": frameworks,
|
804 |
+
"model_classes": model_classes,
|
805 |
+
"model_files": model_files,
|
806 |
+
"model_patterns": model_patterns,
|
807 |
+
}
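# Illustrative usage sketch (not part of the original module): the returned ModelPatterns
# bundles every naming variant that the duplication logic below relies on.
def _example_retrieve_info_for_model():
    info = retrieve_info_for_model("bert", frameworks=["pt"])
    patterns = info["model_patterns"]
    return patterns.model_camel_cased, patterns.config_class, info["frameworks"]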
|
808 |
+
|
809 |
+
|
810 |
+
def clean_frameworks_in_init(
|
811 |
+
init_file: Union[str, os.PathLike], frameworks: Optional[List[str]] = None, keep_processing: bool = True
|
812 |
+
):
|
813 |
+
"""
|
814 |
+
Removes all the import lines that don't belong to a given list of frameworks or concern tokenizers/feature
|
815 |
+
extractors/image processors/processors in an init.
|
816 |
+
|
817 |
+
Args:
|
818 |
+
init_file (`str` or `os.PathLike`): The path to the init to treat.
|
819 |
+
frameworks (`List[str]`, *optional*):
|
820 |
+
If passed, this will remove all imports tied to a framework not in this list.
|
821 |
+
keep_processing (`bool`, *optional*, defaults to `True`):
|
822 |
+
Whether or not to keep the preprocessing (tokenizer, feature extractor, image processor, processor) imports
|
823 |
+
in the init.
|
824 |
+
"""
|
825 |
+
if frameworks is None:
|
826 |
+
frameworks = get_default_frameworks()
|
827 |
+
|
828 |
+
names = {"pt": "torch"}
|
829 |
+
to_remove = [names.get(f, f) for f in ["pt", "tf", "flax"] if f not in frameworks]
|
830 |
+
if not keep_processing:
|
831 |
+
to_remove.extend(["sentencepiece", "tokenizers", "vision"])
|
832 |
+
|
833 |
+
if len(to_remove) == 0:
|
834 |
+
# Nothing to do
|
835 |
+
return
|
836 |
+
|
837 |
+
remove_pattern = "|".join(to_remove)
|
838 |
+
re_conditional_imports = re.compile(rf"^\s*if not is_({remove_pattern})_available\(\):\s*$")
|
839 |
+
re_try = re.compile(r"\s*try:")
|
840 |
+
re_else = re.compile(r"\s*else:")
|
841 |
+
re_is_xxx_available = re.compile(rf"is_({remove_pattern})_available")
|
842 |
+
|
843 |
+
with open(init_file, "r", encoding="utf-8") as f:
|
844 |
+
content = f.read()
|
845 |
+
|
846 |
+
lines = content.split("\n")
|
847 |
+
new_lines = []
|
848 |
+
idx = 0
|
849 |
+
while idx < len(lines):
|
850 |
+
# Conditional imports in try-except-else blocks
|
851 |
+
if (re_conditional_imports.search(lines[idx]) is not None) and (re_try.search(lines[idx - 1]) is not None):
|
852 |
+
# Remove the preceding `try:`
|
853 |
+
new_lines.pop()
|
854 |
+
idx += 1
|
855 |
+
# Iterate until `else:`
|
856 |
+
while is_empty_line(lines[idx]) or re_else.search(lines[idx]) is None:
|
857 |
+
idx += 1
|
858 |
+
idx += 1
|
859 |
+
indent = find_indent(lines[idx])
|
860 |
+
while find_indent(lines[idx]) >= indent or is_empty_line(lines[idx]):
|
861 |
+
idx += 1
|
862 |
+
# Remove the import from utils
|
863 |
+
elif re_is_xxx_available.search(lines[idx]) is not None:
|
864 |
+
line = lines[idx]
|
865 |
+
for framework in to_remove:
|
866 |
+
line = line.replace(f", is_{framework}_available", "")
|
867 |
+
line = line.replace(f"is_{framework}_available, ", "")
|
868 |
+
line = line.replace(f"is_{framework}_available,", "")
|
869 |
+
line = line.replace(f"is_{framework}_available", "")
|
870 |
+
|
871 |
+
if len(line.strip()) > 0:
|
872 |
+
new_lines.append(line)
|
873 |
+
idx += 1
|
874 |
+
# Otherwise we keep the line, except if it's a tokenizer import and we don't want to keep it.
|
875 |
+
elif keep_processing or (
|
876 |
+
re.search(r'^\s*"(tokenization|processing|feature_extraction|image_processing)', lines[idx]) is None
|
877 |
+
and re.search(r"^\s*from .(tokenization|processing|feature_extraction|image_processing)", lines[idx])
|
878 |
+
is None
|
879 |
+
):
|
880 |
+
new_lines.append(lines[idx])
|
881 |
+
idx += 1
|
882 |
+
else:
|
883 |
+
idx += 1
|
884 |
+
|
885 |
+
with open(init_file, "w", encoding="utf-8") as f:
|
886 |
+
f.write("\n".join(new_lines))
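# Illustrative usage sketch (not part of the original module): strip the TF and Flax import
# branches from a freshly duplicated model __init__.py while keeping the processing imports.
# The "new_bert" path below is hypothetical.
def _example_clean_frameworks_in_init():
    init_file = TRANSFORMERS_PATH / "models" / "new_bert" / "__init__.py"
    clean_frameworks_in_init(init_file, frameworks=["pt"], keep_processing=True)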
|
887 |
+
|
888 |
+
|
889 |
+
def add_model_to_main_init(
|
890 |
+
old_model_patterns: ModelPatterns,
|
891 |
+
new_model_patterns: ModelPatterns,
|
892 |
+
frameworks: Optional[List[str]] = None,
|
893 |
+
with_processing: bool = True,
|
894 |
+
):
|
895 |
+
"""
|
896 |
+
Add a model to the main init of Transformers.
|
897 |
+
|
898 |
+
Args:
|
899 |
+
old_model_patterns (`ModelPatterns`): The patterns for the old model.
|
900 |
+
new_model_patterns (`ModelPatterns`): The patterns for the new model.
|
901 |
+
frameworks (`List[str]`, *optional*):
|
902 |
+
If specified, only the models implemented in those frameworks will be added.
|
903 |
+
with_processing (`bool`, *optional*, defaults to `True`):
|
904 |
+
Whether the tokenizer/feature extractor/processor of the model should also be added to the init or not.
|
905 |
+
"""
|
906 |
+
with open(TRANSFORMERS_PATH / "__init__.py", "r", encoding="utf-8") as f:
|
907 |
+
content = f.read()
|
908 |
+
|
909 |
+
lines = content.split("\n")
|
910 |
+
idx = 0
|
911 |
+
new_lines = []
|
912 |
+
framework = None
|
913 |
+
while idx < len(lines):
|
914 |
+
new_framework = False
|
915 |
+
if not is_empty_line(lines[idx]) and find_indent(lines[idx]) == 0:
|
916 |
+
framework = None
|
917 |
+
elif lines[idx].lstrip().startswith("if not is_torch_available"):
|
918 |
+
framework = "pt"
|
919 |
+
new_framework = True
|
920 |
+
elif lines[idx].lstrip().startswith("if not is_tf_available"):
|
921 |
+
framework = "tf"
|
922 |
+
new_framework = True
|
923 |
+
elif lines[idx].lstrip().startswith("if not is_flax_available"):
|
924 |
+
framework = "flax"
|
925 |
+
new_framework = True
|
926 |
+
|
927 |
+
if new_framework:
|
928 |
+
# For a new framework, we need to skip until the else: block to get where the imports are.
|
929 |
+
while lines[idx].strip() != "else:":
|
930 |
+
new_lines.append(lines[idx])
|
931 |
+
idx += 1
|
932 |
+
|
933 |
+
# Skip if we are in a framework not wanted.
|
934 |
+
if framework is not None and frameworks is not None and framework not in frameworks:
|
935 |
+
new_lines.append(lines[idx])
|
936 |
+
idx += 1
|
937 |
+
elif re.search(rf'models.{old_model_patterns.model_lower_cased}( |")', lines[idx]) is not None:
|
938 |
+
block = [lines[idx]]
|
939 |
+
indent = find_indent(lines[idx])
|
940 |
+
idx += 1
|
941 |
+
while find_indent(lines[idx]) > indent:
|
942 |
+
block.append(lines[idx])
|
943 |
+
idx += 1
|
944 |
+
if lines[idx].strip() in [")", "]", "],"]:
|
945 |
+
block.append(lines[idx])
|
946 |
+
idx += 1
|
947 |
+
block = "\n".join(block)
|
948 |
+
new_lines.append(block)
|
949 |
+
|
950 |
+
add_block = True
|
951 |
+
if not with_processing:
|
952 |
+
processing_classes = [
|
953 |
+
old_model_patterns.tokenizer_class,
|
954 |
+
old_model_patterns.image_processor_class,
|
955 |
+
old_model_patterns.feature_extractor_class,
|
956 |
+
old_model_patterns.processor_class,
|
957 |
+
]
|
958 |
+
# Only keep the ones that are not None
|
959 |
+
processing_classes = [c for c in processing_classes if c is not None]
|
960 |
+
for processing_class in processing_classes:
|
961 |
+
block = block.replace(f' "{processing_class}",', "")
|
962 |
+
block = block.replace(f', "{processing_class}"', "")
|
963 |
+
block = block.replace(f" {processing_class},", "")
|
964 |
+
block = block.replace(f", {processing_class}", "")
|
965 |
+
|
966 |
+
if processing_class in block:
|
967 |
+
add_block = False
|
968 |
+
if add_block:
|
969 |
+
new_lines.append(replace_model_patterns(block, old_model_patterns, new_model_patterns)[0])
|
970 |
+
else:
|
971 |
+
new_lines.append(lines[idx])
|
972 |
+
idx += 1
|
973 |
+
|
974 |
+
with open(TRANSFORMERS_PATH / "__init__.py", "w", encoding="utf-8") as f:
|
975 |
+
f.write("\n".join(new_lines))
|
976 |
+
|
977 |
+
|
978 |
+
def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns):
|
979 |
+
"""
|
980 |
+
Add a tokenizer to the relevant mappings in the auto module.
|
981 |
+
|
982 |
+
Args:
|
983 |
+
old_model_patterns (`ModelPatterns`): The patterns for the old model.
|
984 |
+
new_model_patterns (`ModelPatterns`): The patterns for the new model.
|
985 |
+
"""
|
986 |
+
if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None:
|
987 |
+
return
|
988 |
+
|
989 |
+
with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "r", encoding="utf-8") as f:
|
990 |
+
content = f.read()
|
991 |
+
|
992 |
+
lines = content.split("\n")
|
993 |
+
idx = 0
|
994 |
+
# First we get to the TOKENIZER_MAPPING_NAMES block.
|
995 |
+
while not lines[idx].startswith(" TOKENIZER_MAPPING_NAMES = OrderedDict("):
|
996 |
+
idx += 1
|
997 |
+
idx += 1
|
998 |
+
|
999 |
+
# That block will end at this line:
|
1000 |
+
while not lines[idx].startswith("TOKENIZER_MAPPING = _LazyAutoMapping"):
|
1001 |
+
# Either all the tokenizer block is defined on one line, in which case, it ends with "),"
|
1002 |
+
if lines[idx].endswith(","):
|
1003 |
+
block = lines[idx]
|
1004 |
+
# Otherwise it takes several lines until we get to a "),"
|
1005 |
+
else:
|
1006 |
+
block = []
|
1007 |
+
while not lines[idx].startswith(" ),"):
|
1008 |
+
block.append(lines[idx])
|
1009 |
+
idx += 1
|
1010 |
+
block = "\n".join(block)
|
1011 |
+
idx += 1
|
1012 |
+
|
1013 |
+
# If we find the model type and tokenizer class in that block, we have the old model tokenizer block
|
1014 |
+
if f'"{old_model_patterns.model_type}"' in block and old_model_patterns.tokenizer_class in block:
|
1015 |
+
break
|
1016 |
+
|
1017 |
+
new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)
|
1018 |
+
new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)
|
1019 |
+
|
1020 |
+
new_lines = lines[:idx] + [new_block] + lines[idx:]
|
1021 |
+
with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "w", encoding="utf-8") as f:
|
1022 |
+
f.write("\n".join(new_lines))
|
1023 |
+
|
1024 |
+
|
1025 |
+
AUTO_CLASSES_PATTERNS = {
|
1026 |
+
"configuration_auto.py": [
|
1027 |
+
' ("{model_type}", "{model_name}"),',
|
1028 |
+
' ("{model_type}", "{config_class}"),',
|
1029 |
+
' ("{model_type}", "{pretrained_archive_map}"),',
|
1030 |
+
],
|
1031 |
+
"feature_extraction_auto.py": [' ("{model_type}", "{feature_extractor_class}"),'],
|
1032 |
+
"image_processing_auto.py": [' ("{model_type}", "{image_processor_class}"),'],
|
1033 |
+
"modeling_auto.py": [' ("{model_type}", "{any_pt_class}"),'],
|
1034 |
+
"modeling_tf_auto.py": [' ("{model_type}", "{any_tf_class}"),'],
|
1035 |
+
"modeling_flax_auto.py": [' ("{model_type}", "{any_flax_class}"),'],
|
1036 |
+
"processing_auto.py": [' ("{model_type}", "{processor_class}"),'],
|
1037 |
+
}
|
1038 |
+
|
1039 |
+
|
1040 |
+
def add_model_to_auto_classes(
|
1041 |
+
old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, model_classes: Dict[str, List[str]]
|
1042 |
+
):
|
1043 |
+
"""
|
1044 |
+
Add a model to the relevant mappings in the auto module.
|
1045 |
+
|
1046 |
+
Args:
|
1047 |
+
old_model_patterns (`ModelPatterns`): The patterns for the old model.
|
1048 |
+
new_model_patterns (`ModelPatterns`): The patterns for the new model.
|
1049 |
+
model_classes (`Dict[str, List[str]]`): A dictionary framework to list of model classes implemented.
|
1050 |
+
"""
|
1051 |
+
for filename in AUTO_CLASSES_PATTERNS:
|
1052 |
+
# Extend patterns with all model classes if necessary
|
1053 |
+
new_patterns = []
|
1054 |
+
for pattern in AUTO_CLASSES_PATTERNS[filename]:
|
1055 |
+
if re.search("any_([a-z]*)_class", pattern) is not None:
|
1056 |
+
framework = re.search("any_([a-z]*)_class", pattern).groups()[0]
|
1057 |
+
if framework in model_classes:
|
1058 |
+
new_patterns.extend(
|
1059 |
+
[
|
1060 |
+
pattern.replace("{" + f"any_{framework}_class" + "}", cls)
|
1061 |
+
for cls in model_classes[framework]
|
1062 |
+
]
|
1063 |
+
)
|
1064 |
+
elif "{config_class}" in pattern:
|
1065 |
+
new_patterns.append(pattern.replace("{config_class}", old_model_patterns.config_class))
|
1066 |
+
elif "{image_processor_class}" in pattern:
|
1067 |
+
if (
|
1068 |
+
old_model_patterns.image_processor_class is not None
|
1069 |
+
and new_model_patterns.image_processor_class is not None
|
1070 |
+
):
|
1071 |
+
new_patterns.append(
|
1072 |
+
pattern.replace("{image_processor_class}", old_model_patterns.image_processor_class)
|
1073 |
+
)
|
1074 |
+
elif "{feature_extractor_class}" in pattern:
|
1075 |
+
if (
|
1076 |
+
old_model_patterns.feature_extractor_class is not None
|
1077 |
+
and new_model_patterns.feature_extractor_class is not None
|
1078 |
+
):
|
1079 |
+
new_patterns.append(
|
1080 |
+
pattern.replace("{feature_extractor_class}", old_model_patterns.feature_extractor_class)
|
1081 |
+
)
|
1082 |
+
elif "{processor_class}" in pattern:
|
1083 |
+
if old_model_patterns.processor_class is not None and new_model_patterns.processor_class is not None:
|
1084 |
+
new_patterns.append(pattern.replace("{processor_class}", old_model_patterns.processor_class))
|
1085 |
+
else:
|
1086 |
+
new_patterns.append(pattern)
|
1087 |
+
|
1088 |
+
# Loop through all patterns.
|
1089 |
+
for pattern in new_patterns:
|
1090 |
+
full_name = TRANSFORMERS_PATH / "models" / "auto" / filename
|
1091 |
+
old_model_line = pattern
|
1092 |
+
new_model_line = pattern
|
1093 |
+
for attr in ["model_type", "model_name"]:
|
1094 |
+
old_model_line = old_model_line.replace("{" + attr + "}", getattr(old_model_patterns, attr))
|
1095 |
+
new_model_line = new_model_line.replace("{" + attr + "}", getattr(new_model_patterns, attr))
|
1096 |
+
new_model_line = new_model_line.replace(
|
1097 |
+
old_model_patterns.model_camel_cased, new_model_patterns.model_camel_cased
|
1098 |
+
)
|
1099 |
+
|
1100 |
+
add_content_to_file(full_name, new_model_line, add_after=old_model_line)
|
1101 |
+
|
1102 |
+
# Tokenizers require special handling
|
1103 |
+
insert_tokenizer_in_auto_module(old_model_patterns, new_model_patterns)
|
1104 |
+
|
1105 |
+
|
1106 |
+
DOC_OVERVIEW_TEMPLATE = """## Overview
|
1107 |
+
|
1108 |
+
The {model_name} model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
|
1109 |
+
<INSERT SHORT SUMMARY HERE>
|
1110 |
+
|
1111 |
+
The abstract from the paper is the following:
|
1112 |
+
|
1113 |
+
*<INSERT PAPER ABSTRACT HERE>*
|
1114 |
+
|
1115 |
+
Tips:
|
1116 |
+
|
1117 |
+
<INSERT TIPS ABOUT MODEL HERE>
|
1118 |
+
|
1119 |
+
This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
|
1120 |
+
The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).
|
1121 |
+
|
1122 |
+
"""
|
1123 |
+
|
1124 |
+
|
1125 |
+
def duplicate_doc_file(
|
1126 |
+
doc_file: Union[str, os.PathLike],
|
1127 |
+
old_model_patterns: ModelPatterns,
|
1128 |
+
new_model_patterns: ModelPatterns,
|
1129 |
+
dest_file: Optional[Union[str, os.PathLike]] = None,
|
1130 |
+
frameworks: Optional[List[str]] = None,
|
1131 |
+
):
|
1132 |
+
"""
|
1133 |
+
Duplicates a documentation file and adapts it for a new model.
|
1134 |
+
|
1135 |
+
Args:
|
1136 |
+
doc_file (`str` or `os.PathLike`): Path to the doc file to duplicate.
|
1137 |
+
old_model_patterns (`ModelPatterns`): The patterns for the old model.
|
1138 |
+
new_model_patterns (`ModelPatterns`): The patterns for the new model.
|
1139 |
+
dest_file (`str` or `os.PathLike`, *optional*): Path to the new doc file.
|
1140 |
+
Will default to a file named `{new_model_patterns.model_type}.md` in the same folder as `doc_file`.
|
1141 |
+
frameworks (`List[str]`, *optional*):
|
1142 |
+
If passed, will only keep the model classes corresponding to this list of frameworks in the new doc file.
|
1143 |
+
"""
|
1144 |
+
with open(doc_file, "r", encoding="utf-8") as f:
|
1145 |
+
content = f.read()
|
1146 |
+
|
1147 |
+
content = re.sub(r"<!--\s*Copyright (\d+)\s", f"<!--Copyright {CURRENT_YEAR} ", content)
|
1148 |
+
if frameworks is None:
|
1149 |
+
frameworks = get_default_frameworks()
|
1150 |
+
if dest_file is None:
|
1151 |
+
dest_file = Path(doc_file).parent / f"{new_model_patterns.model_type}.md"
|
1152 |
+
|
1153 |
+
# Parse the doc file in blocks. One block per section/header
|
1154 |
+
lines = content.split("\n")
|
1155 |
+
blocks = []
|
1156 |
+
current_block = []
|
1157 |
+
|
1158 |
+
for line in lines:
|
1159 |
+
if line.startswith("#"):
|
1160 |
+
blocks.append("\n".join(current_block))
|
1161 |
+
current_block = [line]
|
1162 |
+
else:
|
1163 |
+
current_block.append(line)
|
1164 |
+
blocks.append("\n".join(current_block))
|
1165 |
+
|
1166 |
+
new_blocks = []
|
1167 |
+
in_classes = False
|
1168 |
+
for block in blocks:
|
1169 |
+
# Copyright
|
1170 |
+
if not block.startswith("#"):
|
1171 |
+
new_blocks.append(block)
|
1172 |
+
# Main title
|
1173 |
+
elif re.search(r"^#\s+\S+", block) is not None:
|
1174 |
+
new_blocks.append(f"# {new_model_patterns.model_name}\n")
|
1175 |
+
# The config starts the part of the doc with the classes.
|
1176 |
+
elif not in_classes and old_model_patterns.config_class in block.split("\n")[0]:
|
1177 |
+
in_classes = True
|
1178 |
+
new_blocks.append(DOC_OVERVIEW_TEMPLATE.format(model_name=new_model_patterns.model_name))
|
1179 |
+
new_block, _ = replace_model_patterns(block, old_model_patterns, new_model_patterns)
|
1180 |
+
new_blocks.append(new_block)
|
1181 |
+
# In classes
|
1182 |
+
elif in_classes:
|
1183 |
+
in_classes = True
|
1184 |
+
block_title = block.split("\n")[0]
|
1185 |
+
block_class = re.search(r"^#+\s+(\S.*)$", block_title).groups()[0]
|
1186 |
+
new_block, _ = replace_model_patterns(block, old_model_patterns, new_model_patterns)
|
1187 |
+
|
1188 |
+
if "Tokenizer" in block_class:
|
1189 |
+
# We only add the tokenizer if necessary
|
1190 |
+
if old_model_patterns.tokenizer_class != new_model_patterns.tokenizer_class:
|
1191 |
+
new_blocks.append(new_block)
|
1192 |
+
elif "ImageProcessor" in block_class:
|
1193 |
+
# We only add the image processor if necessary
|
1194 |
+
if old_model_patterns.image_processor_class != new_model_patterns.image_processor_class:
|
1195 |
+
new_blocks.append(new_block)
|
1196 |
+
elif "FeatureExtractor" in block_class:
|
1197 |
+
# We only add the feature extractor if necessary
|
1198 |
+
if old_model_patterns.feature_extractor_class != new_model_patterns.feature_extractor_class:
|
1199 |
+
new_blocks.append(new_block)
|
1200 |
+
elif "Processor" in block_class:
|
1201 |
+
# We only add the processor if necessary
|
1202 |
+
if old_model_patterns.processor_class != new_model_patterns.processor_class:
|
1203 |
+
new_blocks.append(new_block)
|
1204 |
+
elif block_class.startswith("Flax"):
|
1205 |
+
# We only add Flax models if in the selected frameworks
|
1206 |
+
if "flax" in frameworks:
|
1207 |
+
new_blocks.append(new_block)
|
1208 |
+
elif block_class.startswith("TF"):
|
1209 |
+
# We only add TF models if in the selected frameworks
|
1210 |
+
if "tf" in frameworks:
|
1211 |
+
new_blocks.append(new_block)
|
1212 |
+
elif len(block_class.split(" ")) == 1:
|
1213 |
+
# We only add PyTorch models if in the selected frameworks
|
1214 |
+
if "pt" in frameworks:
|
1215 |
+
new_blocks.append(new_block)
|
1216 |
+
else:
|
1217 |
+
new_blocks.append(new_block)
|
1218 |
+
|
1219 |
+
with open(dest_file, "w", encoding="utf-8") as f:
|
1220 |
+
f.write("\n".join(new_blocks))
|
1221 |
+
|
1222 |
+
|
1223 |
+
def insert_model_in_doc_toc(old_model_patterns, new_model_patterns):
|
1224 |
+
"""
|
1225 |
+
Insert the new model in the doc TOC, in the same section as the old model.
|
1226 |
+
|
1227 |
+
Args:
|
1228 |
+
old_model_patterns (`ModelPatterns`): The patterns for the old model.
|
1229 |
+
new_model_patterns (`ModelPatterns`): The patterns for the new model.
|
1230 |
+
"""
|
1231 |
+
toc_file = REPO_PATH / "docs" / "source" / "en" / "_toctree.yml"
|
1232 |
+
with open(toc_file, "r", encoding="utf8") as f:
|
1233 |
+
content = yaml.safe_load(f)
|
1234 |
+
|
1235 |
+
# Get to the model API doc
|
1236 |
+
api_idx = 0
|
1237 |
+
while content[api_idx]["title"] != "API":
|
1238 |
+
api_idx += 1
|
1239 |
+
api_doc = content[api_idx]["sections"]
|
1240 |
+
|
1241 |
+
model_idx = 0
|
1242 |
+
while api_doc[model_idx]["title"] != "Models":
|
1243 |
+
model_idx += 1
|
1244 |
+
model_doc = api_doc[model_idx]["sections"]
|
1245 |
+
|
1246 |
+
# Find the base model in the Toc
|
1247 |
+
old_model_type = old_model_patterns.model_type
|
1248 |
+
section_idx = 0
|
1249 |
+
while section_idx < len(model_doc):
|
1250 |
+
sections = [entry["local"] for entry in model_doc[section_idx]["sections"]]
|
1251 |
+
if f"model_doc/{old_model_type}" in sections:
|
1252 |
+
break
|
1253 |
+
|
1254 |
+
section_idx += 1
|
1255 |
+
|
1256 |
+
if section_idx == len(model_doc):
|
1257 |
+
old_model = old_model_patterns.model_name
|
1258 |
+
new_model = new_model_patterns.model_name
|
1259 |
+
print(f"Did not find {old_model} in the table of content, so you will need to add {new_model} manually.")
|
1260 |
+
return
|
1261 |
+
|
1262 |
+
# Add the new model in the same toc
|
1263 |
+
toc_entry = {"local": f"model_doc/{new_model_patterns.model_type}", "title": new_model_patterns.model_name}
|
1264 |
+
model_doc[section_idx]["sections"].append(toc_entry)
|
1265 |
+
model_doc[section_idx]["sections"] = sorted(model_doc[section_idx]["sections"], key=lambda s: s["title"].lower())
|
1266 |
+
api_doc[model_idx]["sections"] = model_doc
|
1267 |
+
content[api_idx]["sections"] = api_doc
|
1268 |
+
|
1269 |
+
with open(toc_file, "w", encoding="utf-8") as f:
|
1270 |
+
f.write(yaml.dump(content, allow_unicode=True))
|
1271 |
+
|
1272 |
+
|
1273 |
+
def create_new_model_like(
|
1274 |
+
model_type: str,
|
1275 |
+
new_model_patterns: ModelPatterns,
|
1276 |
+
add_copied_from: bool = True,
|
1277 |
+
frameworks: Optional[List[str]] = None,
|
1278 |
+
old_checkpoint: Optional[str] = None,
|
1279 |
+
):
|
1280 |
+
"""
|
1281 |
+
Creates a new model module like a given model of the Transformers library.
|
1282 |
+
|
1283 |
+
Args:
|
1284 |
+
model_type (`str`): The model type to duplicate (like "bert" or "gpt2")
|
1285 |
+
new_model_patterns (`ModelPatterns`): The patterns for the new model.
|
1286 |
+
add_copied_from (`bool`, *optional*, defaults to `True`):
|
1287 |
+
Whether or not to add "Copied from" statements to all classes in the new model modeling files.
|
1288 |
+
frameworks (`List[str]`, *optional*):
|
1289 |
+
If passed, will limit the duplicate to the frameworks specified.
|
1290 |
+
old_checkpoint (`str`, *optional*):
|
1291 |
+
The name of the base checkpoint for the old model. Should be passed along when it can't be automatically
|
1292 |
+
recovered from the `model_type`.
|
1293 |
+
"""
|
1294 |
+
# Retrieve all the old model info.
|
1295 |
+
model_info = retrieve_info_for_model(model_type, frameworks=frameworks)
|
1296 |
+
model_files = model_info["model_files"]
|
1297 |
+
old_model_patterns = model_info["model_patterns"]
|
1298 |
+
if old_checkpoint is not None:
|
1299 |
+
old_model_patterns.checkpoint = old_checkpoint
|
1300 |
+
if len(old_model_patterns.checkpoint) == 0:
|
1301 |
+
raise ValueError(
|
1302 |
+
"The old model checkpoint could not be recovered from the model type. Please pass it to the "
|
1303 |
+
"`old_checkpoint` argument."
|
1304 |
+
)
|
1305 |
+
|
1306 |
+
keep_old_processing = True
|
1307 |
+
for processing_attr in ["image_processor_class", "feature_extractor_class", "processor_class", "tokenizer_class"]:
|
1308 |
+
if getattr(old_model_patterns, processing_attr) != getattr(new_model_patterns, processing_attr):
|
1309 |
+
keep_old_processing = False
|
1310 |
+
|
1311 |
+
model_classes = model_info["model_classes"]
|
1312 |
+
|
1313 |
+
# 1. We create the module for our new model.
|
1314 |
+
old_module_name = model_files["module_name"]
|
1315 |
+
module_folder = TRANSFORMERS_PATH / "models" / new_model_patterns.model_lower_cased
|
1316 |
+
os.makedirs(module_folder, exist_ok=True)
|
1317 |
+
|
1318 |
+
files_to_adapt = model_files["model_files"]
|
1319 |
+
if keep_old_processing:
|
1320 |
+
files_to_adapt = [
|
1321 |
+
f
|
1322 |
+
for f in files_to_adapt
|
1323 |
+
if "tokenization" not in str(f)
|
1324 |
+
and "processing" not in str(f)
|
1325 |
+
and "feature_extraction" not in str(f)
|
1326 |
+
and "image_processing" not in str(f)
|
1327 |
+
]
|
1328 |
+
|
1329 |
+
os.makedirs(module_folder, exist_ok=True)
|
1330 |
+
for module_file in files_to_adapt:
|
1331 |
+
new_module_name = module_file.name.replace(
|
1332 |
+
old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
|
1333 |
+
)
|
1334 |
+
dest_file = module_folder / new_module_name
|
1335 |
+
duplicate_module(
|
1336 |
+
module_file,
|
1337 |
+
old_model_patterns,
|
1338 |
+
new_model_patterns,
|
1339 |
+
dest_file=dest_file,
|
1340 |
+
add_copied_from=add_copied_from and "modeling" in new_module_name,
|
1341 |
+
)
|
1342 |
+
|
1343 |
+
clean_frameworks_in_init(
|
1344 |
+
module_folder / "__init__.py", frameworks=frameworks, keep_processing=not keep_old_processing
|
1345 |
+
)
|
1346 |
+
|
1347 |
+
# 2. We add our new model to the models init and the main init
|
1348 |
+
add_content_to_file(
|
1349 |
+
TRANSFORMERS_PATH / "models" / "__init__.py",
|
1350 |
+
f" {new_model_patterns.model_lower_cased},",
|
1351 |
+
add_after=f" {old_module_name},",
|
1352 |
+
exact_match=True,
|
1353 |
+
)
|
1354 |
+
add_model_to_main_init(
|
1355 |
+
old_model_patterns, new_model_patterns, frameworks=frameworks, with_processing=not keep_old_processing
|
1356 |
+
)
|
1357 |
+
|
1358 |
+
# 3. Add test files
|
1359 |
+
files_to_adapt = model_files["test_files"]
|
1360 |
+
if keep_old_processing:
|
1361 |
+
files_to_adapt = [
|
1362 |
+
f
|
1363 |
+
for f in files_to_adapt
|
1364 |
+
if "tokenization" not in str(f)
|
1365 |
+
and "processor" not in str(f)
|
1366 |
+
and "feature_extraction" not in str(f)
|
1367 |
+
and "image_processing" not in str(f)
|
1368 |
+
]
|
1369 |
+
|
1370 |
+
def disable_fx_test(filename: Path) -> bool:
|
1371 |
+
with open(filename) as fp:
|
1372 |
+
content = fp.read()
|
1373 |
+
new_content = re.sub(r"fx_compatible\s*=\s*True", "fx_compatible = False", content)
|
1374 |
+
with open(filename, "w") as fp:
|
1375 |
+
fp.write(new_content)
|
1376 |
+
return content != new_content
|
1377 |
+
|
1378 |
+
disabled_fx_test = False
|
1379 |
+
|
1380 |
+
tests_folder = REPO_PATH / "tests" / "models" / new_model_patterns.model_lower_cased
|
1381 |
+
os.makedirs(tests_folder, exist_ok=True)
|
1382 |
+
with open(tests_folder / "__init__.py", "w"):
|
1383 |
+
pass
|
1384 |
+
|
1385 |
+
for test_file in files_to_adapt:
|
1386 |
+
new_test_file_name = test_file.name.replace(
|
1387 |
+
old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
|
1388 |
+
)
|
1389 |
+
dest_file = test_file.parent.parent / new_model_patterns.model_lower_cased / new_test_file_name
|
1390 |
+
duplicate_module(
|
1391 |
+
test_file,
|
1392 |
+
old_model_patterns,
|
1393 |
+
new_model_patterns,
|
1394 |
+
dest_file=dest_file,
|
1395 |
+
add_copied_from=False,
|
1396 |
+
attrs_to_remove=["pipeline_model_mapping", "is_pipeline_test_to_skip"],
|
1397 |
+
)
|
1398 |
+
disabled_fx_test = disabled_fx_test | disable_fx_test(dest_file)
|
1399 |
+
|
1400 |
+
if disabled_fx_test:
|
1401 |
+
print(
|
1402 |
+
"The tests for symbolic tracing with torch.fx were disabled, you can add those once symbolic tracing works"
|
1403 |
+
" for your new model."
|
1404 |
+
)
|
1405 |
+
|
1406 |
+
# 4. Add model to auto classes
|
1407 |
+
add_model_to_auto_classes(old_model_patterns, new_model_patterns, model_classes)
|
1408 |
+
|
1409 |
+
# 5. Add doc file
|
1410 |
+
doc_file = REPO_PATH / "docs" / "source" / "en" / "model_doc" / f"{old_model_patterns.model_type}.md"
|
1411 |
+
duplicate_doc_file(doc_file, old_model_patterns, new_model_patterns, frameworks=frameworks)
|
1412 |
+
insert_model_in_doc_toc(old_model_patterns, new_model_patterns)
|
1413 |
+
|
1414 |
+
# 6. Warn the user for duplicate patterns
|
1415 |
+
if old_model_patterns.model_type == old_model_patterns.checkpoint:
|
1416 |
+
print(
|
1417 |
+
"The model you picked has the same name for the model type and the checkpoint name "
|
1418 |
+
f"({old_model_patterns.model_type}). As a result, it's possible some places where the new checkpoint "
|
1419 |
+
f"should be, you have {new_model_patterns.model_type} instead. You should search for all instances of "
|
1420 |
+
f"{new_model_patterns.model_type} in the new files and check they're not badly used as checkpoints."
|
1421 |
+
)
|
1422 |
+
elif old_model_patterns.model_lower_cased == old_model_patterns.checkpoint:
|
1423 |
+
print(
|
1424 |
+
"The model you picked has the same name for the model type and the checkpoint name "
|
1425 |
+
f"({old_model_patterns.model_lower_cased}). As a result, it's possible some places where the new "
|
1426 |
+
f"checkpoint should be, you have {new_model_patterns.model_lower_cased} instead. You should search for "
|
1427 |
+
f"all instances of {new_model_patterns.model_lower_cased} in the new files and check they're not badly "
|
1428 |
+
"used as checkpoints."
|
1429 |
+
)
|
1430 |
+
if (
|
1431 |
+
old_model_patterns.model_type == old_model_patterns.model_lower_cased
|
1432 |
+
and new_model_patterns.model_type != new_model_patterns.model_lower_cased
|
1433 |
+
):
|
1434 |
+
print(
|
1435 |
+
"The model you picked has the same name for the model type and the lowercased model name "
|
1436 |
+
f"({old_model_patterns.model_lower_cased}). As a result, it's possible some places where the new "
|
1437 |
+
f"model type should be, you have {new_model_patterns.model_lower_cased} instead. You should search for "
|
1438 |
+
f"all instances of {new_model_patterns.model_lower_cased} in the new files and check they're not badly "
|
1439 |
+
"used as the model type."
|
1440 |
+
)
|
1441 |
+
|
1442 |
+
if not keep_old_processing and old_model_patterns.tokenizer_class is not None:
|
1443 |
+
print(
|
1444 |
+
"The constants at the start of the new tokenizer file created needs to be manually fixed. If your new "
|
1445 |
+
"model has a tokenizer fast, you will also need to manually add the converter in the "
|
1446 |
+
"`SLOW_TO_FAST_CONVERTERS` constant of `convert_slow_tokenizer.py`."
|
1447 |
+
)
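# Illustrative usage sketch (not part of the original module): duplicate BERT into a
# hypothetical "NewBert" model, PyTorch only. All of the new names below are made up, and the
# call writes new files into the source checkout, so it only makes sense with an editable install.
def _example_create_new_model_like():
    new_patterns = ModelPatterns(
        "NewBert",
        checkpoint="example-org/new-bert-base",
        model_type="new-bert",
        tokenizer_class="BertTokenizer",
    )
    create_new_model_like("bert", new_patterns, add_copied_from=True, frameworks=["pt"])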
|
1448 |
+
|
1449 |
+
|
1450 |
+
def add_new_model_like_command_factory(args: Namespace):
|
1451 |
+
return AddNewModelLikeCommand(config_file=args.config_file, path_to_repo=args.path_to_repo)
|
1452 |
+
|
1453 |
+
|
1454 |
+
class AddNewModelLikeCommand(BaseTransformersCLICommand):
|
1455 |
+
@staticmethod
|
1456 |
+
def register_subcommand(parser: ArgumentParser):
|
1457 |
+
add_new_model_like_parser = parser.add_parser("add-new-model-like")
|
1458 |
+
add_new_model_like_parser.add_argument(
|
1459 |
+
"--config_file", type=str, help="A file with all the information for this model creation."
|
1460 |
+
)
|
1461 |
+
add_new_model_like_parser.add_argument(
|
1462 |
+
"--path_to_repo", type=str, help="When not using an editable install, the path to the Transformers repo."
|
1463 |
+
)
|
1464 |
+
add_new_model_like_parser.set_defaults(func=add_new_model_like_command_factory)
|
1465 |
+
|
1466 |
+
def __init__(self, config_file=None, path_to_repo=None, *args):
|
1467 |
+
if config_file is not None:
|
1468 |
+
with open(config_file, "r", encoding="utf-8") as f:
|
1469 |
+
config = json.load(f)
|
1470 |
+
self.old_model_type = config["old_model_type"]
|
1471 |
+
self.model_patterns = ModelPatterns(**config["new_model_patterns"])
|
1472 |
+
self.add_copied_from = config.get("add_copied_from", True)
|
1473 |
+
self.frameworks = config.get("frameworks", get_default_frameworks())
|
1474 |
+
self.old_checkpoint = config.get("old_checkpoint", None)
|
1475 |
+
else:
|
1476 |
+
(
|
1477 |
+
self.old_model_type,
|
1478 |
+
self.model_patterns,
|
1479 |
+
self.add_copied_from,
|
1480 |
+
self.frameworks,
|
1481 |
+
self.old_checkpoint,
|
1482 |
+
) = get_user_input()
|
1483 |
+
|
1484 |
+
self.path_to_repo = path_to_repo
|
1485 |
+
|
1486 |
+
def run(self):
|
1487 |
+
if self.path_to_repo is not None:
|
1488 |
+
# Adapt constants
|
1489 |
+
global TRANSFORMERS_PATH
|
1490 |
+
global REPO_PATH
|
1491 |
+
|
1492 |
+
REPO_PATH = Path(self.path_to_repo)
|
1493 |
+
TRANSFORMERS_PATH = REPO_PATH / "src" / "transformers"
|
1494 |
+
|
1495 |
+
create_new_model_like(
|
1496 |
+
model_type=self.old_model_type,
|
1497 |
+
new_model_patterns=self.model_patterns,
|
1498 |
+
add_copied_from=self.add_copied_from,
|
1499 |
+
frameworks=self.frameworks,
|
1500 |
+
old_checkpoint=self.old_checkpoint,
|
1501 |
+
)
|
1502 |
+
|
1503 |
+
|
1504 |
+
def get_user_field(
|
1505 |
+
question: str,
|
1506 |
+
default_value: Optional[str] = None,
|
1507 |
+
is_valid_answer: Optional[Callable] = None,
|
1508 |
+
convert_to: Optional[Callable] = None,
|
1509 |
+
fallback_message: Optional[str] = None,
|
1510 |
+
) -> Any:
|
1511 |
+
"""
|
1512 |
+
A utility function that asks a question to the user to get an answer, potentially looping until it gets a valid
|
1513 |
+
answer.
|
1514 |
+
|
1515 |
+
Args:
|
1516 |
+
question (`str`): The question to ask the user.
|
1517 |
+
default_value (`str`, *optional*): A potential default value that will be used when the answer is empty.
|
1518 |
+
is_valid_answer (`Callable`, *optional*):
|
1519 |
+
If set, the question will be asked until this function returns `True` on the provided answer.
|
1520 |
+
convert_to (`Callable`, *optional*):
|
1521 |
+
If set, the answer will be passed to this function. If this function raises an error on the provided
|
1522 |
+
answer, the question will be asked again.
|
1523 |
+
fallback_message (`str`, *optional*):
|
1524 |
+
A message that will be displayed each time the question is asked again to the user.
|
1525 |
+
|
1526 |
+
Returns:
|
1527 |
+
`Any`: The answer provided by the user (or the default), passed through the potential conversion function.
|
1528 |
+
"""
|
1529 |
+
if not question.endswith(" "):
|
1530 |
+
question = question + " "
|
1531 |
+
if default_value is not None:
|
1532 |
+
question = f"{question} [{default_value}] "
|
1533 |
+
|
1534 |
+
valid_answer = False
|
1535 |
+
while not valid_answer:
|
1536 |
+
answer = input(question)
|
1537 |
+
if default_value is not None and len(answer) == 0:
|
1538 |
+
answer = default_value
|
1539 |
+
if is_valid_answer is not None:
|
1540 |
+
valid_answer = is_valid_answer(answer)
|
1541 |
+
elif convert_to is not None:
|
1542 |
+
try:
|
1543 |
+
answer = convert_to(answer)
|
1544 |
+
valid_answer = True
|
1545 |
+
except Exception:
|
1546 |
+
valid_answer = False
|
1547 |
+
else:
|
1548 |
+
valid_answer = True
|
1549 |
+
|
1550 |
+
if not valid_answer:
|
1551 |
+
print(fallback_message)
|
1552 |
+
|
1553 |
+
return answer
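# Illustrative usage sketch (not part of the original module): ask for an integer on stdin,
# falling back to a default and re-asking until the conversion succeeds.
def _example_get_user_field():
    return get_user_field(
        "How many hidden layers should the example use?",
        default_value="12",
        convert_to=int,
        fallback_message="Please enter an integer.",
    )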
|
1554 |
+
|
1555 |
+
|
1556 |
+
def convert_to_bool(x: str) -> bool:
|
1557 |
+
"""
|
1558 |
+
Converts a string to a bool.
|
1559 |
+
"""
|
1560 |
+
if x.lower() in ["1", "y", "yes", "true"]:
|
1561 |
+
return True
|
1562 |
+
if x.lower() in ["0", "n", "no", "false"]:
|
1563 |
+
return False
|
1564 |
+
raise ValueError(f"{x} is not a value that can be converted to a bool.")
|
1565 |
+
|
1566 |
+
|
1567 |
+
def get_user_input():
|
1568 |
+
"""
|
1569 |
+
Ask the user for the necessary inputs to add the new model.
|
1570 |
+
"""
|
1571 |
+
model_types = list(auto_module.configuration_auto.MODEL_NAMES_MAPPING.keys())
|
1572 |
+
|
1573 |
+
# Get old model type
|
1574 |
+
valid_model_type = False
|
1575 |
+
while not valid_model_type:
|
1576 |
+
old_model_type = input(
|
1577 |
+
"What is the model you would like to duplicate? Please provide the lowercase `model_type` (e.g. roberta): "
|
1578 |
+
)
|
1579 |
+
if old_model_type in model_types:
|
1580 |
+
valid_model_type = True
|
1581 |
+
else:
|
1582 |
+
print(f"{old_model_type} is not a valid model type.")
|
1583 |
+
near_choices = difflib.get_close_matches(old_model_type, model_types)
|
1584 |
+
if len(near_choices) >= 1:
|
1585 |
+
if len(near_choices) > 1:
|
1586 |
+
near_choices = " or ".join(near_choices)
|
1587 |
+
print(f"Did you mean {near_choices}?")
|
1588 |
+
|
1589 |
+
old_model_info = retrieve_info_for_model(old_model_type)
|
1590 |
+
old_tokenizer_class = old_model_info["model_patterns"].tokenizer_class
|
1591 |
+
old_image_processor_class = old_model_info["model_patterns"].image_processor_class
|
1592 |
+
old_feature_extractor_class = old_model_info["model_patterns"].feature_extractor_class
|
1593 |
+
old_processor_class = old_model_info["model_patterns"].processor_class
|
1594 |
+
old_frameworks = old_model_info["frameworks"]
|
1595 |
+
|
1596 |
+
old_checkpoint = None
|
1597 |
+
if len(old_model_info["model_patterns"].checkpoint) == 0:
|
1598 |
+
old_checkpoint = get_user_field(
|
1599 |
+
"We couldn't find the name of the base checkpoint for that model, please enter it here."
|
1600 |
+
)
|
1601 |
+
|
1602 |
+
model_name = get_user_field(
|
1603 |
+
"What is the name (with no special casing) for your new model in the paper (e.g. RoBERTa)? "
|
1604 |
+
)
|
1605 |
+
default_patterns = ModelPatterns(model_name, model_name)
|
1606 |
+
|
1607 |
+
model_type = get_user_field(
|
1608 |
+
"What identifier would you like to use for the `model_type` of this model? ",
|
1609 |
+
default_value=default_patterns.model_type,
|
1610 |
+
)
|
1611 |
+
model_lower_cased = get_user_field(
|
1612 |
+
"What lowercase name would you like to use for the module (folder) of this model? ",
|
1613 |
+
default_value=default_patterns.model_lower_cased,
|
1614 |
+
)
|
1615 |
+
model_camel_cased = get_user_field(
|
1616 |
+
"What prefix (camel-cased) would you like to use for the model classes of this model (e.g. Roberta)? ",
|
1617 |
+
default_value=default_patterns.model_camel_cased,
|
1618 |
+
)
|
1619 |
+
model_upper_cased = get_user_field(
|
1620 |
+
"What prefix (upper-cased) would you like to use for the constants relative to this model? ",
|
1621 |
+
default_value=default_patterns.model_upper_cased,
|
1622 |
+
)
|
1623 |
+
config_class = get_user_field(
|
1624 |
+
"What will be the name of the config class for this model? ", default_value=f"{model_camel_cased}Config"
|
1625 |
+
)
|
1626 |
+
checkpoint = get_user_field(
|
1627 |
+
"Please give a checkpoint identifier (on the model Hub) for this new model (e.g. facebook/FacebookAI/roberta-base): "
|
1628 |
+
)
|
1629 |
+
|
1630 |
+
old_processing_classes = [
|
1631 |
+
c
|
1632 |
+
for c in [old_image_processor_class, old_feature_extractor_class, old_tokenizer_class, old_processor_class]
|
1633 |
+
if c is not None
|
1634 |
+
]
|
1635 |
+
old_processing_classes = ", ".join(old_processing_classes)
|
1636 |
+
keep_processing = get_user_field(
|
1637 |
+
f"Will your new model use the same processing class as {old_model_type} ({old_processing_classes}) (yes/no)? ",
|
1638 |
+
convert_to=convert_to_bool,
|
1639 |
+
fallback_message="Please answer yes/no, y/n, true/false or 1/0. ",
|
1640 |
+
)
|
1641 |
+
if keep_processing:
|
1642 |
+
image_processor_class = old_image_processor_class
|
1643 |
+
feature_extractor_class = old_feature_extractor_class
|
1644 |
+
processor_class = old_processor_class
|
1645 |
+
tokenizer_class = old_tokenizer_class
|
1646 |
+
else:
|
1647 |
+
if old_tokenizer_class is not None:
|
1648 |
+
tokenizer_class = get_user_field(
|
1649 |
+
"What will be the name of the tokenizer class for this model? ",
|
1650 |
+
default_value=f"{model_camel_cased}Tokenizer",
|
1651 |
+
)
|
1652 |
+
else:
|
1653 |
+
tokenizer_class = None
|
1654 |
+
if old_image_processor_class is not None:
|
1655 |
+
image_processor_class = get_user_field(
|
1656 |
+
"What will be the name of the image processor class for this model? ",
|
1657 |
+
default_value=f"{model_camel_cased}ImageProcessor",
|
1658 |
+
)
|
1659 |
+
else:
|
1660 |
+
image_processor_class = None
|
1661 |
+
if old_feature_extractor_class is not None:
|
1662 |
+
feature_extractor_class = get_user_field(
|
1663 |
+
"What will be the name of the feature extractor class for this model? ",
|
1664 |
+
default_value=f"{model_camel_cased}FeatureExtractor",
|
1665 |
+
)
|
1666 |
+
else:
|
1667 |
+
feature_extractor_class = None
|
1668 |
+
if old_processor_class is not None:
|
1669 |
+
processor_class = get_user_field(
|
1670 |
+
"What will be the name of the processor class for this model? ",
|
1671 |
+
default_value=f"{model_camel_cased}Processor",
|
1672 |
+
)
|
1673 |
+
else:
|
1674 |
+
processor_class = None
|
1675 |
+
|
1676 |
+
model_patterns = ModelPatterns(
|
1677 |
+
model_name,
|
1678 |
+
checkpoint,
|
1679 |
+
model_type=model_type,
|
1680 |
+
model_lower_cased=model_lower_cased,
|
1681 |
+
model_camel_cased=model_camel_cased,
|
1682 |
+
model_upper_cased=model_upper_cased,
|
1683 |
+
config_class=config_class,
|
1684 |
+
tokenizer_class=tokenizer_class,
|
1685 |
+
image_processor_class=image_processor_class,
|
1686 |
+
feature_extractor_class=feature_extractor_class,
|
1687 |
+
processor_class=processor_class,
|
1688 |
+
)
|
1689 |
+
|
1690 |
+
add_copied_from = get_user_field(
|
1691 |
+
"Should we add # Copied from statements when creating the new modeling file (yes/no)? ",
|
1692 |
+
convert_to=convert_to_bool,
|
1693 |
+
default_value="yes",
|
1694 |
+
fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
|
1695 |
+
)
|
1696 |
+
|
1697 |
+
all_frameworks = get_user_field(
|
1698 |
+
"Should we add a version of your new model in all the frameworks implemented by"
|
1699 |
+
f" {old_model_type} ({old_frameworks}) (yes/no)? ",
|
1700 |
+
convert_to=convert_to_bool,
|
1701 |
+
default_value="yes",
|
1702 |
+
fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
|
1703 |
+
)
|
1704 |
+
if all_frameworks:
|
1705 |
+
frameworks = None
|
1706 |
+
else:
|
1707 |
+
frameworks = get_user_field(
|
1708 |
+
"Please enter the list of framworks you want (pt, tf, flax) separated by spaces",
|
1709 |
+
is_valid_answer=lambda x: all(p in ["pt", "tf", "flax"] for p in x.split(" ")),
|
1710 |
+
)
|
1711 |
+
frameworks = list(set(frameworks.split(" ")))
|
1712 |
+
|
1713 |
+
return (old_model_type, model_patterns, add_copied_from, frameworks, old_checkpoint)
|
venv/lib/python3.10/site-packages/transformers/commands/convert.py
ADDED
@@ -0,0 +1,165 @@
1 |
+
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
from argparse import ArgumentParser, Namespace
|
16 |
+
|
17 |
+
from ..utils import logging
|
18 |
+
from . import BaseTransformersCLICommand
|
19 |
+
|
20 |
+
|
21 |
+
def convert_command_factory(args: Namespace):
|
22 |
+
"""
|
23 |
+
Factory function used to convert a model's TF 1.0 checkpoint into a PyTorch checkpoint.
|
24 |
+
|
25 |
+
Returns: ConvertCommand
|
26 |
+
"""
|
27 |
+
return ConvertCommand(
|
28 |
+
args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
|
29 |
+
)
|
30 |
+
|
31 |
+
|
32 |
+
IMPORT_ERROR_MESSAGE = """
|
33 |
+
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
|
34 |
+
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
|
35 |
+
"""
|
36 |
+
|
37 |
+
|
38 |
+
class ConvertCommand(BaseTransformersCLICommand):
|
39 |
+
@staticmethod
|
40 |
+
def register_subcommand(parser: ArgumentParser):
|
41 |
+
"""
|
42 |
+
Register this command with argparse so it's available for the transformers-cli
|
43 |
+
|
44 |
+
Args:
|
45 |
+
parser: Root parser to register command-specific arguments
|
46 |
+
"""
|
47 |
+
train_parser = parser.add_parser(
|
48 |
+
"convert",
|
49 |
+
help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
|
50 |
+
)
|
51 |
+
train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
|
52 |
+
train_parser.add_argument(
|
53 |
+
"--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
|
54 |
+
)
|
55 |
+
train_parser.add_argument(
|
56 |
+
"--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
|
57 |
+
)
|
58 |
+
train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
|
59 |
+
train_parser.add_argument(
|
60 |
+
"--finetuning_task_name",
|
61 |
+
type=str,
|
62 |
+
default=None,
|
63 |
+
help="Optional fine-tuning task name if the TF model was a finetuned model.",
|
64 |
+
)
|
65 |
+
train_parser.set_defaults(func=convert_command_factory)
|
66 |
+
|
67 |
+
def __init__(
|
68 |
+
self,
|
69 |
+
model_type: str,
|
70 |
+
tf_checkpoint: str,
|
71 |
+
pytorch_dump_output: str,
|
72 |
+
config: str,
|
73 |
+
finetuning_task_name: str,
|
74 |
+
*args,
|
75 |
+
):
|
76 |
+
self._logger = logging.get_logger("transformers-cli/converting")
|
77 |
+
|
78 |
+
self._logger.info(f"Loading model {model_type}")
|
79 |
+
self._model_type = model_type
|
80 |
+
self._tf_checkpoint = tf_checkpoint
|
81 |
+
self._pytorch_dump_output = pytorch_dump_output
|
82 |
+
self._config = config
|
83 |
+
self._finetuning_task_name = finetuning_task_name
|
84 |
+
|
85 |
+
def run(self):
|
86 |
+
if self._model_type == "albert":
|
87 |
+
try:
|
88 |
+
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
|
89 |
+
convert_tf_checkpoint_to_pytorch,
|
90 |
+
)
|
91 |
+
except ImportError:
|
92 |
+
raise ImportError(IMPORT_ERROR_MESSAGE)
|
93 |
+
|
94 |
+
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
|
95 |
+
elif self._model_type == "bert":
|
96 |
+
try:
|
97 |
+
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
|
98 |
+
convert_tf_checkpoint_to_pytorch,
|
99 |
+
)
|
100 |
+
except ImportError:
|
101 |
+
raise ImportError(IMPORT_ERROR_MESSAGE)
|
102 |
+
|
103 |
+
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
|
104 |
+
elif self._model_type == "funnel":
|
105 |
+
try:
|
106 |
+
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
|
107 |
+
convert_tf_checkpoint_to_pytorch,
|
108 |
+
)
|
109 |
+
except ImportError:
|
110 |
+
raise ImportError(IMPORT_ERROR_MESSAGE)
|
111 |
+
|
112 |
+
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
|
113 |
+
elif self._model_type == "t5":
|
114 |
+
try:
|
115 |
+
from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
|
116 |
+
except ImportError:
|
117 |
+
raise ImportError(IMPORT_ERROR_MESSAGE)
|
118 |
+
|
119 |
+
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
|
120 |
+
elif self._model_type == "gpt":
|
121 |
+
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
|
122 |
+
convert_openai_checkpoint_to_pytorch,
|
123 |
+
)
|
124 |
+
|
125 |
+
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
|
126 |
+
elif self._model_type == "gpt2":
|
127 |
+
try:
|
128 |
+
from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
|
129 |
+
convert_gpt2_checkpoint_to_pytorch,
|
130 |
+
)
|
131 |
+
except ImportError:
|
132 |
+
raise ImportError(IMPORT_ERROR_MESSAGE)
|
133 |
+
|
134 |
+
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
|
135 |
+
elif self._model_type == "xlnet":
|
136 |
+
try:
|
137 |
+
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
|
138 |
+
convert_xlnet_checkpoint_to_pytorch,
|
139 |
+
)
|
140 |
+
except ImportError:
|
141 |
+
raise ImportError(IMPORT_ERROR_MESSAGE)
|
142 |
+
|
143 |
+
convert_xlnet_checkpoint_to_pytorch(
|
144 |
+
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
|
145 |
+
)
|
146 |
+
elif self._model_type == "xlm":
|
147 |
+
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
|
148 |
+
convert_xlm_checkpoint_to_pytorch,
|
149 |
+
)
|
150 |
+
|
151 |
+
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
|
152 |
+
elif self._model_type == "lxmert":
|
153 |
+
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
|
154 |
+
convert_lxmert_checkpoint_to_pytorch,
|
155 |
+
)
|
156 |
+
|
157 |
+
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
|
158 |
+
elif self._model_type == "rembert":
|
159 |
+
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
|
160 |
+
convert_rembert_tf_checkpoint_to_pytorch,
|
161 |
+
)
|
162 |
+
|
163 |
+
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
|
164 |
+
else:
|
165 |
+
raise ValueError("--model_type should be selected in the list [bert, gpt, gpt2, t5, xlnet, xlm, lxmert]")
|
venv/lib/python3.10/site-packages/transformers/commands/download.py
ADDED
@@ -0,0 +1,56 @@
1 |
+
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
from argparse import ArgumentParser
|
16 |
+
|
17 |
+
from . import BaseTransformersCLICommand
|
18 |
+
|
19 |
+
|
20 |
+
def download_command_factory(args):
|
21 |
+
return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
|
22 |
+
|
23 |
+
|
24 |
+
class DownloadCommand(BaseTransformersCLICommand):
|
25 |
+
@staticmethod
|
26 |
+
def register_subcommand(parser: ArgumentParser):
|
27 |
+
download_parser = parser.add_parser("download")
|
28 |
+
download_parser.add_argument(
|
29 |
+
"--cache-dir", type=str, default=None, help="Path to location to store the models"
|
30 |
+
)
|
31 |
+
download_parser.add_argument(
|
32 |
+
"--force", action="store_true", help="Force the model to be download even if already in cache-dir"
|
33 |
+
)
|
34 |
+
download_parser.add_argument(
|
35 |
+
"--trust-remote-code",
|
36 |
+
action="store_true",
|
37 |
+
help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
|
38 |
+
)
|
39 |
+
download_parser.add_argument("model", type=str, help="Name of the model to download")
|
40 |
+
download_parser.set_defaults(func=download_command_factory)
|
41 |
+
|
42 |
+
def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
|
43 |
+
self._model = model
|
44 |
+
self._cache = cache
|
45 |
+
self._force = force
|
46 |
+
self._trust_remote_code = trust_remote_code
|
47 |
+
|
48 |
+
def run(self):
|
49 |
+
from ..models.auto import AutoModel, AutoTokenizer
|
50 |
+
|
51 |
+
AutoModel.from_pretrained(
|
52 |
+
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
|
53 |
+
)
|
54 |
+
AutoTokenizer.from_pretrained(
|
55 |
+
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
|
56 |
+
)
|
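For orientation, a minimal sketch (not part of the upload above) of what the download command amounts to at runtime: DownloadCommand.run() simply warms the local cache through the auto classes. The checkpoint name below is an arbitrary example, not taken from the diff.

from transformers import AutoModel, AutoTokenizer

model_name = "bert-base-uncased"  # example checkpoint, chosen for illustration only
AutoModel.from_pretrained(model_name, cache_dir=None, force_download=False)
AutoTokenizer.from_pretrained(model_name, cache_dir=None, force_download=False)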
venv/lib/python3.10/site-packages/transformers/commands/env.py
ADDED
@@ -0,0 +1,143 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
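A quick, standalone illustration of the report formatting at the end of EnvironmentCommand.run(): the function body is copied from format_dict above, and the dict values are placeholders, not real environment data.

def format_dict(d):
    # Same one-liner as EnvironmentCommand.format_dict
    return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"

print(format_dict({"`transformers` version": "4.x", "Python version": "3.10.12"}))
# - `transformers` version: 4.x
# - Python version: 3.10.12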
venv/lib/python3.10/site-packages/transformers/commands/lfs.py
ADDED
@@ -0,0 +1,226 @@
"""
Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs.

Inspired by: github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py

Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md


To launch debugger while developing:

``` [lfs "customtransfer.multipart"]
path = /path/to/transformers/.env/bin/python args = -m debugpy --listen 5678 --wait-for-client
/path/to/transformers/src/transformers/commands/transformers_cli.py lfs-multipart-upload ```"""

import json
import os
import subprocess
import sys
import warnings
from argparse import ArgumentParser
from contextlib import AbstractContextManager
from typing import Dict, List, Optional

import requests

from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload"


class LfsCommands(BaseTransformersCLICommand):
    """
    Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. This lets users upload
    large files >5GB 🔥. Spec for LFS custom transfer agent is:
    https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md

    This introduces two commands to the CLI:

    1. $ transformers-cli lfs-enable-largefiles

    This should be executed once for each model repo that contains a model file >5GB. It's documented in the error
    message you get if you just try to git push a 5GB file without having enabled it before.

    2. $ transformers-cli lfs-multipart-upload

    This command is called by lfs directly and is not meant to be called by the user.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        enable_parser = parser.add_parser(
            "lfs-enable-largefiles",
            help=(
                "Deprecated: use `huggingface-cli` instead. Configure your repository to enable upload of files > 5GB."
            ),
        )
        enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")
        enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args))

        upload_parser = parser.add_parser(
            LFS_MULTIPART_UPLOAD_COMMAND,
            help=(
                "Deprecated: use `huggingface-cli` instead. "
                "Command will get called by git-lfs, do not call it directly."
            ),
        )
        upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args))


class LfsEnableCommand:
    def __init__(self, args):
        self.args = args

    def run(self):
        warnings.warn(
            "Managing repositories through transformers-cli is deprecated. Please use `huggingface-cli` instead."
        )
        local_path = os.path.abspath(self.args.path)
        if not os.path.isdir(local_path):
            print("This does not look like a valid git repo.")
            exit(1)
        subprocess.run(
            "git config lfs.customtransfer.multipart.path transformers-cli".split(), check=True, cwd=local_path
        )
        subprocess.run(
            f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
            check=True,
            cwd=local_path,
        )
        print("Local repo set up for largefiles")


def write_msg(msg: Dict):
    """Write out the message in Line delimited JSON."""
    msg = json.dumps(msg) + "\n"
    sys.stdout.write(msg)
    sys.stdout.flush()


def read_msg() -> Optional[Dict]:
    """Read Line delimited JSON from stdin."""
    msg = json.loads(sys.stdin.readline().strip())

    if "terminate" in (msg.get("type"), msg.get("event")):
        # terminate message received
        return None

    if msg.get("event") not in ("download", "upload"):
        logger.critical("Received unexpected message")
        sys.exit(1)

    return msg


class FileSlice(AbstractContextManager):
    """
    File-like object that only reads a slice of a file

    Inspired by stackoverflow.com/a/29838711/593036
    """

    def __init__(self, filepath: str, seek_from: int, read_limit: int):
        self.filepath = filepath
        self.seek_from = seek_from
        self.read_limit = read_limit
        self.n_seen = 0

    def __enter__(self):
        self.f = open(self.filepath, "rb")
        self.f.seek(self.seek_from)
        return self

    def __len__(self):
        total_length = os.fstat(self.f.fileno()).st_size
        return min(self.read_limit, total_length - self.seek_from)

    def read(self, n=-1):
        if self.n_seen >= self.read_limit:
            return b""
        remaining_amount = self.read_limit - self.n_seen
        data = self.f.read(remaining_amount if n < 0 else min(n, remaining_amount))
        self.n_seen += len(data)
        return data

    def __iter__(self):
        yield self.read(n=4 * 1024 * 1024)

    def __exit__(self, *args):
        self.f.close()


class LfsUploadCommand:
    def __init__(self, args):
        self.args = args

    def run(self):
        # Immediately after invoking a custom transfer process, git-lfs
        # sends initiation data to the process over stdin.
        # This tells the process useful information about the configuration.
        init_msg = json.loads(sys.stdin.readline().strip())
        if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
            write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
            sys.exit(1)

        # The transfer process should use the information it needs from the
        # initiation structure, and also perform any one-off setup tasks it
        # needs to do. It should then respond on stdout with a simple empty
        # confirmation structure, as follows:
        write_msg({})

        # After the initiation exchange, git-lfs will send any number of
        # transfer requests to the stdin of the transfer process, in a serial sequence.
        while True:
            msg = read_msg()
            if msg is None:
                # When all transfers have been processed, git-lfs will send
                # a terminate event to the stdin of the transfer process.
                # On receiving this message the transfer process should
                # clean up and terminate. No response is expected.
                sys.exit(0)

            oid = msg["oid"]
            filepath = msg["path"]
            completion_url = msg["action"]["href"]
            header = msg["action"]["header"]
            chunk_size = int(header.pop("chunk_size"))
            presigned_urls: List[str] = list(header.values())

            parts = []
            for i, presigned_url in enumerate(presigned_urls):
                with FileSlice(filepath, seek_from=i * chunk_size, read_limit=chunk_size) as data:
                    r = requests.put(presigned_url, data=data)
                    r.raise_for_status()
                    parts.append(
                        {
                            "etag": r.headers.get("etag"),
                            "partNumber": i + 1,
                        }
                    )
                    # In order to support progress reporting while data is uploading / downloading,
                    # the transfer process should post messages to stdout
                    write_msg(
                        {
                            "event": "progress",
                            "oid": oid,
                            "bytesSoFar": (i + 1) * chunk_size,
                            "bytesSinceLast": chunk_size,
                        }
                    )
                    # Not precise but that's ok.

            r = requests.post(
                completion_url,
                json={
                    "oid": oid,
                    "parts": parts,
                },
            )
            r.raise_for_status()

            write_msg({"event": "complete", "oid": oid})
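The FileSlice helper above is what feeds each presigned URL one chunk of the file at a time. A small usage sketch, assuming transformers is installed; the file name and chunk size are made up for illustration.

from transformers.commands.lfs import FileSlice

chunk_size = 4 * 1024 * 1024  # hypothetical value; in practice it comes from the "chunk_size" header above
with FileSlice("model.bin", seek_from=0 * chunk_size, read_limit=chunk_size) as data:
    first_part = data.read()           # at most read_limit bytes, starting at seek_from
    print(len(data), len(first_part))  # __len__ is min(read_limit, file_size - seek_from)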
venv/lib/python3.10/site-packages/transformers/commands/pt_to_tf.py
ADDED
@@ -0,0 +1,425 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import os
from argparse import ArgumentParser, Namespace
from importlib import import_module

import huggingface_hub
import numpy as np
from packaging import version

from .. import (
    FEATURE_EXTRACTOR_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoImageProcessor,
    AutoProcessor,
    AutoTokenizer,
    is_datasets_available,
    is_tf_available,
    is_torch_available,
)
from ..utils import TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, logging
from . import BaseTransformersCLICommand


if is_tf_available():
    import tensorflow as tf

    tf.config.experimental.enable_tensor_float_32_execution(False)

if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset


MAX_ERROR = 5e-5  # larger error tolerance than in our internal tests, to avoid flaky user-facing errors


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model PyTorch checkpoint in a TensorFlow 2 checkpoint.

    Returns: ServeCommand
    """
    return PTtoTFCommand(
        args.model_name,
        args.local_dir,
        args.max_error,
        args.new_weights,
        args.no_pr,
        args.push,
        args.extra_commit_description,
        args.override_model_class,
    )


class PTtoTFCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "pt-to-tf",
            help=(
                "CLI tool to run convert a transformers model from a PyTorch checkpoint to a TensorFlow checkpoint."
                " Can also be used to validate existing weights without opening PRs, with --no-pr."
            ),
        )
        train_parser.add_argument(
            "--model-name",
            type=str,
            required=True,
            help="The model name, including owner/organization, as seen on the hub.",
        )
        train_parser.add_argument(
            "--local-dir",
            type=str,
            default="",
            help="Optional local directory of the model repository. Defaults to /tmp/{model_name}",
        )
        train_parser.add_argument(
            "--max-error",
            type=float,
            default=MAX_ERROR,
            help=(
                f"Maximum error tolerance. Defaults to {MAX_ERROR}. This flag should be avoided, use at your own risk."
            ),
        )
        train_parser.add_argument(
            "--new-weights",
            action="store_true",
            help="Optional flag to create new TensorFlow weights, even if they already exist.",
        )
        train_parser.add_argument(
            "--no-pr", action="store_true", help="Optional flag to NOT open a PR with converted weights."
        )
        train_parser.add_argument(
            "--push",
            action="store_true",
            help="Optional flag to push the weights directly to `main` (requires permissions)",
        )
        train_parser.add_argument(
            "--extra-commit-description",
            type=str,
            default="",
            help="Optional additional commit description to use when opening a PR (e.g. to tag the owner).",
        )
        train_parser.add_argument(
            "--override-model-class",
            type=str,
            default=None,
            help="If you think you know better than the auto-detector, you can specify the model class here. "
            "Can be either an AutoModel class or a specific model class like BertForSequenceClassification.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    @staticmethod
    def find_pt_tf_differences(pt_outputs, tf_outputs):
        """
        Compares the TensorFlow and PyTorch outputs, returning a dictionary with all tensor differences.
        """
        # 1. All output attributes must be the same
        pt_out_attrs = set(pt_outputs.keys())
        tf_out_attrs = set(tf_outputs.keys())
        if pt_out_attrs != tf_out_attrs:
            raise ValueError(
                f"The model outputs have different attributes, aborting. (Pytorch: {pt_out_attrs}, TensorFlow:"
                f" {tf_out_attrs})"
            )

        # 2. For each output attribute, computes the difference
        def _find_pt_tf_differences(pt_out, tf_out, differences, attr_name=""):
            # If the current attribute is a tensor, it is a leaf and we make the comparison. Otherwise, we will dig in
            # recursivelly, keeping the name of the attribute.
            if isinstance(pt_out, torch.Tensor):
                tensor_difference = np.max(np.abs(pt_out.numpy() - tf_out.numpy()))
                differences[attr_name] = tensor_difference
            else:
                root_name = attr_name
                for i, pt_item in enumerate(pt_out):
                    # If it is a named attribute, we keep the name. Otherwise, just its index.
                    if isinstance(pt_item, str):
                        branch_name = root_name + pt_item
                        tf_item = tf_out[pt_item]
                        pt_item = pt_out[pt_item]
                    else:
                        branch_name = root_name + f"[{i}]"
                        tf_item = tf_out[i]
                    differences = _find_pt_tf_differences(pt_item, tf_item, differences, branch_name)

            return differences

        return _find_pt_tf_differences(pt_outputs, tf_outputs, {})

    def __init__(
        self,
        model_name: str,
        local_dir: str,
        max_error: float,
        new_weights: bool,
        no_pr: bool,
        push: bool,
        extra_commit_description: str,
        override_model_class: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/pt_to_tf")
        self._model_name = model_name
        self._local_dir = local_dir if local_dir else os.path.join("/tmp", model_name)
        self._max_error = max_error
        self._new_weights = new_weights
        self._no_pr = no_pr
        self._push = push
        self._extra_commit_description = extra_commit_description
        self._override_model_class = override_model_class

    def get_inputs(self, pt_model, tf_dummy_inputs, config):
        """
        Returns the right inputs for the model, based on its signature.
        """

        def _get_audio_input():
            ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
            speech_samples = ds.sort("id").select(range(2))[:2]["audio"]
            raw_samples = [x["array"] for x in speech_samples]
            return raw_samples

        model_config_class = type(pt_model.config)
        if model_config_class in PROCESSOR_MAPPING:
            processor = AutoProcessor.from_pretrained(self._local_dir)
            if model_config_class in TOKENIZER_MAPPING and processor.tokenizer.pad_token is None:
                processor.tokenizer.pad_token = processor.tokenizer.eos_token
        elif model_config_class in IMAGE_PROCESSOR_MAPPING:
            processor = AutoImageProcessor.from_pretrained(self._local_dir)
        elif model_config_class in FEATURE_EXTRACTOR_MAPPING:
            processor = AutoFeatureExtractor.from_pretrained(self._local_dir)
        elif model_config_class in TOKENIZER_MAPPING:
            processor = AutoTokenizer.from_pretrained(self._local_dir)
            if processor.pad_token is None:
                processor.pad_token = processor.eos_token
        else:
            raise ValueError(f"Unknown data processing type (model config type: {model_config_class})")

        model_forward_signature = set(inspect.signature(pt_model.forward).parameters.keys())
        processor_inputs = {}
        if "input_ids" in model_forward_signature:
            processor_inputs.update(
                {
                    "text": ["Hi there!", "I am a batch with more than one row and different input lengths."],
                    "padding": True,
                    "truncation": True,
                }
            )
        if "pixel_values" in model_forward_signature:
            sample_images = load_dataset("cifar10", "plain_text", split="test")[:2]["img"]
            processor_inputs.update({"images": sample_images})
        if "input_features" in model_forward_signature:
            feature_extractor_signature = inspect.signature(processor.feature_extractor).parameters
            # Pad to the largest input length by default but take feature extractor default
            # padding value if it exists e.g. "max_length" and is not False or None
            if "padding" in feature_extractor_signature:
                default_strategy = feature_extractor_signature["padding"].default
                if default_strategy is not False and default_strategy is not None:
                    padding_strategy = default_strategy
                else:
                    padding_strategy = True
            else:
                padding_strategy = True
            processor_inputs.update({"audio": _get_audio_input(), "padding": padding_strategy})
        if "input_values" in model_forward_signature:  # Wav2Vec2 audio input
            processor_inputs.update({"audio": _get_audio_input(), "padding": True})
        pt_input = processor(**processor_inputs, return_tensors="pt")
        tf_input = processor(**processor_inputs, return_tensors="tf")

        # Extra input requirements, in addition to the input modality
        if (
            config.is_encoder_decoder
            or (hasattr(pt_model, "encoder") and hasattr(pt_model, "decoder"))
            or "decoder_input_ids" in tf_dummy_inputs
        ):
            decoder_input_ids = np.asarray([[1], [1]], dtype=int) * (pt_model.config.decoder_start_token_id or 0)
            pt_input.update({"decoder_input_ids": torch.tensor(decoder_input_ids)})
            tf_input.update({"decoder_input_ids": tf.convert_to_tensor(decoder_input_ids)})

        return pt_input, tf_input

    def run(self):
        # hub version 0.9.0 introduced the possibility of programmatically opening PRs with normal write tokens.
        if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"):
            raise ImportError(
                "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub"
                " installation."
            )
        else:
            from huggingface_hub import Repository, create_commit
            from huggingface_hub._commit_api import CommitOperationAdd

        # Fetch remote data
        repo = Repository(local_dir=self._local_dir, clone_from=self._model_name)

        # Load config and get the appropriate architecture -- the latter is needed to convert the head's weights
        config = AutoConfig.from_pretrained(self._local_dir)
        architectures = config.architectures
        if self._override_model_class is not None:
            if self._override_model_class.startswith("TF"):
                architectures = [self._override_model_class[2:]]
            else:
                architectures = [self._override_model_class]
            try:
                pt_class = getattr(import_module("transformers"), architectures[0])
            except AttributeError:
                raise ValueError(f"Model class {self._override_model_class} not found in transformers.")
            try:
                tf_class = getattr(import_module("transformers"), "TF" + architectures[0])
            except AttributeError:
                raise ValueError(f"TF model class TF{self._override_model_class} not found in transformers.")
        elif architectures is None:  # No architecture defined -- use auto classes
            pt_class = getattr(import_module("transformers"), "AutoModel")
            tf_class = getattr(import_module("transformers"), "TFAutoModel")
            self._logger.warning("No detected architecture, using AutoModel/TFAutoModel")
        else:  # Architecture defined -- use it
            if len(architectures) > 1:
                raise ValueError(f"More than one architecture was found, aborting. (architectures = {architectures})")
            self._logger.warning(f"Detected architecture: {architectures[0]}")
            pt_class = getattr(import_module("transformers"), architectures[0])
            try:
                tf_class = getattr(import_module("transformers"), "TF" + architectures[0])
            except AttributeError:
                raise AttributeError(f"The TensorFlow equivalent of {architectures[0]} doesn't exist in transformers.")

        # Check the TF dummy inputs to see what keys we need in the forward pass
        tf_from_pt_model = tf_class.from_config(config)
        tf_dummy_inputs = tf_from_pt_model.dummy_inputs

        del tf_from_pt_model  # Try to keep only one model in memory at a time

        # Load the model and get some basic inputs
        pt_model = pt_class.from_pretrained(self._local_dir)
        pt_model.eval()

        pt_input, tf_input = self.get_inputs(pt_model, tf_dummy_inputs, config)

        with torch.no_grad():
            pt_outputs = pt_model(**pt_input, output_hidden_states=True)
        del pt_model  # will no longer be used, and may have a large memory footprint

        tf_from_pt_model = tf_class.from_pretrained(self._local_dir, from_pt=True)
        tf_from_pt_outputs = tf_from_pt_model(**tf_input, output_hidden_states=True, training=False)

        # Confirms that cross loading PT weights into TF worked.
        crossload_differences = self.find_pt_tf_differences(pt_outputs, tf_from_pt_outputs)
        output_differences = {k: v for k, v in crossload_differences.items() if "hidden" not in k}
        hidden_differences = {k: v for k, v in crossload_differences.items() if "hidden" in k}
        if len(output_differences) == 0 and architectures is not None:
            raise ValueError(
                f"Something went wrong -- the config file has architectures ({architectures}), but no model head"
                " output was found. All outputs start with 'hidden'"
            )
        max_crossload_output_diff = max(output_differences.values()) if output_differences else 0.0
        max_crossload_hidden_diff = max(hidden_differences.values())
        if max_crossload_output_diff > self._max_error or max_crossload_hidden_diff > self._max_error:
            raise ValueError(
                "The cross-loaded TensorFlow model has different outputs, something went wrong!\n"
                + f"\nList of maximum output differences above the threshold ({self._max_error}):\n"
                + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > self._max_error])
                + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_error}):\n"
                + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_error])
            )

        # Save the weights in a TF format (if needed) and confirms that the results are still good
        tf_weights_path = os.path.join(self._local_dir, TF2_WEIGHTS_NAME)
        tf_weights_index_path = os.path.join(self._local_dir, TF2_WEIGHTS_INDEX_NAME)
        if (not os.path.exists(tf_weights_path) and not os.path.exists(tf_weights_index_path)) or self._new_weights:
            tf_from_pt_model.save_pretrained(self._local_dir)
        del tf_from_pt_model  # will no longer be used, and may have a large memory footprint

        tf_model = tf_class.from_pretrained(self._local_dir)
        tf_outputs = tf_model(**tf_input, output_hidden_states=True)

        conversion_differences = self.find_pt_tf_differences(pt_outputs, tf_outputs)
        output_differences = {k: v for k, v in conversion_differences.items() if "hidden" not in k}
        hidden_differences = {k: v for k, v in conversion_differences.items() if "hidden" in k}
        if len(output_differences) == 0 and architectures is not None:
            raise ValueError(
                f"Something went wrong -- the config file has architectures ({architectures}), but no model head"
                " output was found. All outputs start with 'hidden'"
            )
        max_conversion_output_diff = max(output_differences.values()) if output_differences else 0.0
        max_conversion_hidden_diff = max(hidden_differences.values())
        if max_conversion_output_diff > self._max_error or max_conversion_hidden_diff > self._max_error:
            raise ValueError(
                "The converted TensorFlow model has different outputs, something went wrong!\n"
                + f"\nList of maximum output differences above the threshold ({self._max_error}):\n"
                + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > self._max_error])
                + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_error}):\n"
                + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_error])
            )

        commit_message = "Update TF weights" if self._new_weights else "Add TF weights"
        if self._push:
            repo.git_add(auto_lfs_track=True)
            repo.git_commit(commit_message)
            repo.git_push(blocking=True)  # this prints a progress bar with the upload
            self._logger.warning(f"TF weights pushed into {self._model_name}")
        elif not self._no_pr:
            self._logger.warning("Uploading the weights into a new PR...")
            commit_descrition = (
                "Model converted by the [`transformers`' `pt_to_tf`"
                " CLI](https://github.com/huggingface/transformers/blob/main/src/transformers/commands/pt_to_tf.py). "
                "All converted model outputs and hidden layers were validated against its PyTorch counterpart.\n\n"
                f"Maximum crossload output difference={max_crossload_output_diff:.3e}; "
                f"Maximum crossload hidden layer difference={max_crossload_hidden_diff:.3e};\n"
                f"Maximum conversion output difference={max_conversion_output_diff:.3e}; "
                f"Maximum conversion hidden layer difference={max_conversion_hidden_diff:.3e};\n"
            )
            if self._max_error > MAX_ERROR:
                commit_descrition += (
                    f"\n\nCAUTION: The maximum admissible error was manually increased to {self._max_error}!"
                )
            if self._extra_commit_description:
                commit_descrition += "\n\n" + self._extra_commit_description

            # sharded model -> adds all related files (index and .h5 shards)
            if os.path.exists(tf_weights_index_path):
                operations = [
                    CommitOperationAdd(path_in_repo=TF2_WEIGHTS_INDEX_NAME, path_or_fileobj=tf_weights_index_path)
                ]
                for shard_path in tf.io.gfile.glob(self._local_dir + "/tf_model-*.h5"):
                    operations += [
                        CommitOperationAdd(path_in_repo=os.path.basename(shard_path), path_or_fileobj=shard_path)
                    ]
            else:
                operations = [CommitOperationAdd(path_in_repo=TF2_WEIGHTS_NAME, path_or_fileobj=tf_weights_path)]

            hub_pr_url = create_commit(
                repo_id=self._model_name,
                operations=operations,
                commit_message=commit_message,
                commit_description=commit_descrition,
                repo_type="model",
                create_pr=True,
            ).pr_url
            self._logger.warning(f"PR open in {hub_pr_url}")
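The acceptance rule applied twice above (after cross-loading the PyTorch weights into TF, and again after reloading the saved TF weights) reduces to a per-tensor max-absolute-difference check against the tolerance. A standalone sketch, with made-up numbers standing in for find_pt_tf_differences() output:

MAX_ERROR = 5e-5  # same default tolerance as above

differences = {"logits": 1.2e-6, "hidden_states[0]": 3.4e-7}  # placeholder values, not real measurements
output_diffs = {k: v for k, v in differences.items() if "hidden" not in k}
hidden_diffs = {k: v for k, v in differences.items() if "hidden" in k}
assert max(output_diffs.values()) <= MAX_ERROR
assert max(hidden_diffs.values()) <= MAX_ERROR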
venv/lib/python3.10/site-packages/transformers/commands/run.py
ADDED
@@ -0,0 +1,110 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
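A minimal sketch of the wiring run_command_factory() performs: a pipeline plus a PipelineDataFormat reader chosen from the file extension. The task name, column, and file paths below are placeholders for illustration only.

from transformers import pipeline
from transformers.pipelines import PipelineDataFormat

nlp = pipeline(task="sentiment-analysis", device=-1)  # placeholder task
reader = PipelineDataFormat.from_str(
    format="csv", output_path="out.csv", input_path="in.csv", column="text", overwrite=False
)
# RunCommand(nlp, reader).run() would then iterate the reader and save the outputs.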
venv/lib/python3.10/site-packages/transformers/commands/serving.py
ADDED
@@ -0,0 +1,228 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate serving server from provided command line arguments.

    Returns: ServeCommand
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """
    Expose model information
    """

    infos: dict


class ServeTokenizeResult(BaseModel):
    """
    Tokenize result model
    """

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """
    DeTokenize result model
    """

    text: str


class ServeForwardResult(BaseModel):
    """
    Forward result model
    """

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually returns corresponding tokens id: - **text_input**: String to
        tokenize - **return_ids**: Boolean flags indicating if the tokens have to be converted to their integer
        mapping.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided tokens ids to readable text: - **tokens_ids**: List of tokens ids -
        **skip_special_tokens**: Flag indicating to not try to decode special tokens - **cleanup_tokenization_spaces**:
        Flag indicating to remove all leading/trailing spaces and intermediate ones.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        **inputs**: **attention_mask**: **tokens_type_ids**:
        """

        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
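For context, a stripped-down sketch of the serving pattern used above (one FastAPI APIRoute per operation, served by uvicorn); the endpoint and payload below are placeholders and not part of the transformers API.

from fastapi import FastAPI
from fastapi.routing import APIRoute
from starlette.responses import JSONResponse
from uvicorn import run

def model_info():
    return {"infos": {"model_type": "example"}}  # placeholder payload

app = FastAPI(routes=[APIRoute("/", model_info, response_class=JSONResponse, methods=["GET"])])

if __name__ == "__main__":
    run(app, host="localhost", port=8888)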
venv/lib/python3.10/site-packages/transformers/commands/train.py
ADDED
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate training command from provided command line arguments.

    Returns: TrainCommand
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for transformers-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )

        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )

        train_parser.add_argument("--output", type=str, default="./", help="path to save the trained model.")

        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="google-bert/bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
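The command above is normally reached through `transformers-cli train ...`, but the same arguments can be assembled by hand. A minimal sketch, with placeholder file paths, noting that only the TensorFlow path (`run_tf`) is implemented here and that the torch path raises `NotImplementedError`:

```python
# Hedged sketch of driving TrainCommand programmatically; paths and dataset are placeholders.
from argparse import Namespace

from transformers.commands.train import TrainCommand

args = Namespace(
    train_data="data/train.tsv",        # tab-separated: label <TAB> sentence per the --train_data help text
    column_label=0,
    column_text=1,
    column_id=2,
    skip_first_row=True,
    validation_data="",
    validation_split=0.1,
    output="./trained_model",
    task="text_classification",
    model="google-bert/bert-base-uncased",
    train_batch_size=32,
    valid_batch_size=64,
    learning_rate=3e-5,
    adam_epsilon=1e-08,
)

command = TrainCommand(args)  # loads the pipeline and the CSV dataset(s)
command.run()                 # dispatches to run_tf() or run_torch() depending on the installed backend
```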
venv/lib/python3.10/site-packages/transformers/commands/transformers_cli.py
ADDED
@@ -0,0 +1,59 @@
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
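`main()` dispatches purely on the `func` default each command attaches to its subparser. A minimal sketch of the same contract with a hypothetical `HelloCommand` (not part of transformers), useful for seeing how `args.func(args).run()` resolves:

```python
# Hedged sketch of the register_subcommand / func / run() contract; HelloCommand is hypothetical.
from argparse import ArgumentParser, Namespace

from transformers.commands import BaseTransformersCLICommand


class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args))

    def __init__(self, args: Namespace):
        self.args = args

    def run(self):
        print(f"Hello, {self.args.name}!")


if __name__ == "__main__":
    parser = ArgumentParser("demo", usage="demo <command> [<args>]")
    commands_parser = parser.add_subparsers(help="demo command helpers")
    HelloCommand.register_subcommand(commands_parser)

    args = parser.parse_args(["hello", "--name", "transformers"])
    args.func(args).run()  # same dispatch shape as main() above
```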
venv/lib/python3.10/site-packages/transformers/commands/user.py
ADDED
@@ -0,0 +1,197 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
from argparse import ArgumentParser
from typing import List, Union

from huggingface_hub.hf_api import HfFolder, create_repo, whoami
from requests.exceptions import HTTPError

from . import BaseTransformersCLICommand


class UserCommands(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        login_parser = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
        login_parser.set_defaults(func=lambda args: LoginCommand(args))
        whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
        whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
        logout_parser = parser.add_parser("logout", help="Log out")
        logout_parser.set_defaults(func=lambda args: LogoutCommand(args))

        # new system: git-based repo system
        repo_parser = parser.add_parser(
            "repo",
            help="Deprecated: use `huggingface-cli` instead. Commands to interact with your huggingface.co repos.",
        )
        repo_subparsers = repo_parser.add_subparsers(
            help="Deprecated: use `huggingface-cli` instead. huggingface.co repos related commands"
        )
        repo_create_parser = repo_subparsers.add_parser(
            "create", help="Deprecated: use `huggingface-cli` instead. Create a new repo on huggingface.co"
        )
        repo_create_parser.add_argument(
            "name",
            type=str,
            help="Name for your model's repo. Will be namespaced under your username to build the model id.",
        )
        repo_create_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
        repo_create_parser.add_argument("-y", "--yes", action="store_true", help="Optional: answer Yes to the prompt")
        repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))


class ANSI:
    """
    Helper for en.wikipedia.org/wiki/ANSI_escape_code
    """

    _bold = "\u001b[1m"
    _red = "\u001b[31m"
    _gray = "\u001b[90m"
    _reset = "\u001b[0m"

    @classmethod
    def bold(cls, s):
        return f"{cls._bold}{s}{cls._reset}"

    @classmethod
    def red(cls, s):
        return f"{cls._bold}{cls._red}{s}{cls._reset}"

    @classmethod
    def gray(cls, s):
        return f"{cls._gray}{s}{cls._reset}"


def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
    """
    Inspired by:

    - stackoverflow.com/a/8356620/593036
    - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
    """
    col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
    row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
    lines = []
    lines.append(row_format.format(*headers))
    lines.append(row_format.format(*["-" * w for w in col_widths]))
    for row in rows:
        lines.append(row_format.format(*row))
    return "\n".join(lines)


class BaseUserCommand:
    def __init__(self, args):
        self.args = args


class LoginCommand(BaseUserCommand):
    def run(self):
        print(
            ANSI.red(
                "ERROR! `transformers-cli login` uses an outdated login mechanism "
                "that is not compatible with the Hugging Face Hub backend anymore. "
                "Please use `huggingface-cli login` instead."
            )
        )


class WhoamiCommand(BaseUserCommand):
    def run(self):
        print(
            ANSI.red(
                "WARNING! `transformers-cli whoami` is deprecated and will be removed in v5. Please use "
                "`huggingface-cli whoami` instead."
            )
        )
        token = HfFolder.get_token()
        if token is None:
            print("Not logged in")
            exit()
        try:
            user, orgs = whoami(token)
            print(user)
            if orgs:
                print(ANSI.bold("orgs: "), ",".join(orgs))
        except HTTPError as e:
            print(e)
            print(ANSI.red(e.response.text))
            exit(1)


class LogoutCommand(BaseUserCommand):
    def run(self):
        print(
            ANSI.red(
                "ERROR! `transformers-cli logout` uses an outdated logout mechanism "
                "that is not compatible with the Hugging Face Hub backend anymore. "
                "Please use `huggingface-cli logout` instead."
            )
        )


class RepoCreateCommand(BaseUserCommand):
    def run(self):
        print(
            ANSI.red(
                "WARNING! Managing repositories through transformers-cli is deprecated. "
                "Please use `huggingface-cli` instead."
            )
        )
        token = HfFolder.get_token()
        if token is None:
            print("Not logged in")
            exit(1)
        try:
            stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
            print(ANSI.gray(stdout.strip()))
        except FileNotFoundError:
            print("Looks like you do not have git installed, please install.")

        try:
            stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
            print(ANSI.gray(stdout.strip()))
        except FileNotFoundError:
            print(
                ANSI.red(
                    "Looks like you do not have git-lfs installed, please install."
                    " You can install from https://git-lfs.github.com/."
                    " Then run `git lfs install` (you only have to do this once)."
                )
            )
        print("")

        user, _ = whoami(token)
        namespace = self.args.organization if self.args.organization is not None else user
        full_name = f"{namespace}/{self.args.name}"
        print(f"You are about to create {ANSI.bold(full_name)}")

        if not self.args.yes:
            choice = input("Proceed? [Y/n] ").lower()
            if not (choice == "" or choice == "y" or choice == "yes"):
                print("Abort")
                exit()
        try:
            url = create_repo(token, name=self.args.name, organization=self.args.organization)
        except HTTPError as e:
            print(e)
            print(ANSI.red(e.response.text))
            exit(1)
        print("\nYour repo now lives at:")
        print(f"  {ANSI.bold(url)}")
        print("\nYou can clone it locally with the command below, and commit/push as usual.")
        print(f"\n  git clone {url}")
        print("")
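The `ANSI` and `tabulate` helpers above are self-contained, so they can be exercised directly. A short illustrative snippet (the row data is made up):

```python
# Illustrative use of the ANSI / tabulate helpers defined in user.py; the rows are placeholder data.
from transformers.commands.user import ANSI, tabulate

rows = [
    ["distilbert-base-uncased", 268],
    ["gpt2", 548],
]
table = tabulate(rows, headers=["model", "size (MB)"])  # column widths come from rows + headers

print(ANSI.bold("Cached models"))
print(ANSI.gray(table))
print(ANSI.red("Reminder: `transformers-cli repo` is deprecated, use `huggingface-cli` instead."))
```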
venv/lib/python3.10/site-packages/transformers/generation/__init__.py
ADDED
@@ -0,0 +1,310 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available


_import_structure = {
    "configuration_utils": ["GenerationConfig", "GenerationMode"],
    "streamers": ["TextIteratorStreamer", "TextStreamer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["beam_constraints"] = [
        "Constraint",
        "ConstraintListState",
        "DisjunctiveConstraint",
        "PhrasalConstraint",
    ]
    _import_structure["beam_search"] = [
        "BeamHypotheses",
        "BeamScorer",
        "BeamSearchScorer",
        "ConstrainedBeamSearchScorer",
    ]
    _import_structure["candidate_generator"] = [
        "AssistedCandidateGenerator",
        "CandidateGenerator",
        "PromptLookupCandidateGenerator",
    ]
    _import_structure["logits_process"] = [
        "AlternatingCodebooksLogitsProcessor",
        "ClassifierFreeGuidanceLogitsProcessor",
        "EncoderNoRepeatNGramLogitsProcessor",
        "EncoderRepetitionPenaltyLogitsProcessor",
        "EpsilonLogitsWarper",
        "EtaLogitsWarper",
        "ExponentialDecayLengthPenalty",
        "ForcedBOSTokenLogitsProcessor",
        "ForcedEOSTokenLogitsProcessor",
        "ForceTokensLogitsProcessor",
        "HammingDiversityLogitsProcessor",
        "InfNanRemoveLogitsProcessor",
        "LogitNormalization",
        "LogitsProcessor",
        "LogitsProcessorList",
        "LogitsWarper",
        "MinLengthLogitsProcessor",
        "MinNewTokensLengthLogitsProcessor",
        "NoBadWordsLogitsProcessor",
        "NoRepeatNGramLogitsProcessor",
        "PrefixConstrainedLogitsProcessor",
        "RepetitionPenaltyLogitsProcessor",
        "SequenceBiasLogitsProcessor",
        "SuppressTokensLogitsProcessor",
        "SuppressTokensAtBeginLogitsProcessor",
        "TemperatureLogitsWarper",
        "TopKLogitsWarper",
        "TopPLogitsWarper",
        "TypicalLogitsWarper",
        "UnbatchedClassifierFreeGuidanceLogitsProcessor",
        "WhisperTimeStampLogitsProcessor",
    ]
    _import_structure["stopping_criteria"] = [
        "MaxNewTokensCriteria",
        "MaxLengthCriteria",
        "MaxTimeCriteria",
        "EosTokenCriteria",
        "StoppingCriteria",
        "StoppingCriteriaList",
        "validate_stopping_criteria",
    ]
    _import_structure["utils"] = [
        "GenerationMixin",
        "GreedySearchEncoderDecoderOutput",
        "GreedySearchDecoderOnlyOutput",
        "SampleEncoderDecoderOutput",
        "SampleDecoderOnlyOutput",
        "BeamSearchEncoderDecoderOutput",
        "BeamSearchDecoderOnlyOutput",
        "BeamSampleEncoderDecoderOutput",
        "BeamSampleDecoderOnlyOutput",
        "ContrastiveSearchEncoderDecoderOutput",
        "ContrastiveSearchDecoderOnlyOutput",
        "GenerateBeamDecoderOnlyOutput",
        "GenerateBeamEncoderDecoderOutput",
        "GenerateDecoderOnlyOutput",
        "GenerateEncoderDecoderOutput",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tf_logits_process"] = [
        "TFForcedBOSTokenLogitsProcessor",
        "TFForcedEOSTokenLogitsProcessor",
        "TFForceTokensLogitsProcessor",
        "TFLogitsProcessor",
        "TFLogitsProcessorList",
        "TFLogitsWarper",
        "TFMinLengthLogitsProcessor",
        "TFNoBadWordsLogitsProcessor",
        "TFNoRepeatNGramLogitsProcessor",
        "TFRepetitionPenaltyLogitsProcessor",
        "TFSuppressTokensAtBeginLogitsProcessor",
        "TFSuppressTokensLogitsProcessor",
        "TFTemperatureLogitsWarper",
        "TFTopKLogitsWarper",
        "TFTopPLogitsWarper",
    ]
    _import_structure["tf_utils"] = [
        "TFGenerationMixin",
        "TFGreedySearchDecoderOnlyOutput",
        "TFGreedySearchEncoderDecoderOutput",
        "TFSampleEncoderDecoderOutput",
        "TFSampleDecoderOnlyOutput",
        "TFBeamSearchEncoderDecoderOutput",
        "TFBeamSearchDecoderOnlyOutput",
        "TFBeamSampleEncoderDecoderOutput",
        "TFBeamSampleDecoderOnlyOutput",
        "TFContrastiveSearchEncoderDecoderOutput",
        "TFContrastiveSearchDecoderOnlyOutput",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["flax_logits_process"] = [
        "FlaxForcedBOSTokenLogitsProcessor",
        "FlaxForcedEOSTokenLogitsProcessor",
        "FlaxForceTokensLogitsProcessor",
        "FlaxLogitsProcessor",
        "FlaxLogitsProcessorList",
        "FlaxLogitsWarper",
        "FlaxMinLengthLogitsProcessor",
        "FlaxSuppressTokensAtBeginLogitsProcessor",
        "FlaxSuppressTokensLogitsProcessor",
        "FlaxTemperatureLogitsWarper",
        "FlaxTopKLogitsWarper",
        "FlaxTopPLogitsWarper",
        "FlaxWhisperTimeStampLogitsProcessor",
        "FlaxNoRepeatNGramLogitsProcessor",
    ]
    _import_structure["flax_utils"] = [
        "FlaxGenerationMixin",
        "FlaxGreedySearchOutput",
        "FlaxSampleOutput",
        "FlaxBeamSearchOutput",
    ]

if TYPE_CHECKING:
    from .configuration_utils import GenerationConfig, GenerationMode
    from .streamers import TextIteratorStreamer, TextStreamer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .beam_constraints import Constraint, ConstraintListState, DisjunctiveConstraint, PhrasalConstraint
        from .beam_search import BeamHypotheses, BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
        from .candidate_generator import AssistedCandidateGenerator, CandidateGenerator, PromptLookupCandidateGenerator
        from .logits_process import (
            AlternatingCodebooksLogitsProcessor,
            ClassifierFreeGuidanceLogitsProcessor,
            EncoderNoRepeatNGramLogitsProcessor,
            EncoderRepetitionPenaltyLogitsProcessor,
            EpsilonLogitsWarper,
            EtaLogitsWarper,
            ExponentialDecayLengthPenalty,
            ForcedBOSTokenLogitsProcessor,
            ForcedEOSTokenLogitsProcessor,
            ForceTokensLogitsProcessor,
            HammingDiversityLogitsProcessor,
            InfNanRemoveLogitsProcessor,
            LogitNormalization,
            LogitsProcessor,
            LogitsProcessorList,
            LogitsWarper,
            MinLengthLogitsProcessor,
            MinNewTokensLengthLogitsProcessor,
            NoBadWordsLogitsProcessor,
            NoRepeatNGramLogitsProcessor,
            PrefixConstrainedLogitsProcessor,
            RepetitionPenaltyLogitsProcessor,
            SequenceBiasLogitsProcessor,
            SuppressTokensAtBeginLogitsProcessor,
            SuppressTokensLogitsProcessor,
            TemperatureLogitsWarper,
            TopKLogitsWarper,
            TopPLogitsWarper,
            TypicalLogitsWarper,
            UnbatchedClassifierFreeGuidanceLogitsProcessor,
            WhisperTimeStampLogitsProcessor,
        )
        from .stopping_criteria import (
            EosTokenCriteria,
            MaxLengthCriteria,
            MaxNewTokensCriteria,
            MaxTimeCriteria,
            StoppingCriteria,
            StoppingCriteriaList,
            validate_stopping_criteria,
        )
        from .utils import (
            BeamSampleDecoderOnlyOutput,
            BeamSampleEncoderDecoderOutput,
            BeamSearchDecoderOnlyOutput,
            BeamSearchEncoderDecoderOutput,
            ContrastiveSearchDecoderOnlyOutput,
            ContrastiveSearchEncoderDecoderOutput,
            GenerateBeamDecoderOnlyOutput,
            GenerateBeamEncoderDecoderOutput,
            GenerateDecoderOnlyOutput,
            GenerateEncoderDecoderOutput,
            GenerationMixin,
            GreedySearchDecoderOnlyOutput,
            GreedySearchEncoderDecoderOutput,
            SampleDecoderOnlyOutput,
            SampleEncoderDecoderOutput,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tf_logits_process import (
            TFForcedBOSTokenLogitsProcessor,
            TFForcedEOSTokenLogitsProcessor,
            TFForceTokensLogitsProcessor,
            TFLogitsProcessor,
            TFLogitsProcessorList,
            TFLogitsWarper,
            TFMinLengthLogitsProcessor,
            TFNoBadWordsLogitsProcessor,
            TFNoRepeatNGramLogitsProcessor,
            TFRepetitionPenaltyLogitsProcessor,
            TFSuppressTokensAtBeginLogitsProcessor,
            TFSuppressTokensLogitsProcessor,
            TFTemperatureLogitsWarper,
            TFTopKLogitsWarper,
            TFTopPLogitsWarper,
        )
        from .tf_utils import (
            TFBeamSampleDecoderOnlyOutput,
            TFBeamSampleEncoderDecoderOutput,
            TFBeamSearchDecoderOnlyOutput,
            TFBeamSearchEncoderDecoderOutput,
            TFContrastiveSearchDecoderOnlyOutput,
            TFContrastiveSearchEncoderDecoderOutput,
            TFGenerationMixin,
            TFGreedySearchDecoderOnlyOutput,
            TFGreedySearchEncoderDecoderOutput,
            TFSampleDecoderOnlyOutput,
            TFSampleEncoderDecoderOutput,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .flax_logits_process import (
            FlaxForcedBOSTokenLogitsProcessor,
            FlaxForcedEOSTokenLogitsProcessor,
            FlaxForceTokensLogitsProcessor,
            FlaxLogitsProcessor,
            FlaxLogitsProcessorList,
            FlaxLogitsWarper,
            FlaxMinLengthLogitsProcessor,
            FlaxNoRepeatNGramLogitsProcessor,
            FlaxSuppressTokensAtBeginLogitsProcessor,
            FlaxSuppressTokensLogitsProcessor,
            FlaxTemperatureLogitsWarper,
            FlaxTopKLogitsWarper,
            FlaxTopPLogitsWarper,
            FlaxWhisperTimeStampLogitsProcessor,
        )
        from .flax_utils import FlaxBeamSearchOutput, FlaxGenerationMixin, FlaxGreedySearchOutput, FlaxSampleOutput
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
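Because of the `_LazyModule` registration at the bottom, the names declared in `_import_structure` only resolve when they are first accessed; the framework-specific submodules are not imported at package import time. A minimal sketch of the user-facing effect, assuming a PyTorch install (the classes used below live in the torch-only branch of `_import_structure`):

```python
# Hedged sketch of the lazy-import behaviour set up above; requires torch to be installed.
from transformers.generation import GenerationConfig, LogitsProcessorList, TemperatureLogitsWarper

# configuration_utils is loaded lazily the moment GenerationConfig is touched
config = GenerationConfig(max_new_tokens=20, do_sample=True, temperature=0.7)

# LogitsProcessorList behaves like a list of callables applied to the logits at each step
warpers = LogitsProcessorList([TemperatureLogitsWarper(temperature=0.7)])
print(config.temperature, len(warpers))
```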
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.93 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc
ADDED
Binary file (15.9 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc
ADDED
Binary file (28.7 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc
ADDED
Binary file (12.7 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc
ADDED
Binary file (44.3 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc
ADDED
Binary file (21.7 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc
ADDED
Binary file (27.6 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc
ADDED
Binary file (96.7 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc
ADDED
Binary file (8.77 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc
ADDED
Binary file (7.79 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc
ADDED
Binary file (26.8 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc
ADDED
Binary file (104 kB)
venv/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (156 kB)
venv/lib/python3.10/site-packages/transformers/generation/beam_constraints.py
ADDED
@@ -0,0 +1,521 @@
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation.
    It must define how the constraint can be satisfied.

    All classes that inherit Constraint must follow the requirement that

    ```py
    completed = False
    while not completed:
        _, completed = constraint.update(constraint.advance())
    ```

    will always terminate (halt).
    """

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """
        Tests whether this constraint has been properly defined.
        """
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """
        When called, returns the token that would take this constraint one step closer to being fulfilled.

        Return:
            token_ids(`torch.tensor`): Must be a tensor of a list of indexable tokens, not some integer.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """
        Reads in a token and returns whether it creates progress.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """
        Reads in a token and returns booleans that indicate the progress made by it. This function will update the
        state of this object, unlike `does_advance(self, token_id: int)`.

        This isn't to test whether a certain token will advance the progress; it's to update its state as if it has
        been generated. This becomes important if token_id != desired token (refer to else statement in
        PhrasalConstraint)

        Args:
            token_id(`int`):
                The id of a newly generated token in the beam search.
        Return:
            stepped(`bool`):
                Whether this constraint has become one step closer to being fulfilled.
            completed(`bool`):
                Whether this constraint has been completely fulfilled by this token being generated.
            reset (`bool`):
                Whether this constraint has reset its progress by this token being generated.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """
        Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of
        a constraint is interrupted by an unwanted token.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """
        Returns the number of remaining steps of `advance()` in order to complete this constraint.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """
        Creates a new instance of this constraint.

        Args:
            stateful(`bool`): Whether to not only copy the constraint for new instance, but also its state.

        Return:
            constraint(`Constraint`): The same constraint as the one being called from.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    r"""
    [`Constraint`] enforcing that an ordered sequence of tokens is included in the output.

    Args:
        token_ids (`List[int]`):
            The ids of the tokens that must be generated by the output.
    """

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint


class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        r"""
        A helper class that builds a trie with the words represented in `nested_token_ids`.
        """
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}

                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """
        The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`.
        """
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)

        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """
        Returns whether # of leaves == # of words. Otherwise some word is a subset of another.
        """
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    r"""
    A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints.

    Args:
        nested_token_ids (`List[List[int]]`):
            A list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from
            the list of words.
    """

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint


class ConstraintListState:
    r"""
    A class for beam scorers to track their progress through a list of constraints.

    Args:
        constraints (`List[Constraint]`):
            A list of [`Constraint`] objects that must be fulfilled by the beam scorer.
    """

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """The list of tokens to generate such that we can make progress.
        By "list" we don't mean the list of tokens that will fully fulfill a constraint.

        Given constraints `c_i = {t_ij | j == # of tokens}`, if we're not in the middle of progressing through a
        specific constraint `c_i`, we return:

        `[t_k1 for k in indices of unfulfilled constraints]`

        If we are in the middle of a constraint, then we return:
            `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint.

        Though we don't care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint,
        that's the only one we'll return.
        """
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """
        token_ids: the tokens generated thus far, used to reset the state of the progress through constraints.
        """
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints is fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state

            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.

                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.

                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?

            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".

                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.

                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
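A minimal sketch that drives the constraint protocol defined above by hand, with made-up token ids; in normal use these objects are passed to `model.generate(constraints=[...])` and stepped by the constrained beam scorer rather than called directly:

```python
# Stepping the Constraint / ConstraintListState protocol manually; the token ids are placeholders.
from transformers.generation.beam_constraints import (
    ConstraintListState,
    DisjunctiveConstraint,
    PhrasalConstraint,
)

phrase = PhrasalConstraint([5, 6, 7])           # the ordered sequence 5, 6, 7 must appear
either = DisjunctiveConstraint([[8, 9], [10]])  # either the phrase 8, 9 or the single token 10 must appear

state = ConstraintListState([phrase, either])
for token_id in [5, 6, 7, 10]:  # pretend these tokens were generated in order
    state.add(token_id)         # completes or steps at most one constraint per token

print(state.completed)   # True: both constraints are now fulfilled
print(state.get_bank())  # scoring bonus used to rank beams by constraint progress
```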
venv/lib/python3.10/site-packages/transformers/generation/beam_search.py
ADDED
@@ -0,0 +1,1005 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2020 The HuggingFace Inc. team
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
from abc import ABC, abstractmethod
|
17 |
+
from collections import UserDict
|
18 |
+
from typing import Dict, List, Optional, Tuple, Union
|
19 |
+
|
20 |
+
import numpy as np
|
21 |
+
import torch
|
22 |
+
|
23 |
+
from ..utils import add_start_docstrings
|
24 |
+
from .beam_constraints import Constraint, ConstraintListState
|
25 |
+
|
26 |
+
|
27 |
+
PROCESS_INPUTS_DOCSTRING = r"""
|
28 |
+
Args:
|
29 |
+
input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
|
30 |
+
Indices of input sequence tokens in the vocabulary.
|
31 |
+
|
32 |
+
Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
|
33 |
+
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
|
34 |
+
|
35 |
+
[What are input IDs?](../glossary#input-ids)
|
36 |
+
next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
|
37 |
+
Current scores of the top `2 * num_beams` non-finished beam hypotheses.
|
38 |
+
next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
|
39 |
+
`input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
|
40 |
+
next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
|
41 |
+
Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
|
42 |
+
pad_token_id (`int`, *optional*):
|
43 |
+
The id of the *padding* token.
|
44 |
+
eos_token_id (`Union[int, List[int]]`, *optional*):
|
45 |
+
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
|
46 |
+
beam_indices (`torch.LongTensor`, *optional*):
|
47 |
+
Beam indices indicating to which beam hypothesis each token corresponds.
|
48 |
+
group_index (`int`, *optional*):
|
49 |
+
The index of the group of beams. Used with [`~PreTrainedModel.group_beam_search`].
|
50 |
+
|
51 |
+
Return:
|
52 |
+
`UserDict`: A dictionary composed of the fields as defined above:
|
53 |
+
|
54 |
+
- **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all
|
55 |
+
non-finished beams.
|
56 |
+
- **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added
|
57 |
+
to the non-finished beam_hypotheses.
|
58 |
+
- **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
|
59 |
+
indicating to which beam the next tokens shall be added.
|
60 |
+
|
61 |
+
"""
|
62 |
+
|
63 |
+
FINALIZE_INPUTS_DOCSTRING = r"""
|
64 |
+
Args:
|
65 |
+
input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
|
66 |
+
Indices of input sequence tokens in the vocabulary.
|
67 |
+
|
68 |
+
Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
|
69 |
+
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
|
70 |
+
|
71 |
+
[What are input IDs?](../glossary#input-ids)
|
72 |
+
final_beam_scores (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
|
73 |
+
The final scores of all non-finished beams.
|
74 |
+
final_beam_tokens (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
|
75 |
+
The last tokens to be added to the non-finished beam_hypotheses.
|
76 |
+
final_beam_indices (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
|
77 |
+
The beam indices indicating to which beam the `final_beam_tokens` shall be added.
|
78 |
+
pad_token_id (`int`, *optional*):
|
79 |
+
The id of the *padding* token.
|
80 |
+
eos_token_id (`Union[int, List[int]]`, *optional*):
|
81 |
+
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
|
82 |
+
|
83 |
+
Return:
|
84 |
+
`torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences.
|
85 |
+
The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early
|
86 |
+
due to the `eos_token_id`.
|
87 |
+
|
88 |
+
"""
|
89 |
+
|
90 |
+
|
91 |
+
class BeamScorer(ABC):
|
92 |
+
"""
|
93 |
+
Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and
|
94 |
+
[`~PreTrainedModel.beam_sample`].
|
95 |
+
"""
|
96 |
+
|
97 |
+
@abstractmethod
|
98 |
+
@add_start_docstrings(PROCESS_INPUTS_DOCSTRING)
|
99 |
+
def process(
|
100 |
+
self,
|
101 |
+
input_ids: torch.LongTensor,
|
102 |
+
next_scores: torch.FloatTensor,
|
103 |
+
next_tokens: torch.LongTensor,
|
104 |
+
next_indices: torch.LongTensor,
|
105 |
+
**kwargs,
|
106 |
+
) -> Tuple[torch.Tensor]:
|
107 |
+
raise NotImplementedError("This is an abstract method.")
|
108 |
+
|
109 |
+
@abstractmethod
|
110 |
+
@add_start_docstrings(FINALIZE_INPUTS_DOCSTRING)
|
111 |
+
def finalize(
|
112 |
+
self,
|
113 |
+
input_ids: torch.LongTensor,
|
114 |
+
next_scores: torch.FloatTensor,
|
115 |
+
next_tokens: torch.LongTensor,
|
116 |
+
next_indices: torch.LongTensor,
|
117 |
+
max_length: int,
|
118 |
+
**kwargs,
|
119 |
+
) -> torch.LongTensor:
|
120 |
+
raise NotImplementedError("This is an abstract method.")
|
121 |
+
|
122 |
+
|
123 |
+
class BeamSearchScorer(BeamScorer):
|
124 |
+
r"""
|
125 |
+
[`BeamScorer`] implementing standard beam search decoding.
|
126 |
+
|
127 |
+
Adapted in part from [Facebook's XLM beam search
|
128 |
+
code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529).
|
129 |
+
|
130 |
+
Reference for the diverse beam search algorithm and implementation [Ashwin Kalyan's DBS
|
131 |
+
implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua)
|
132 |
+
|
133 |
+
Args:
|
134 |
+
batch_size (`int`):
|
135 |
+
Batch Size of `input_ids` for which standard beam search decoding is run in parallel.
|
136 |
+
num_beams (`int`):
|
137 |
+
Number of beams for beam search.
|
138 |
+
device (`torch.device`):
|
139 |
+
Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
|
140 |
+
allocated.
|
141 |
+
length_penalty (`float`, *optional*, defaults to 1.0):
|
142 |
+
Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
|
143 |
+
the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
|
144 |
+
likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
|
145 |
+
`length_penalty` < 0.0 encourages shorter sequences.
|
146 |
+
do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
|
147 |
+
Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
|
148 |
+
`True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
|
149 |
+
heuristic is applied and the generation stops when it is very unlikely to find better candidates;
|
150 |
+
`"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
|
151 |
+
beam search algorithm).
|
152 |
+
num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
|
153 |
+
The number of beam hypotheses that shall be returned upon calling
|
154 |
+
[`~transformers.BeamSearchScorer.finalize`].
|
155 |
+
num_beam_groups (`int`, *optional*, defaults to 1):
|
156 |
+
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
|
157 |
+
See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
|
158 |
+
max_length (`int`, *optional*):
|
159 |
+
The maximum length of the sequence to be generated.
|
160 |
+
"""
|
161 |
+
|
162 |
+
def __init__(
|
163 |
+
self,
|
164 |
+
batch_size: int,
|
165 |
+
num_beams: int,
|
166 |
+
device: torch.device,
|
167 |
+
length_penalty: Optional[float] = 1.0,
|
168 |
+
do_early_stopping: Optional[Union[bool, str]] = False,
|
169 |
+
num_beam_hyps_to_keep: Optional[int] = 1,
|
170 |
+
num_beam_groups: Optional[int] = 1,
|
171 |
+
max_length: Optional[int] = None,
|
172 |
+
):
|
173 |
+
self.num_beams = num_beams
|
174 |
+
self.device = device
|
175 |
+
self.length_penalty = length_penalty
|
176 |
+
self.do_early_stopping = do_early_stopping
|
177 |
+
self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
|
178 |
+
self.num_beam_groups = num_beam_groups
|
179 |
+
self.group_size = self.num_beams // self.num_beam_groups
|
180 |
+
|
181 |
+
self._is_init = False
|
182 |
+
# self._beam_hyps[i*self.num_beam_groups+j] is the beam_hyps of the j-th group in the i-th mini-batch.
|
183 |
+
# If group_beam_search is not used, the list consists of `batch_size` beam_hyps.
|
184 |
+
self._beam_hyps = [
|
185 |
+
BeamHypotheses(
|
186 |
+
num_beams=self.group_size,
|
187 |
+
length_penalty=self.length_penalty,
|
188 |
+
early_stopping=self.do_early_stopping,
|
189 |
+
max_length=max_length,
|
190 |
+
)
|
191 |
+
for _ in range(batch_size * self.num_beam_groups)
|
192 |
+
]
|
193 |
+
# self._done[i*self.num_beam_groups+j] indicates whether the generation of the beam_hyps of the j-th group
|
194 |
+
# in the i-th mini-batch is complete.
|
195 |
+
self._done = torch.tensor(
|
196 |
+
[False for _ in range(batch_size * self.num_beam_groups)], dtype=torch.bool, device=self.device
|
197 |
+
)
|
198 |
+
|
199 |
+
if not isinstance(num_beams, int) or num_beams <= 1:
|
200 |
+
raise ValueError(
|
201 |
+
f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
|
202 |
+
" one should make use of `greedy_search` instead."
|
203 |
+
)
|
204 |
+
|
205 |
+
if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
|
206 |
+
raise ValueError(
|
207 |
+
"`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
|
208 |
+
f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
|
209 |
+
)
|
210 |
+
|
211 |
+
@property
|
212 |
+
def is_done(self) -> bool:
|
213 |
+
return self._done.all()
|
214 |
+
|
215 |
+
def process(
|
216 |
+
self,
|
217 |
+
input_ids: torch.LongTensor,
|
218 |
+
next_scores: torch.FloatTensor,
|
219 |
+
next_tokens: torch.LongTensor,
|
220 |
+
next_indices: torch.LongTensor,
|
221 |
+
pad_token_id: Optional[int] = None,
|
222 |
+
eos_token_id: Optional[Union[int, List[int]]] = None,
|
223 |
+
beam_indices: Optional[torch.LongTensor] = None,
|
224 |
+
group_index: Optional[int] = 0,
|
225 |
+
decoder_prompt_len: Optional[int] = 0,
|
226 |
+
) -> Dict[str, torch.Tensor]:
|
227 |
+
# add up to the length for which next_scores was calculated (including the decoder prompt)
|
228 |
+
cur_len = input_ids.shape[-1] + 1
|
229 |
+
batch_size = len(self._beam_hyps) // self.num_beam_groups
|
230 |
+
|
231 |
+
if not (batch_size == (input_ids.shape[0] // self.group_size)):
|
232 |
+
if self.num_beam_groups > 1:
|
233 |
+
raise ValueError(
|
234 |
+
f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
|
235 |
+
f"size of {self.group_size} is expected by the beam scorer."
|
236 |
+
)
|
237 |
+
else:
|
238 |
+
raise ValueError(
|
239 |
+
f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
|
240 |
+
f"{self.group_size} is expected by the beam scorer."
|
241 |
+
)
|
242 |
+
|
243 |
+
device = input_ids.device
|
244 |
+
next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
|
245 |
+
next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
|
246 |
+
next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
|
247 |
+
|
248 |
+
if isinstance(eos_token_id, int):
|
249 |
+
eos_token_id = [eos_token_id]
|
250 |
+
|
251 |
+
for batch_idx in range(batch_size):
|
252 |
+
batch_group_idx = batch_idx * self.num_beam_groups + group_index
|
253 |
+
if self._done[batch_group_idx]:
|
254 |
+
if self.num_beams < len(self._beam_hyps[batch_group_idx]):
|
255 |
+
raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
|
256 |
+
if eos_token_id is None or pad_token_id is None:
|
257 |
+
raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
|
258 |
+
# pad the batch
|
259 |
+
next_beam_scores[batch_idx, :] = 0
|
260 |
+
next_beam_tokens[batch_idx, :] = pad_token_id
|
261 |
+
next_beam_indices[batch_idx, :] = 0
|
262 |
+
continue
|
263 |
+
|
264 |
+
# next tokens for this sentence
|
265 |
+
beam_idx = 0
|
266 |
+
for beam_token_rank, (next_token, next_score, next_index) in enumerate(
|
267 |
+
zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
|
268 |
+
):
|
269 |
+
batch_beam_idx = batch_idx * self.group_size + next_index
|
270 |
+
# add to generated hypotheses if end of sentence
|
271 |
+
if (eos_token_id is not None) and (next_token.item() in eos_token_id):
|
272 |
+
# if beam_token does not belong to top num_beams tokens, it should not be added
|
273 |
+
is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
|
274 |
+
if is_beam_token_worse_than_top_num_beams:
|
275 |
+
continue
|
276 |
+
if beam_indices is not None:
|
277 |
+
beam_index = beam_indices[batch_beam_idx]
|
278 |
+
beam_index = beam_index + (batch_beam_idx,)
|
279 |
+
else:
|
280 |
+
beam_index = None
|
281 |
+
|
282 |
+
self._beam_hyps[batch_group_idx].add(
|
283 |
+
input_ids[batch_beam_idx].clone(),
|
284 |
+
next_score.item(),
|
285 |
+
beam_indices=beam_index,
|
286 |
+
generated_len=cur_len - decoder_prompt_len,
|
287 |
+
)
|
288 |
+
else:
|
289 |
+
# add next predicted token since it is not eos_token
|
290 |
+
next_beam_scores[batch_idx, beam_idx] = next_score
|
291 |
+
next_beam_tokens[batch_idx, beam_idx] = next_token
|
292 |
+
next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
|
293 |
+
beam_idx += 1
|
294 |
+
|
295 |
+
# once the beam for next step is full, don't add more tokens to it.
|
296 |
+
if beam_idx == self.group_size:
|
297 |
+
break
|
298 |
+
|
299 |
+
if beam_idx < self.group_size:
|
300 |
+
raise ValueError(
|
301 |
+
f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
|
302 |
+
f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
|
303 |
+
)
|
304 |
+
|
305 |
+
# Check if we are done so that we can save a pad step if all(done)
|
306 |
+
self._done[batch_group_idx] = self._done[batch_group_idx] or self._beam_hyps[batch_group_idx].is_done(
|
307 |
+
next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len
|
308 |
+
)
|
309 |
+
|
310 |
+
return UserDict(
|
311 |
+
{
|
312 |
+
"next_beam_scores": next_beam_scores.view(-1),
|
313 |
+
"next_beam_tokens": next_beam_tokens.view(-1),
|
314 |
+
"next_beam_indices": next_beam_indices.view(-1),
|
315 |
+
}
|
316 |
+
)
|
317 |
+
|
318 |
+
def finalize(
|
319 |
+
self,
|
320 |
+
input_ids: torch.LongTensor,
|
321 |
+
final_beam_scores: torch.FloatTensor,
|
322 |
+
final_beam_tokens: torch.LongTensor,
|
323 |
+
final_beam_indices: torch.LongTensor,
|
324 |
+
max_length: int,
|
325 |
+
pad_token_id: Optional[int] = None,
|
326 |
+
eos_token_id: Optional[Union[int, List[int]]] = None,
|
327 |
+
beam_indices: Optional[torch.LongTensor] = None,
|
328 |
+
decoder_prompt_len: Optional[int] = 0,
|
329 |
+
) -> Tuple[torch.LongTensor]:
|
330 |
+
batch_size = len(self._beam_hyps) // self.num_beam_groups
|
331 |
+
|
332 |
+
if isinstance(eos_token_id, int):
|
333 |
+
eos_token_id = [eos_token_id]
|
334 |
+
|
335 |
+
# finalize all open beam hypotheses and add to generated hypotheses
|
336 |
+
for batch_group_idx, beam_hyp in enumerate(self._beam_hyps):
|
337 |
+
if self._done[batch_group_idx]:
|
338 |
+
continue
|
339 |
+
|
340 |
+
# all open beam hypotheses are added to the beam hypothesis
|
341 |
+
# beam hypothesis class automatically keeps the best beams
|
342 |
+
for index_per_group in range(self.group_size):
|
343 |
+
batch_beam_idx = batch_group_idx * self.group_size + index_per_group
|
344 |
+
final_score = final_beam_scores[batch_beam_idx].item()
|
345 |
+
final_tokens = input_ids[batch_beam_idx]
|
346 |
+
beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
|
347 |
+
generated_len = final_tokens.shape[-1] - decoder_prompt_len
|
348 |
+
beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len)
|
349 |
+
|
350 |
+
# select the best hypotheses
|
351 |
+
sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
|
352 |
+
best = []
|
353 |
+
best_indices = []
|
354 |
+
best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
|
355 |
+
|
356 |
+
# retrieve best hypotheses
|
357 |
+
for i in range(batch_size):
|
358 |
+
beam_hyps_in_batch = self._beam_hyps[i * self.num_beam_groups : (i + 1) * self.num_beam_groups]
|
359 |
+
candidate_beams = [beam for beam_hyp in beam_hyps_in_batch for beam in beam_hyp.beams]
|
360 |
+
sorted_hyps = sorted(candidate_beams, key=lambda x: x[0])
|
361 |
+
for j in range(self.num_beam_hyps_to_keep):
|
362 |
+
best_hyp_tuple = sorted_hyps.pop()
|
363 |
+
best_score = best_hyp_tuple[0]
|
364 |
+
best_hyp = best_hyp_tuple[1]
|
365 |
+
best_index = best_hyp_tuple[2]
|
366 |
+
sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
|
367 |
+
|
368 |
+
# append hyp to lists
|
369 |
+
best.append(best_hyp)
|
370 |
+
|
371 |
+
# append indices to list
|
372 |
+
best_indices.append(best_index)
|
373 |
+
|
374 |
+
best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
|
375 |
+
|
376 |
+
# prepare for adding eos
|
377 |
+
sent_lengths_max = sent_lengths.max().item() + 1
|
378 |
+
sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
|
379 |
+
decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
|
380 |
+
|
381 |
+
if len(best_indices) > 0 and best_indices[0] is not None:
|
382 |
+
indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
|
383 |
+
else:
|
384 |
+
indices = None
|
385 |
+
|
386 |
+
# shorter batches are padded if needed
|
387 |
+
if sent_lengths.min().item() != sent_lengths.max().item():
|
388 |
+
if pad_token_id is None:
|
389 |
+
raise ValueError("`pad_token_id` has to be defined")
|
390 |
+
decoded.fill_(pad_token_id)
|
391 |
+
|
392 |
+
if indices is not None:
|
393 |
+
indices.fill_(-1)
|
394 |
+
|
395 |
+
# fill with hypotheses and eos_token_id if the latter fits in
|
396 |
+
for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
|
397 |
+
decoded[i, : sent_lengths[i]] = hypo
|
398 |
+
|
399 |
+
if indices is not None:
|
400 |
+
indices[i, : len(best_idx)] = torch.tensor(best_idx)
|
401 |
+
|
402 |
+
if sent_lengths[i] < sent_max_len:
|
403 |
+
# inserting only the first eos_token_id
|
404 |
+
decoded[i, sent_lengths[i]] = eos_token_id[0]
|
405 |
+
|
406 |
+
return UserDict(
|
407 |
+
{
|
408 |
+
"sequences": decoded,
|
409 |
+
"sequence_scores": best_scores,
|
410 |
+
"beam_indices": indices,
|
411 |
+
}
|
412 |
+
)
|
413 |
+
|
414 |
+
|
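A self-contained sketch (editorial illustration, not part of the added file) of how the process/finalize contract above fits together for a single decoding step. The score, token, and index tensors below are fabricated placeholders; in a real generation loop they would come from log-softmaxed model logits and a top-`2 * num_beams` selection.

import torch
from transformers.generation.beam_search import BeamSearchScorer

batch_size, num_beams = 1, 2
scorer = BeamSearchScorer(batch_size=batch_size, num_beams=num_beams, device=torch.device("cpu"))

input_ids = torch.zeros((batch_size * num_beams, 1), dtype=torch.long)  # current beams
next_scores = torch.tensor([[-0.1, -0.2, -0.3, -0.4]])   # top 2*num_beams candidate log-probs
next_tokens = torch.tensor([[4, 5, 6, 7]])                # top 2*num_beams candidate token ids
next_indices = torch.tensor([[0, 0, 1, 1]])               # beam each candidate would extend

outputs = scorer.process(input_ids, next_scores, next_tokens, next_indices,
                         pad_token_id=0, eos_token_id=1)
# keep the surviving beams and append the chosen tokens
input_ids = torch.cat(
    [input_ids[outputs["next_beam_indices"]], outputs["next_beam_tokens"].unsqueeze(-1)], dim=-1
)

final = scorer.finalize(input_ids, outputs["next_beam_scores"], outputs["next_beam_tokens"],
                        outputs["next_beam_indices"], max_length=3,
                        pad_token_id=0, eos_token_id=1)
print(final["sequences"])  # shape (batch_size * num_beam_hyps_to_keep, <= max_length)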
415 |
+
class ConstrainedBeamSearchScorer(BeamScorer):
|
416 |
+
r"""
|
417 |
+
[`BeamScorer`] implementing constrained beam search decoding.
|
418 |
+
|
419 |
+
|
420 |
+
Args:
|
421 |
+
batch_size (`int`):
|
422 |
+
Batch Size of `input_ids` for which standard beam search decoding is run in parallel.
|
423 |
+
num_beams (`int`):
|
424 |
+
Number of beams for beam search.
|
425 |
+
constraints (`List[Constraint]`):
|
426 |
+
A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation
|
427 |
+
output. For more information, the documentation of [`Constraint`] should be read.
|
428 |
+
device (`torch.device`):
|
429 |
+
Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
|
430 |
+
allocated.
|
431 |
+
length_penalty (`float`, *optional*, defaults to 1.0):
|
432 |
+
Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
|
433 |
+
the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
|
434 |
+
likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
|
435 |
+
`length_penalty` < 0.0 encourages shorter sequences.
|
436 |
+
do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
|
437 |
+
Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
|
438 |
+
`True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
|
439 |
+
heuristic is applied and the generation stops when it is very unlikely to find better candidates;
|
440 |
+
`"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
|
441 |
+
beam search algorithm).
|
442 |
+
num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
|
443 |
+
The number of beam hypotheses that shall be returned upon calling
|
444 |
+
[`~transformers.BeamSearchScorer.finalize`].
|
445 |
+
num_beam_groups (`int`, *optional*, defaults to 1):
|
446 |
+
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
|
447 |
+
See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
|
448 |
+
max_length (`int`, *optional*):
|
449 |
+
The maximum length of the sequence to be generated.
|
450 |
+
"""
|
451 |
+
|
452 |
+
def __init__(
|
453 |
+
self,
|
454 |
+
batch_size: int,
|
455 |
+
num_beams: int,
|
456 |
+
constraints: List[Constraint],
|
457 |
+
device: torch.device,
|
458 |
+
length_penalty: Optional[float] = 1.0,
|
459 |
+
do_early_stopping: Optional[Union[bool, str]] = False,
|
460 |
+
num_beam_hyps_to_keep: Optional[int] = 1,
|
461 |
+
num_beam_groups: Optional[int] = 1,
|
462 |
+
max_length: Optional[int] = None,
|
463 |
+
):
|
464 |
+
self.num_beams = num_beams
|
465 |
+
self.device = device
|
466 |
+
self.length_penalty = length_penalty
|
467 |
+
self.do_early_stopping = do_early_stopping
|
468 |
+
self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
|
469 |
+
self.num_beam_groups = num_beam_groups
|
470 |
+
self.group_size = self.num_beams // self.num_beam_groups
|
471 |
+
self.constraints = constraints
|
472 |
+
|
473 |
+
self._is_init = False
|
474 |
+
self._beam_hyps = [
|
475 |
+
BeamHypotheses(
|
476 |
+
num_beams=self.num_beams,
|
477 |
+
length_penalty=self.length_penalty,
|
478 |
+
early_stopping=self.do_early_stopping,
|
479 |
+
max_length=max_length,
|
480 |
+
)
|
481 |
+
for _ in range(batch_size)
|
482 |
+
]
|
483 |
+
self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device)
|
484 |
+
|
485 |
+
if not isinstance(num_beams, int) or num_beams <= 1:
|
486 |
+
raise ValueError(
|
487 |
+
f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
|
488 |
+
" one should make use of `greedy_search` instead."
|
489 |
+
)
|
490 |
+
|
491 |
+
if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
|
492 |
+
raise ValueError(
|
493 |
+
"`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
|
494 |
+
f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
|
495 |
+
)
|
496 |
+
|
497 |
+
@property
|
498 |
+
def is_done(self) -> bool:
|
499 |
+
return self._done.all()
|
500 |
+
|
501 |
+
def make_constraint_states(self, n):
|
502 |
+
return [ConstraintListState([constraint.copy() for constraint in self.constraints]) for _ in range(n)]
|
503 |
+
|
504 |
+
def check_completes_constraints(self, sequence):
|
505 |
+
new_state = self.make_constraint_states(1)[0]
|
506 |
+
new_state.reset(sequence)
|
507 |
+
return new_state.completed
|
508 |
+
|
509 |
+
def process(
|
510 |
+
self,
|
511 |
+
input_ids: torch.LongTensor,
|
512 |
+
next_scores: torch.FloatTensor,
|
513 |
+
next_tokens: torch.LongTensor,
|
514 |
+
next_indices: torch.LongTensor,
|
515 |
+
scores_for_all_vocab: torch.FloatTensor,
|
516 |
+
pad_token_id: Optional[int] = None,
|
517 |
+
eos_token_id: Optional[Union[int, List[int]]] = None,
|
518 |
+
beam_indices: Optional[torch.LongTensor] = None,
|
519 |
+
decoder_prompt_len: Optional[int] = 0,
|
520 |
+
) -> Tuple[torch.Tensor]:
|
521 |
+
r"""
|
522 |
+
Args:
|
523 |
+
input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
|
524 |
+
Indices of input sequence tokens in the vocabulary.
|
525 |
+
|
526 |
+
Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
|
527 |
+
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
|
528 |
+
|
529 |
+
[What are input IDs?](../glossary#input-ids)
|
530 |
+
next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
|
531 |
+
Current scores of the top `2 * num_beams` non-finished beam hypotheses.
|
532 |
+
next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
|
533 |
+
`input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
|
534 |
+
next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
|
535 |
+
Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
|
536 |
+
scores_for_all_vocab (`torch.FloatTensor` of shape `(batch_size * num_beams, sequence_length)`):
|
537 |
+
The scores of all tokens in the vocabulary for each of the beam hypotheses.
|
538 |
+
pad_token_id (`int`, *optional*):
|
539 |
+
The id of the *padding* token.
|
540 |
+
eos_token_id (`Union[int, List[int]]`, *optional*):
|
541 |
+
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
|
542 |
+
beam_indices (`torch.LongTensor`, *optional*):
|
543 |
+
Beam indices indicating to which beam hypothesis each token corresponds.
|
544 |
+
decoder_prompt_len (`int`, *optional*):
|
545 |
+
The length of prompt that is included in the input to decoder.
|
546 |
+
Return:
|
547 |
+
`UserDict`: A dictionary composed of the fields as defined above:
|
548 |
+
|
549 |
+
- **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of
|
550 |
+
all
|
551 |
+
non-finished beams.
|
552 |
+
|
553 |
+
- **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be
|
554 |
+
added
|
555 |
+
to the non-finished beam_hypotheses.
|
556 |
+
- **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
|
557 |
+
indicating to which beam the next tokens shall be added.
|
558 |
+
"""
|
559 |
+
|
560 |
+
# add up to the length for which next_scores was calculated (including the decoder prompt)
|
561 |
+
cur_len = input_ids.shape[-1] + 1
|
562 |
+
batch_size = len(self._beam_hyps)
|
563 |
+
if not (batch_size == (input_ids.shape[0] // self.group_size)):
|
564 |
+
if self.num_beam_groups > 1:
|
565 |
+
raise ValueError(
|
566 |
+
f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
|
567 |
+
f"size of {self.group_size} is expected by the beam scorer."
|
568 |
+
)
|
569 |
+
else:
|
570 |
+
raise ValueError(
|
571 |
+
f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
|
572 |
+
f"{self.group_size} is expected by the beam scorer."
|
573 |
+
)
|
574 |
+
|
575 |
+
device = input_ids.device
|
576 |
+
|
577 |
+
next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
|
578 |
+
next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
|
579 |
+
next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
|
580 |
+
|
581 |
+
if isinstance(eos_token_id, int):
|
582 |
+
eos_token_id = [eos_token_id]
|
583 |
+
|
584 |
+
for batch_idx, beam_hyp in enumerate(self._beam_hyps):
|
585 |
+
if self._done[batch_idx]:
|
586 |
+
if self.num_beams < len(beam_hyp):
|
587 |
+
raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
|
588 |
+
if eos_token_id is None or pad_token_id is None:
|
589 |
+
raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
|
590 |
+
# pad the batch
|
591 |
+
next_beam_scores[batch_idx, :] = 0
|
592 |
+
next_beam_tokens[batch_idx, :] = pad_token_id
|
593 |
+
next_beam_indices[batch_idx, :] = 0
|
594 |
+
continue
|
595 |
+
|
596 |
+
# next tokens for this sentence.
|
597 |
+
beam_idx = 0
|
598 |
+
for beam_token_rank, (next_token, next_score, next_index) in enumerate(
|
599 |
+
zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
|
600 |
+
):
|
601 |
+
batch_beam_idx = batch_idx * self.group_size + next_index
|
602 |
+
# add to generated hypotheses if end of sentence
|
603 |
+
if (eos_token_id is not None) and (next_token.item() in eos_token_id):
|
604 |
+
# if beam_token does not belong to top num_beams tokens, it should not be added
|
605 |
+
is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
|
606 |
+
if is_beam_token_worse_than_top_num_beams:
|
607 |
+
continue
|
608 |
+
|
609 |
+
completes_constraint = self.check_completes_constraints(input_ids[batch_beam_idx].cpu().tolist())
|
610 |
+
if completes_constraint:
|
611 |
+
if beam_indices is not None:
|
612 |
+
beam_index = beam_indices[batch_beam_idx]
|
613 |
+
beam_index = beam_index + (batch_beam_idx,)
|
614 |
+
else:
|
615 |
+
beam_index = None
|
616 |
+
|
617 |
+
beam_hyp.add(
|
618 |
+
input_ids[batch_beam_idx].clone(),
|
619 |
+
next_score.item(),
|
620 |
+
beam_indices=beam_index,
|
621 |
+
generated_len=cur_len - decoder_prompt_len,
|
622 |
+
)
|
623 |
+
else:
|
624 |
+
# add next predicted token since it is not eos_token
|
625 |
+
next_beam_scores[batch_idx, beam_idx] = next_score
|
626 |
+
next_beam_tokens[batch_idx, beam_idx] = next_token
|
627 |
+
next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
|
628 |
+
beam_idx += 1
|
629 |
+
|
630 |
+
# once the beam for next step is full, don't add more tokens to it.
|
631 |
+
if beam_idx == self.group_size:
|
632 |
+
break
|
633 |
+
|
634 |
+
new_scores, new_tokens, new_indices = self.step_sentence_constraint(
|
635 |
+
batch_idx,
|
636 |
+
input_ids,
|
637 |
+
scores_for_all_vocab,
|
638 |
+
next_beam_scores[batch_idx],
|
639 |
+
next_beam_tokens[batch_idx],
|
640 |
+
next_beam_indices[batch_idx],
|
641 |
+
)
|
642 |
+
|
643 |
+
next_beam_scores[batch_idx] = new_scores
|
644 |
+
next_beam_tokens[batch_idx] = new_tokens
|
645 |
+
next_beam_indices[batch_idx] = new_indices
|
646 |
+
|
647 |
+
if beam_idx < self.group_size:
|
648 |
+
raise ValueError(
|
649 |
+
f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
|
650 |
+
f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
|
651 |
+
)
|
652 |
+
|
653 |
+
# Check if we are done so that we can save a pad step if all(done)
|
654 |
+
self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
|
655 |
+
next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len
|
656 |
+
)
|
657 |
+
|
658 |
+
return UserDict(
|
659 |
+
{
|
660 |
+
"next_beam_scores": next_beam_scores.view(-1),
|
661 |
+
"next_beam_tokens": next_beam_tokens.view(-1),
|
662 |
+
"next_beam_indices": next_beam_indices.view(-1),
|
663 |
+
}
|
664 |
+
)
|
665 |
+
|
666 |
+
def step_sentence_constraint(
|
667 |
+
self,
|
668 |
+
batch_idx: int,
|
669 |
+
input_ids: torch.LongTensor,
|
670 |
+
vocab_scores: torch.FloatTensor,
|
671 |
+
sent_beam_scores: torch.FloatTensor,
|
672 |
+
sent_beam_tokens: torch.LongTensor,
|
673 |
+
sent_beam_indices: torch.LongTensor,
|
674 |
+
push_progress: bool = False,
|
675 |
+
):
|
676 |
+
# sent_beam_tokens are the {num_beams} next tokens that are under consideration for this beam
|
677 |
+
# (candidate next tokens)
|
678 |
+
|
679 |
+
# 1. Adding "advance_tokens"
|
680 |
+
# using ConstraintStateList.advance(), we propose new tokens to be added into this "candidate list" that will
|
681 |
+
# advance us in fulfilling the constraints.
|
682 |
+
|
683 |
+
# 2. Selecting best candidates such that we end up with highest probable candidates
|
684 |
+
# that fulfill our constraints.
|
685 |
+
|
686 |
+
orig_len = sent_beam_indices.size(0)
|
687 |
+
device = sent_beam_indices.device
|
688 |
+
|
689 |
+
# initialize states
|
690 |
+
topk_contraint_states = self.make_constraint_states(orig_len)
|
691 |
+
advance_constraint_states = self.make_constraint_states(orig_len)
|
692 |
+
|
693 |
+
sidx, eidx = batch_idx * orig_len, (batch_idx + 1) * orig_len
|
694 |
+
this_batch_input_ids = input_ids[sidx:eidx]
|
695 |
+
this_batch_token_scores = vocab_scores[sidx:eidx]
|
696 |
+
full_hypotheses = torch.cat((input_ids[sent_beam_indices], sent_beam_tokens.unsqueeze(-1)), dim=-1)
|
697 |
+
|
698 |
+
# need to make new hypotheses that advance the constraints
|
699 |
+
track_new = {
|
700 |
+
"new_seqs": full_hypotheses.tolist(),
|
701 |
+
"new_states": [],
|
702 |
+
"new_indices": [],
|
703 |
+
"new_tokens": [],
|
704 |
+
"new_scores": [],
|
705 |
+
}
|
706 |
+
for seq_idx, pre_seq in enumerate(this_batch_input_ids):
|
707 |
+
# pre_seq = ith sequence generated before this step.
|
708 |
+
|
709 |
+
# input_ids -> (topk) generic beam search best model next tokens
|
710 |
+
# -> (advance) constraints forcing the next token
|
711 |
+
# either way, we need to sort them into "banks" later, so store a "ConstraintListState" for all types of
|
712 |
+
# hypotheses.
|
713 |
+
|
714 |
+
topk_state = topk_contraint_states[seq_idx]
|
715 |
+
topk_state.reset(full_hypotheses[seq_idx].cpu().tolist())
|
716 |
+
|
717 |
+
advance_state = advance_constraint_states[seq_idx]
|
718 |
+
advance_state.reset(pre_seq.cpu().tolist())
|
719 |
+
|
720 |
+
if not advance_state.completed:
|
721 |
+
advance_tokens = torch.LongTensor(advance_state.advance()).to(device)
|
722 |
+
for advance_token in advance_tokens:
|
723 |
+
# since adding each `advance_token` leads to a different hypothesis, create new state instance.
|
724 |
+
new_state = advance_state.copy(stateful=True)
|
725 |
+
new_state.add(advance_token.cpu().tolist())
|
726 |
+
|
727 |
+
advance_seq = torch.cat((pre_seq, advance_token.unsqueeze(0)), -1).cpu().tolist()
|
728 |
+
if advance_seq not in track_new["new_seqs"]:
|
729 |
+
# prevent duplicates, which are basically bound to happen in this process.
|
730 |
+
track_new["new_seqs"].append(advance_seq)
|
731 |
+
track_new["new_indices"].append(sidx + seq_idx) # idx -> global idx across all the batches
|
732 |
+
track_new["new_tokens"].append(advance_token)
|
733 |
+
track_new["new_scores"].append(this_batch_token_scores[seq_idx].take(advance_token))
|
734 |
+
track_new["new_states"].append(new_state)
|
735 |
+
elif push_progress:
|
736 |
+
# Basically, `sent_beam_indices` often selects very few of the generated sequences in `input_ids` that
|
737 |
+
# actually fulfill our constraints. For example, let constraints == ["loves pies"] and
|
738 |
+
|
739 |
+
# pre_seq_1 = "The child loves pies and" pre_seq_2 = "The child plays in the playground and"
|
740 |
+
|
741 |
+
# Without this step, if `sent_beam_indices` is something like [1,1], then
|
742 |
+
# 1. `pre_seq_1` won't be added to the list of (topk) hypothesis since it's not in the indices and
|
743 |
+
# 2. it won't be added to the list of (advance) hypothesis since it's completed already. (this is
|
744 |
+
# the else part of `if constraints_completed[seq_idx]`)
|
745 |
+
# 3. it ends up simply getting removed from consideration.
|
746 |
+
|
747 |
+
# #3 might be fine and actually desired, since it's likely that it's a low-probability output anyways,
|
748 |
+
# especially if it's not in the list of `sent_beam_indices`. But this often leads to lengthened beam
|
749 |
+
# search times, since completed sequences keep getting removed after all this effort for constrained
|
750 |
+
# generation.
|
751 |
+
|
752 |
+
# Here, we basically take `pre_seq_1` and "push" it into the considered list of hypotheses, by simply
|
753 |
+
# appending the next most likely token in the vocabulary and adding it to the list of hypotheses.
|
754 |
+
|
755 |
+
new_score, new_token = torch.max(this_batch_token_scores[seq_idx], 0) # some next probable token
|
756 |
+
advance_seq = torch.cat((pre_seq, new_token.unsqueeze(0)), -1)
|
757 |
+
|
758 |
+
advance_state = advance_constraint_states[seq_idx]
|
759 |
+
|
760 |
+
advance_seq = advance_seq.cpu().tolist()
|
761 |
+
|
762 |
+
advance_state.reset(advance_seq)
|
763 |
+
if advance_seq not in track_new["new_seqs"]:
|
764 |
+
# but still don't want to have duplicates
|
765 |
+
track_new["new_seqs"].append(advance_seq)
|
766 |
+
track_new["new_indices"].append(seq_idx)
|
767 |
+
track_new["new_tokens"].append(new_token)
|
768 |
+
track_new["new_scores"].append(new_score)
|
769 |
+
track_new["new_states"].append(advance_state)
|
770 |
+
|
771 |
+
if len(track_new["new_indices"]) > 0:
|
772 |
+
new_indices = torch.tensor(track_new["new_indices"]).to(device)
|
773 |
+
new_tokens = torch.stack(track_new["new_tokens"]).to(device)
|
774 |
+
new_scores = torch.stack(track_new["new_scores"]).to(device)
|
775 |
+
|
776 |
+
all_states = topk_contraint_states + track_new["new_states"]
|
777 |
+
all_tokens = torch.cat((sent_beam_tokens, new_tokens), -1)
|
778 |
+
all_scores = torch.cat((sent_beam_scores, new_scores), -1)
|
779 |
+
all_banks = torch.tensor([one.get_bank() for one in all_states]).to(device)
|
780 |
+
|
781 |
+
zipped = all_banks * 100 + all_scores
|
782 |
+
indices = zipped.sort(descending=True).indices
|
783 |
+
sorted_banks = all_banks[indices]
|
784 |
+
|
785 |
+
# Then we end up with {sorted among bank C}, {sorted among bank C-1}, ..., {sorted among bank 0}
|
786 |
+
|
787 |
+
counter = -1
|
788 |
+
cur_bank = sorted_banks[0]
|
789 |
+
increments = []
|
790 |
+
for bank in sorted_banks:
|
791 |
+
if bank == cur_bank:
|
792 |
+
counter += 1
|
793 |
+
else:
|
794 |
+
counter = 0
|
795 |
+
cur_bank = bank
|
796 |
+
increments.append(counter)
|
797 |
+
rearrangers = torch.tensor(np.argsort(increments, kind="mergesort"))
|
798 |
+
|
799 |
+
indices = indices[rearrangers][:orig_len]
|
800 |
+
|
801 |
+
sent_beam_scores = all_scores[indices]
|
802 |
+
sent_beam_tokens = all_tokens[indices]
|
803 |
+
sent_beam_indices = torch.cat((sent_beam_indices, new_indices))[indices]
|
804 |
+
|
805 |
+
return sent_beam_scores, sent_beam_tokens, sent_beam_indices
|
806 |
+
|
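The bank-based reordering at the end of step_sentence_constraint is the subtle part: candidates are grouped by how many constraint steps ("banks") they have completed, then interleaved round-robin so the best member of each bank comes first. The standalone sketch below (editorial, with fabricated banks and scores) reproduces just that ordering logic.

import numpy as np
import torch

all_banks = torch.tensor([2, 2, 1, 0, 1])                    # constraint progress of each candidate
all_scores = torch.tensor([-1.0, -2.0, -0.5, -0.1, -3.0])    # beam score of each candidate

zipped = all_banks * 100 + all_scores           # the bank dominates the ordering, the score breaks ties
indices = zipped.sort(descending=True).indices  # candidates grouped by bank, best score first
sorted_banks = all_banks[indices]

counter, cur_bank, increments = -1, sorted_banks[0], []
for bank in sorted_banks:                       # rank of each candidate within its own bank
    if bank == cur_bank:
        counter += 1
    else:
        counter = 0
        cur_bank = bank
    increments.append(counter)

rearrangers = torch.tensor(np.argsort(increments, kind="mergesort"))  # stable sort keeps the bank order
print(indices[rearrangers])  # tensor([0, 2, 3, 1, 4]): best of each bank first, then the runners-up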
807 |
+
def finalize(
|
808 |
+
self,
|
809 |
+
input_ids: torch.LongTensor,
|
810 |
+
final_beam_scores: torch.FloatTensor,
|
811 |
+
final_beam_tokens: torch.LongTensor,
|
812 |
+
final_beam_indices: torch.LongTensor,
|
813 |
+
max_length: int,
|
814 |
+
pad_token_id: Optional[int] = None,
|
815 |
+
eos_token_id: Optional[Union[int, List[int]]] = None,
|
816 |
+
beam_indices: Optional[torch.LongTensor] = None,
|
817 |
+
decoder_prompt_len: Optional[int] = 0,
|
818 |
+
) -> Tuple[torch.LongTensor]:
|
819 |
+
batch_size = len(self._beam_hyps)
|
820 |
+
|
821 |
+
if isinstance(eos_token_id, int):
|
822 |
+
eos_token_id = [eos_token_id]
|
823 |
+
|
824 |
+
# finalize all open beam hypotheses and add to generated hypotheses
|
825 |
+
for batch_idx, beam_hyp in enumerate(self._beam_hyps):
|
826 |
+
if self._done[batch_idx]:
|
827 |
+
continue
|
828 |
+
|
829 |
+
# all open beam hypotheses are added to the beam hypothesis
|
830 |
+
# beam hypothesis class automatically keeps the best beams
|
831 |
+
|
832 |
+
ids_collect = []
|
833 |
+
for beam_id in range(self.num_beams):
|
834 |
+
batch_beam_idx = batch_idx * self.num_beams + beam_id
|
835 |
+
final_score = final_beam_scores[batch_beam_idx].item()
|
836 |
+
final_tokens = input_ids[batch_beam_idx]
|
837 |
+
|
838 |
+
completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist())
|
839 |
+
if completes_constraint:
|
840 |
+
beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
|
841 |
+
generated_len = final_tokens.shape[-1] - decoder_prompt_len
|
842 |
+
beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len)
|
843 |
+
ids_collect.append(beam_id)
|
844 |
+
|
845 |
+
# due to overly complex constraints or other factors, sometimes we can't guarantee a successful
|
846 |
+
# generation. In these cases we simply return the highest scoring outputs.
|
847 |
+
if len(ids_collect) < self.num_beam_hyps_to_keep:
|
848 |
+
for beam_id in range(self.num_beams):
|
849 |
+
if beam_id not in ids_collect:
|
850 |
+
batch_beam_idx = batch_idx * self.num_beams + beam_id
|
851 |
+
final_score = final_beam_scores[batch_beam_idx].item()
|
852 |
+
final_tokens = input_ids[batch_beam_idx]
|
853 |
+
generated_len = final_tokens.shape[-1] - decoder_prompt_len
|
854 |
+
beam_hyp.add(final_tokens, final_score, generated_len=generated_len)
|
855 |
+
if len(ids_collect) >= self.num_beam_hyps_to_keep:
|
856 |
+
break
|
857 |
+
|
858 |
+
# select the best hypotheses
|
859 |
+
sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
|
860 |
+
best = []
|
861 |
+
best_indices = []
|
862 |
+
best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
|
863 |
+
|
864 |
+
# retrieve best hypotheses
|
865 |
+
for i, beam_hyp in enumerate(self._beam_hyps):
|
866 |
+
sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0])
|
867 |
+
for j in range(self.num_beam_hyps_to_keep):
|
868 |
+
best_hyp_tuple = sorted_hyps.pop()
|
869 |
+
best_score = best_hyp_tuple[0]
|
870 |
+
best_hyp = best_hyp_tuple[1]
|
871 |
+
best_index = best_hyp_tuple[2]
|
872 |
+
sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
|
873 |
+
|
874 |
+
# append to lists
|
875 |
+
best.append(best_hyp)
|
876 |
+
|
877 |
+
# append indices to list
|
878 |
+
best_indices.append(best_index)
|
879 |
+
|
880 |
+
best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
|
881 |
+
|
882 |
+
# prepare for adding eos
|
883 |
+
sent_lengths_max = sent_lengths.max().item() + 1
|
884 |
+
|
885 |
+
sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
|
886 |
+
decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
|
887 |
+
|
888 |
+
if len(best_indices) > 0 and best_indices[0] is not None:
|
889 |
+
indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
|
890 |
+
else:
|
891 |
+
indices = None
|
892 |
+
|
893 |
+
# shorter batches are padded if needed
|
894 |
+
if sent_lengths.min().item() != sent_lengths.max().item():
|
895 |
+
if pad_token_id is None:
|
896 |
+
raise ValueError("`pad_token_id` has to be defined")
|
897 |
+
decoded.fill_(pad_token_id)
|
898 |
+
|
899 |
+
if indices is not None:
|
900 |
+
indices.fill_(-1)
|
901 |
+
|
902 |
+
# fill with hypotheses and eos_token_id if the latter fits in
|
903 |
+
for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
|
904 |
+
decoded[i, : sent_lengths[i]] = hypo
|
905 |
+
|
906 |
+
if indices is not None:
|
907 |
+
indices[i, : len(best_idx)] = torch.tensor(best_idx)
|
908 |
+
|
909 |
+
if sent_lengths[i] < sent_max_len:
|
910 |
+
# inserting only the first eos_token_id
|
911 |
+
decoded[i, sent_lengths[i]] = eos_token_id[0]
|
912 |
+
|
913 |
+
return UserDict(
|
914 |
+
{
|
915 |
+
"sequences": decoded,
|
916 |
+
"sequence_scores": best_scores,
|
917 |
+
"beam_indices": indices,
|
918 |
+
}
|
919 |
+
)
|
920 |
+
|
921 |
+
|
922 |
+
class BeamHypotheses:
|
923 |
+
def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, max_length: Optional[int] = None):
|
924 |
+
"""
|
925 |
+
Initialize n-best list of hypotheses.
|
926 |
+
"""
|
927 |
+
self.length_penalty = length_penalty
|
928 |
+
self.early_stopping = early_stopping
|
929 |
+
self.max_length = max_length
|
930 |
+
self.num_beams = num_beams
|
931 |
+
self.beams = []
|
932 |
+
self.worst_score = 1e9
|
933 |
+
|
934 |
+
if not isinstance(self.early_stopping, bool) and self.max_length is None:
|
935 |
+
raise ValueError(
|
936 |
+
"When `do_early_stopping` is set to a string, `max_length` must be defined. Ensure it is passed to the"
|
937 |
+
" BeamScorer class instance at initialization time."
|
938 |
+
)
|
939 |
+
|
940 |
+
def __len__(self):
|
941 |
+
"""
|
942 |
+
Number of hypotheses in the list.
|
943 |
+
"""
|
944 |
+
return len(self.beams)
|
945 |
+
|
946 |
+
def add(
|
947 |
+
self,
|
948 |
+
hyp: torch.LongTensor,
|
949 |
+
sum_logprobs: float,
|
950 |
+
beam_indices: Optional[torch.LongTensor] = None,
|
951 |
+
generated_len: Optional[int] = None,
|
952 |
+
):
|
953 |
+
"""
|
954 |
+
Add a new hypothesis to the list.
|
955 |
+
"""
|
956 |
+
if generated_len is not None:
|
957 |
+
score = sum_logprobs / (generated_len**self.length_penalty)
|
958 |
+
# This 'else' case exists for retrocompatibility
|
959 |
+
else:
|
960 |
+
score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty)
|
961 |
+
|
962 |
+
if len(self) < self.num_beams or score > self.worst_score:
|
963 |
+
self.beams.append((score, hyp, beam_indices))
|
964 |
+
if len(self) > self.num_beams:
|
965 |
+
sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)])
|
966 |
+
del self.beams[sorted_next_scores[0][1]]
|
967 |
+
self.worst_score = sorted_next_scores[1][0]
|
968 |
+
else:
|
969 |
+
self.worst_score = min(score, self.worst_score)
|
970 |
+
|
971 |
+
def is_done(self, best_sum_logprobs: float, cur_len: int, decoder_prompt_len: Optional[int] = 0) -> bool:
|
972 |
+
"""
|
973 |
+
If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
|
974 |
+
one in the heap, then we are done with this sentence.
|
975 |
+
"""
|
976 |
+
|
977 |
+
if len(self) < self.num_beams:
|
978 |
+
return False
|
979 |
+
|
980 |
+
# `True`: stop as soon as at least `num_beams` hypotheses are finished
|
981 |
+
if self.early_stopping is True:
|
982 |
+
return True
|
983 |
+
# `False`: heuristic -- compute best possible score from `cur_len`, even though it is not entirely accurate
|
984 |
+
# when `length_penalty` is positive. See the discussion below for more details.
|
985 |
+
# https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
|
986 |
+
elif self.early_stopping is False:
|
987 |
+
highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty
|
988 |
+
ret = self.worst_score >= highest_attainable_score
|
989 |
+
return ret
|
990 |
+
# `"never"`: compute the best possible score, depending on the signal of `length_penalty`
|
991 |
+
else:
|
992 |
+
# `length_penalty` > 0.0 -> max denominator is obtained from `max_length`, not from `cur_len` -> min
|
993 |
+
# abs(`highest_attainable_score`) is obtained -> `highest_attainable_score` is negative, hence we obtain
|
994 |
+
# its max this way
|
995 |
+
if self.length_penalty > 0.0:
|
996 |
+
if self.max_length <= decoder_prompt_len:
|
997 |
+
raise ValueError("max_length is not larger than decoder prompt length")
|
998 |
+
highest_attainable_score = (
|
999 |
+
best_sum_logprobs / (self.max_length - decoder_prompt_len) ** self.length_penalty
|
1000 |
+
)
|
1001 |
+
# the opposite logic applies here (max `highest_attainable_score` from `cur_len`)
|
1002 |
+
else:
|
1003 |
+
highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty
|
1004 |
+
ret = self.worst_score >= highest_attainable_score
|
1005 |
+
return ret
|
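For reference, a tiny numeric sketch (editorial, not part of the diff) of the BeamHypotheses bookkeeping just defined: a hypothesis score is its summed log-probs divided by generated_len ** length_penalty, and with early_stopping=False is_done applies the heuristic comparison against the worst kept score.

import torch
from transformers.generation.beam_search import BeamHypotheses

hyps = BeamHypotheses(num_beams=2, length_penalty=1.0, early_stopping=False)
hyps.add(torch.tensor([7, 8, 9]), sum_logprobs=-3.0, generated_len=3)  # score = -3.0 / 3**1.0 = -1.0
hyps.add(torch.tensor([7, 8]), sum_logprobs=-1.0, generated_len=2)     # score = -1.0 / 2**1.0 = -0.5
print(len(hyps), hyps.worst_score)  # 2 -1.0

# The heap is full, so is_done applies the early_stopping=False heuristic:
# the best attainable score -4.0 / 4 = -1.0 cannot beat worst_score = -1.0.
print(hyps.is_done(best_sum_logprobs=-4.0, cur_len=4))  # True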
venv/lib/python3.10/site-packages/transformers/generation/candidate_generator.py
ADDED
@@ -0,0 +1,425 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
import copy
|
17 |
+
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
|
18 |
+
|
19 |
+
import torch
|
20 |
+
|
21 |
+
from ..cache_utils import DynamicCache
|
22 |
+
|
23 |
+
|
24 |
+
if TYPE_CHECKING:
|
25 |
+
from ..modeling_utils import PreTrainedModel
|
26 |
+
from .configuration_utils import GenerationConfig
|
27 |
+
from .logits_process import LogitsProcessorList
|
28 |
+
|
29 |
+
|
30 |
+
class CandidateGenerator:
|
31 |
+
"""Abstract base class for all candidate generators that can be applied during assisted generation."""
|
32 |
+
|
33 |
+
def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
|
34 |
+
"""
|
35 |
+
Fetches the candidates to be tried for the current input.
|
36 |
+
|
37 |
+
Args:
|
38 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
39 |
+
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
|
40 |
+
|
41 |
+
Return:
|
42 |
+
`torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
|
43 |
+
assessed by the model and, optionally, a `torch.FloatTensor` of shape `(batch_size, candidate_length,
|
44 |
+
vocabulary_size)` containing the logits associated to each candidate.
|
45 |
+
"""
|
46 |
+
raise NotImplementedError(
|
47 |
+
f"{self.__class__} is an abstract class. Only classes inheriting this class can call `get_candidates`."
|
48 |
+
)
|
49 |
+
|
50 |
+
def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
|
51 |
+
"""
|
52 |
+
Updates the candidate generation strategy based on the outcomes.
|
53 |
+
|
54 |
+
Args:
|
55 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
56 |
+
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
|
57 |
+
scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
|
58 |
+
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using
|
59 |
+
beam search or log softmax for each vocabulary token when using beam search
|
60 |
+
num_matches (`int`):
|
61 |
+
The number of matches between the candidate sequences and the model predictions.
|
62 |
+
"""
|
63 |
+
raise NotImplementedError(
|
64 |
+
f"{self.__class__} is an abstract class. Only classes inheriting this class can call "
|
65 |
+
"`update_candidate_strategy`."
|
66 |
+
)
|
67 |
+
|
68 |
+
|
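A minimal illustrative subclass (editorial sketch) that satisfies the CandidateGenerator interface above with a deliberately trivial strategy: it proposes no draft tokens and never adapts. It only demonstrates the get_candidates / update_candidate_strategy contract, not a useful assistant for speculative decoding.

from typing import Optional, Tuple

import torch

from transformers.generation.candidate_generator import CandidateGenerator


class NoOpCandidateGenerator(CandidateGenerator):
    """Proposes the current sequence unchanged, i.e. zero candidate tokens."""

    def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
        # No draft tokens are appended and no candidate logits are returned.
        return input_ids, None

    def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
        # A trivial strategy has nothing to adapt.
        pass


generator = NoOpCandidateGenerator()
candidates, candidate_logits = generator.get_candidates(torch.tensor([[1, 2, 3]]))
print(candidates.shape, candidate_logits)  # torch.Size([1, 3]) None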
69 |
+
class AssistedCandidateGenerator(CandidateGenerator):
|
70 |
+
"""
|
71 |
+
`CandidateGenerator` class to be used for assisted generation and speculative decoding. This class generates
|
72 |
+
candidates through the use of a smaller model. Read the following blog post for more information:
|
73 |
+
https://huggingface.co/blog/assisted-generation
|
74 |
+
|
75 |
+
Args:
|
76 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
77 |
+
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
|
78 |
+
assistant_model (`PreTrainedModel`):
|
79 |
+
The model to be used for generating candidates. This model should be smaller than the main model.
|
80 |
+
generation_config (`~generation.GenerationConfig`, *optional*):
|
81 |
+
The generation configuration to be used as base parametrization for the generation call.
|
82 |
+
logits_processor (`LogitsProcessorList`):
|
83 |
+
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
|
84 |
+
used to modify the prediction scores of the language modeling head applied at each generation step.
|
85 |
+
model_kwargs (`Dict`):
|
86 |
+
The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant
|
87 |
+
model as well.
|
88 |
+
inputs_tensor (`torch.Tensor`, *optional*):
|
89 |
+
The model input tensor. In encoder-decoder models, this is the encoder input.
|
90 |
+
"""
|
91 |
+
|
92 |
+
def __init__(
|
93 |
+
self,
|
94 |
+
input_ids: torch.LongTensor,
|
95 |
+
assistant_model: "PreTrainedModel",
|
96 |
+
generation_config: "GenerationConfig",
|
97 |
+
logits_processor: "LogitsProcessorList",
|
98 |
+
model_kwargs: Dict,
|
99 |
+
inputs_tensor: Optional[torch.Tensor] = None,
|
100 |
+
):
|
101 |
+
# Make sure all data is on the same device as the assistant model
|
102 |
+
device = assistant_model.device
|
103 |
+
input_ids = input_ids.to(device)
|
104 |
+
if inputs_tensor is not None:
|
105 |
+
inputs_tensor = inputs_tensor.to(device)
|
106 |
+
|
107 |
+
# Prepare the assistant and the starting number of candidate tokens
|
108 |
+
self.assistant_model = assistant_model
|
109 |
+
self.num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens
|
110 |
+
|
111 |
+
# Prepare the kwargs for the assistant model
|
112 |
+
assistant_kwargs = {}
|
113 |
+
for key, value in model_kwargs.items(): # deepcopy crashes if we attempt to copy encoder outputs with grads
|
114 |
+
if key not in ("encoder_outputs", "assistant_encoder_outputs"):
|
115 |
+
assistant_kwargs[key] = (
|
116 |
+
value.detach().to(device) if isinstance(value, torch.Tensor) else copy.deepcopy(value)
|
117 |
+
)
|
118 |
+
|
119 |
+
if "assistant_encoder_outputs" in model_kwargs:
|
120 |
+
assistant_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"]
|
121 |
+
elif assistant_model.config.is_encoder_decoder:
|
122 |
+
inputs_tensor, model_input_name, assistant_kwargs = assistant_model._prepare_model_inputs(
|
123 |
+
inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_kwargs
|
124 |
+
)
|
125 |
+
assistant_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(
|
126 |
+
inputs_tensor, assistant_kwargs, model_input_name
|
127 |
+
)
|
128 |
+
elif "encoder_outputs" in model_kwargs:
|
129 |
+
assistant_kwargs["encoder_outputs"] = model_kwargs["encoder_outputs"]
|
130 |
+
self.assistant_kwargs = assistant_kwargs
|
131 |
+
|
132 |
+
# Prepare assistant model's keys of inputs
|
133 |
+
if assistant_model.config.is_encoder_decoder:
|
134 |
+
# both are encoder-decoder
|
135 |
+
self.input_ids_key = "decoder_input_ids"
|
136 |
+
elif "encoder_outputs" in assistant_kwargs:
|
137 |
+
# special case for encoder-decoder with decoder-only assistant (like DistilWhisper)
|
138 |
+
self.input_ids_key = "input_ids"
|
139 |
+
self.assistant_kwargs["attention_mask"] = self.assistant_kwargs.get(
|
140 |
+
"decoder_attention_mask",
|
141 |
+
torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long),
|
142 |
+
)
|
143 |
+
else:
|
144 |
+
# both are decoder-only
|
145 |
+
self.input_ids_key = "input_ids"
|
146 |
+
|
147 |
+
# Prepare generation-related options.
|
148 |
+
self.logits_processor = logits_processor
|
149 |
+
self.generation_config = copy.deepcopy(generation_config)
|
150 |
+
self.generation_config.return_dict_in_generate = True
|
151 |
+
self.generation_config.output_scores = True
|
152 |
+
|
153 |
+
# avoid unnecessary warnings that min_length is larger than max_new_tokens
|
154 |
+
self.main_model_min_length = self.generation_config.min_length
|
155 |
+
self.generation_config.min_length = 0
|
156 |
+
self.generation_config.min_new_tokens = None
|
157 |
+
|
158 |
+
def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
|
159 |
+
"""
|
160 |
+
Fetches the candidates to be tried for the current input.
|
161 |
+
|
162 |
+
Args:
|
163 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
164 |
+
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
|
165 |
+
|
166 |
+
Return:
|
167 |
+
`torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
|
168 |
+
assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length,
|
169 |
+
vocabulary_size)` containing the logits associated to each candidate.
|
170 |
+
"""
|
171 |
+
input_ids = input_ids.to(self.assistant_model.device)
|
172 |
+
|
173 |
+
# Don't generate more than `max_length - 1` candidates since the target model generates one extra token.
|
174 |
+
new_cur_len = input_ids.shape[-1]
|
175 |
+
max_new_tokens = min(int(self.num_assistant_tokens), self.generation_config.max_length - new_cur_len - 1)
|
176 |
+
min_new_tokens = max(min(max_new_tokens, self.main_model_min_length - new_cur_len), 0)
|
177 |
+
if max_new_tokens == 0:
|
178 |
+
return input_ids, None
|
179 |
+
|
180 |
+
# 1. If it is not the first round of candidate generation, prepare the inputs based on the input_ids length
|
181 |
+
# (which implicitly contains the number of accepted candidates from the previous round)
|
182 |
+
has_past_key_values = self.assistant_kwargs.get("past_key_values", None) is not None
|
183 |
+
if has_past_key_values:
|
184 |
+
new_cache_size = new_cur_len - 1
|
185 |
+
self.assistant_kwargs["past_key_values"] = _crop_past_key_values(
|
186 |
+
self.assistant_model, self.assistant_kwargs["past_key_values"], new_cache_size - 1
|
187 |
+
) # the assistant does not have the token after the last match, hence the -1
|
188 |
+
|
189 |
+
self.assistant_kwargs = _prepare_attention_mask(
|
190 |
+
self.assistant_kwargs, new_cur_len, self.assistant_model.config.is_encoder_decoder
|
191 |
+
)
|
192 |
+
self.assistant_kwargs = _prepare_token_type_ids(self.assistant_kwargs, new_cur_len)
|
193 |
+
|
194 |
+
# 2. Forecast next N tokens using the assistant model.
|
195 |
+
assistant_generation_kwargs = {
|
196 |
+
self.input_ids_key: input_ids,
|
197 |
+
"min_new_tokens": min_new_tokens,
|
198 |
+
"max_new_tokens": max_new_tokens,
|
199 |
+
"generation_config": self.generation_config,
|
200 |
+
"logits_processor": self.logits_processor,
|
201 |
+
}
|
202 |
+
|
203 |
+
assistant_output = self.assistant_model.generate(**assistant_generation_kwargs, **self.assistant_kwargs)
|
204 |
+
|
205 |
+
# 3. Update variables for the next round of candidate generation
|
206 |
+
self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values
|
207 |
+
|
208 |
+
# 4. Prepare variables for output
|
209 |
+
candidate_logits = torch.stack(assistant_output.scores, dim=1)
|
210 |
+
candidate_ids = assistant_output.sequences
|
211 |
+
return candidate_ids, candidate_logits
|
212 |
+
|
213 |
+
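The budget computed at the top of `get_candidates` above caps the assistant at `max_length - 1` total tokens, because the target model appends one verified token per round. A small worked sketch with made-up numbers:

```python
# Standalone sketch of the candidate-budget arithmetic used in `get_candidates` above.
# All numbers are illustrative.
num_assistant_tokens = 5          # current speculative budget of the assistant
max_length = 20                   # overall generation limit from the generation config
main_model_min_length = 8         # min_length requested for the main model
new_cur_len = 17                  # tokens accumulated so far (prompt + accepted tokens)

# The assistant may propose at most `max_length - 1 - new_cur_len` tokens, because the
# target model always adds one extra token when verifying the candidates.
max_new_tokens = min(int(num_assistant_tokens), max_length - new_cur_len - 1)
min_new_tokens = max(min(max_new_tokens, main_model_min_length - new_cur_len), 0)

print(max_new_tokens, min_new_tokens)  # 2 0 -> only two candidate tokens fit in the budget
```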
def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
|
214 |
+
"""
|
215 |
+
Updates the candidate generation strategy based on the outcomes.
|
216 |
+
|
217 |
+
Args:
|
218 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
219 |
+
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
|
220 |
+
scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
|
221 |
+
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using
|
222 |
+
beam search or log softmax for each vocabulary token when using beam search
|
223 |
+
num_matches (`int`):
|
224 |
+
The number of matches between the candidate sequences and the model predictions.
|
225 |
+
"""
|
226 |
+
# Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic,
|
227 |
+
# probably can be improved -- we want to balance the benefits of getting assistant tokens correct with the
|
228 |
+
# cost of forecasting incorrect assistant tokens.
|
229 |
+
if self.assistant_model.generation_config.num_assistant_tokens_schedule in {
|
230 |
+
"heuristic",
|
231 |
+
"heuristic_transient",
|
232 |
+
}:
|
233 |
+
if num_matches == int(self.num_assistant_tokens):
|
234 |
+
self.num_assistant_tokens += 2.0
|
235 |
+
else:
|
236 |
+
self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0)
|
237 |
+
|
238 |
+
|
239 |
+
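End to end, this class is driven by passing an `assistant_model` to `generate`, as described in the generation configuration later in this diff. A hedged usage sketch follows; the checkpoint names are placeholders, not recommendations:

```python
# Sketch of driving assisted generation through the public API. Checkpoint names are
# placeholders; any compatible main/assistant causal LM pair sharing a tokenizer would do.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("big-model-checkpoint")            # placeholder
model = AutoModelForCausalLM.from_pretrained("big-model-checkpoint")         # placeholder
assistant = AutoModelForCausalLM.from_pretrained("small-model-checkpoint")   # placeholder

inputs = tokenizer("The quick brown fox", return_tensors="pt")

# Passing `assistant_model` switches `generate` into assisted decoding: the assistant drafts
# candidate tokens each round and the main model verifies them in a single forward pass.
outputs = model.generate(**inputs, assistant_model=assistant, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

When every drafted token is accepted, the heuristic schedule in `update_candidate_strategy` above raises the draft budget by 2; otherwise it shrinks it, never below 1.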
class PromptLookupCandidateGenerator(CandidateGenerator):
|
240 |
+
"""
|
241 |
+
`CandidateGenerator` class to be used for prompt lookup generation. This class generates candidates by looking up
|
242 |
+
likely continuations in the provided prompt (input_ids) itself.
|
243 |
+
Read the following blog post for more information: https://github.com/apoorvumang/prompt-lookup-decoding
|
244 |
+
|
245 |
+
Args:
|
246 |
+
max_matching_ngram_size (`int`):
|
247 |
+
The maximum ngram size to be considered for matching in the prompt
|
248 |
+
num_output_tokens (`int`):
|
249 |
+
The number of tokens to be output as candidate tokens.
|
250 |
+
max_length (`int`):
|
251 |
+
The maximum total number of tokens that can be generated. For decoder-only models, this includes the prompt length.
|
252 |
+
Defaults to 20, which is the max length used as default in generation config.
|
253 |
+
"""
|
254 |
+
|
255 |
+
def __init__(
|
256 |
+
self,
|
257 |
+
num_output_tokens: int = 10,
|
258 |
+
max_matching_ngram_size: int = None,
|
259 |
+
max_length: int = 20,
|
260 |
+
):
|
261 |
+
self.num_output_tokens = num_output_tokens
|
262 |
+
self.max_matching_ngram_size = max_matching_ngram_size if max_matching_ngram_size else 2
|
263 |
+
self.max_length = max_length
|
264 |
+
|
265 |
+
if self.max_matching_ngram_size <= 0 or self.num_output_tokens <= 0:
|
266 |
+
raise ValueError("Invalid max_matching_ngram_size or num_output_tokens")
|
267 |
+
|
268 |
+
def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
|
269 |
+
"""
|
270 |
+
Fetches the candidates to be tried for the current input.
|
271 |
+
|
272 |
+
Args:
|
273 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
274 |
+
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
|
275 |
+
|
276 |
+
Return:
|
277 |
+
`torch.LongTensor` of shape `(num_candidates, candidate_length)`: The candidate sequences to be tried.
|
278 |
+
"""
|
279 |
+
input_length = input_ids.size(1)
|
280 |
+
|
281 |
+
# Don't generate more than `max_length - 1` candidates since the target model generates one extra token.
|
282 |
+
if self.max_length == input_length + 1:
|
283 |
+
return input_ids, None
|
284 |
+
|
285 |
+
chosen_ids = None
|
286 |
+
match_found = False
|
287 |
+
for ngram_size in range(min(self.max_matching_ngram_size, input_length - 1), 0, -1):
|
288 |
+
# Create sliding windows of size ngram_size
|
289 |
+
windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)
|
290 |
+
|
291 |
+
# Convert ngram to a tensor for comparison
|
292 |
+
ngram_tensor = input_ids[0, -ngram_size:]
|
293 |
+
|
294 |
+
# Find where the windows match the ngram
|
295 |
+
matches = (windows == ngram_tensor).all(dim=2)
|
296 |
+
|
297 |
+
# Get the indices of matches
|
298 |
+
match_indices = matches.nonzero(as_tuple=True)[1]
|
299 |
+
|
300 |
+
# Iterate through match indices to find a valid continuation
|
301 |
+
for idx in match_indices:
|
302 |
+
start_idx = idx + ngram_size
|
303 |
+
end_idx = start_idx + self.num_output_tokens
|
304 |
+
end_idx = min(end_idx, input_length, self.max_length)
|
305 |
+
|
306 |
+
if start_idx < end_idx:
|
307 |
+
chosen_ids = input_ids[0, start_idx:end_idx]
|
308 |
+
match_found = True
|
309 |
+
break
|
310 |
+
if match_found:
|
311 |
+
break
|
312 |
+
|
313 |
+
if chosen_ids is None or len(chosen_ids) == 0:
|
314 |
+
# In case we didn't find a match return the input sequence unchanged, reverts back to autoregressive decoding
|
315 |
+
return input_ids, None
|
316 |
+
|
317 |
+
# Now we need to extend input_ids with chosen_ids
|
318 |
+
chosen_ids = chosen_ids.unsqueeze(0)
|
319 |
+
candidate_input_ids = torch.cat((input_ids, chosen_ids), dim=1)
|
320 |
+
# assisted_generation expects logits as well, but we don't have those here, so returning None
|
321 |
+
return candidate_input_ids, None
|
322 |
+
|
323 |
+
def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
|
324 |
+
"""
|
325 |
+
Updates the candidate generation strategy based on the outcomes.
|
326 |
+
|
327 |
+
Args:
|
328 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
329 |
+
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
|
330 |
+
scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
|
331 |
+
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using
|
332 |
+
beam search or log softmax for each vocabulary token when using beam search
|
333 |
+
num_matches (`int`):
|
334 |
+
The number of matches between the candidate sequences and the model predictions.
|
335 |
+
"""
|
336 |
+
# Currently does nothing
|
337 |
+
return
|
338 |
+
|
339 |
+
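The matching step in `get_candidates` above slides an ngram window over the prompt and, at the first hit, proposes the tokens that followed it. A standalone toy sketch of that step (toy tensor values, no model involved):

```python
# Standalone sketch of the ngram-window matching used by PromptLookupCandidateGenerator above.
import torch

input_ids = torch.tensor([[5, 6, 7, 8, 5, 6]])  # the prompt ends with the bigram (5, 6)
ngram_size = 2
num_output_tokens = 3

# Sliding windows of size `ngram_size` over the prompt: shape (batch, n_windows, ngram_size)
windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)
ngram_tensor = input_ids[0, -ngram_size:]                            # tensor([5, 6])
match_indices = (windows == ngram_tensor).all(dim=2).nonzero(as_tuple=True)[1]

# The first match is at position 0, so the proposed continuation is what followed it.
start = int(match_indices[0]) + ngram_size
chosen = input_ids[0, start : start + num_output_tokens]
print(chosen.tolist())  # [7, 8, 5]
```

In the library this path is enabled by passing `prompt_lookup_num_tokens` (and optionally `max_matching_ngram_size`) to `generate`.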
def _crop_past_key_values(model, past_key_values, maximum_length):
    """Crops the past key values up to a certain maximum length."""
    new_past = []
    if model.config.is_encoder_decoder:
        for idx in range(len(past_key_values)):
            new_past.append(
                (
                    past_key_values[idx][0][:, :, :maximum_length, :],
                    past_key_values[idx][1][:, :, :maximum_length, :],
                    past_key_values[idx][2],
                    past_key_values[idx][3],
                )
            )
        past_key_values = tuple(new_past)
    # bloom is special
    elif "bloom" in model.__class__.__name__.lower() or (
        model.config.architectures is not None and "bloom" in model.config.architectures[0].lower()
    ):
        for idx in range(len(past_key_values)):
            new_past.append(
                (
                    past_key_values[idx][0][:, :, :maximum_length],
                    past_key_values[idx][1][:, :maximum_length, :],
                )
            )
        past_key_values = tuple(new_past)
    # gptbigcode is too
    elif "gptbigcode" in model.__class__.__name__.lower() or (
        model.config.architectures is not None and "gptbigcode" in model.config.architectures[0].lower()
    ):
        if model.config.multi_query:
            for idx in range(len(past_key_values)):
                past_key_values[idx] = past_key_values[idx][:, :maximum_length, :]
        else:
            for idx in range(len(past_key_values)):
                past_key_values[idx] = past_key_values[idx][:, :, :maximum_length, :]
    elif isinstance(past_key_values, DynamicCache):
        for idx in range(len(past_key_values.key_cache)):
            if past_key_values.value_cache[idx].shape[-1] != 0:
                past_key_values.key_cache[idx] = past_key_values.key_cache[idx][:, :, :maximum_length, :]
                past_key_values.value_cache[idx] = past_key_values.value_cache[idx][:, :, :maximum_length, :]

    elif past_key_values is not None:
        for idx in range(len(past_key_values)):
            new_past.append(
                (
                    past_key_values[idx][0][:, :, :maximum_length, :],
                    past_key_values[idx][1][:, :, :maximum_length, :],
                )
            )
        past_key_values = tuple(new_past)
    return past_key_values


def _prepare_attention_mask(model_kwargs: Dict[str, Any], new_length: int, is_encoder_decoder: bool) -> Dict[str, Any]:
    """Expands or crops the model's mask for decoding purposes, to the defined length"""

    mask_key = "decoder_attention_mask" if is_encoder_decoder else "attention_mask"
    if mask_key not in model_kwargs:
        return model_kwargs

    mask = model_kwargs[mask_key]
    mask_length_diff = new_length - mask.shape[1]

    if mask_length_diff < 0:
        model_kwargs[mask_key] = mask[:, :mask_length_diff]
    elif mask_length_diff > 0:
        model_kwargs[mask_key] = torch.cat([mask, mask.new_ones((mask.shape[0], mask_length_diff))], dim=-1)
    return model_kwargs


def _prepare_token_type_ids(model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]:
    """Expands or crops the model's token_type_ids for decoding purposes, to the defined length"""
    if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None:
        return model_kwargs

    token_type_ids = model_kwargs["token_type_ids"]
    final_token_type = token_type_ids[:, -1].unsqueeze(-1)
    type_length_diff = new_length - token_type_ids.shape[1]

    if type_length_diff < 0:
        token_type_ids = token_type_ids[:, :type_length_diff]
    elif type_length_diff > 0:
        token_type_copies = final_token_type.repeat(1, type_length_diff)
        model_kwargs["token_type_ids"] = torch.cat([model_kwargs["token_type_ids"], token_type_copies], dim=-1)
    return model_kwargs
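To make the helper semantics concrete, here is a toy illustration of the expand-or-crop behaviour of `_prepare_attention_mask`, inlined rather than imported since the function above is private:

```python
# Toy illustration of the mask expansion/cropping performed by `_prepare_attention_mask` above.
import torch

model_kwargs = {"attention_mask": torch.ones((1, 4), dtype=torch.long)}

# Growing to length 6 appends ones on the right; shrinking would crop from the right instead.
new_length = 6
mask = model_kwargs["attention_mask"]
diff = new_length - mask.shape[1]
if diff > 0:
    model_kwargs["attention_mask"] = torch.cat([mask, mask.new_ones((mask.shape[0], diff))], dim=-1)
elif diff < 0:
    model_kwargs["attention_mask"] = mask[:, :diff]

print(model_kwargs["attention_mask"].shape)  # torch.Size([1, 6])
```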
venv/lib/python3.10/site-packages/transformers/generation/configuration_utils.py
ADDED
@@ -0,0 +1,1092 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generation configuration class and utilities."""

import copy
import json
import os
import warnings
from typing import TYPE_CHECKING, Any, Dict, Optional, Union

from .. import __version__
from ..configuration_utils import PretrainedConfig
from ..utils import (
    GENERATION_CONFIG_NAME,
    ExplicitEnum,
    PushToHubMixin,
    cached_file,
    download_url,
    extract_commit_hash,
    is_remote_url,
    logging,
)


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel


logger = logging.get_logger(__name__)
METADATA_FIELDS = ("_from_model_config", "_commit_hash", "_original_object_hash", "transformers_version")


class GenerationMode(ExplicitEnum):
    """
    Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method.
    """

    # Non-beam methods
    CONTRASTIVE_SEARCH = "contrastive_search"
    GREEDY_SEARCH = "greedy_search"
    SAMPLE = "sample"
    ASSISTED_GENERATION = "assisted_generation"
    # Beam methods
    BEAM_SEARCH = "beam_search"
    BEAM_SAMPLE = "beam_sample"
    CONSTRAINED_BEAM_SEARCH = "constrained_beam_search"
    GROUP_BEAM_SEARCH = "group_beam_search"

class GenerationConfig(PushToHubMixin):
|
63 |
+
# no-format
|
64 |
+
r"""
|
65 |
+
Class that holds a configuration for a generation task. A `generate` call supports the following generation methods
|
66 |
+
for text-decoder, text-to-text, speech-to-text, and vision-to-text models:
|
67 |
+
|
68 |
+
- *greedy decoding* by calling [`~generation.GenerationMixin._greedy_search`] if `num_beams=1` and
|
69 |
+
`do_sample=False`
|
70 |
+
- *contrastive search* by calling [`~generation.GenerationMixin._contrastive_search`] if `penalty_alpha>0.`
|
71 |
+
and `top_k>1`
|
72 |
+
- *multinomial sampling* by calling [`~generation.GenerationMixin._sample`] if `num_beams=1` and
|
73 |
+
`do_sample=True`
|
74 |
+
- *beam-search decoding* by calling [`~generation.GenerationMixin._beam_search`] if `num_beams>1` and
|
75 |
+
`do_sample=False`
|
76 |
+
- *beam-search multinomial sampling* by calling [`~generation.GenerationMixin._beam_sample`] if
|
77 |
+
`num_beams>1` and `do_sample=True`
|
78 |
+
- *diverse beam-search decoding* by calling [`~generation.GenerationMixin._group_beam_search`], if
|
79 |
+
`num_beams>1` and `num_beam_groups>1`
|
80 |
+
- *constrained beam-search decoding* by calling [`~generation.GenerationMixin._constrained_beam_search`], if
|
81 |
+
`constraints!=None` or `force_words_ids!=None`
|
82 |
+
- *assisted decoding* by calling [`~generation.GenerationMixin._assisted_decoding`], if
|
83 |
+
`assistant_model` or `prompt_lookup_num_tokens` is passed to `.generate()`
|
84 |
+
|
85 |
+
You do not need to call any of the above methods directly. Pass custom parameter values to '.generate()'. To learn
|
86 |
+
more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
|
87 |
+
|
88 |
+
<Tip>
|
89 |
+
|
90 |
+
A large number of these flags control the logits or the stopping criteria of the generation. Make sure you check
|
91 |
+
the [generate-related classes](https://huggingface.co/docs/transformers/internal/generation_utils) for a full
|
92 |
+
description of the possible manipulations, as well as examples of their usage.
|
93 |
+
|
94 |
+
</Tip>
|
95 |
+
|
96 |
+
Args:
|
97 |
+
> Parameters that control the length of the output
|
98 |
+
|
99 |
+
max_length (`int`, *optional*, defaults to 20):
|
100 |
+
The maximum length the generated tokens can have. Corresponds to the length of the input prompt +
|
101 |
+
`max_new_tokens`. Its effect is overridden by `max_new_tokens`, if also set.
|
102 |
+
max_new_tokens (`int`, *optional*):
|
103 |
+
The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.
|
104 |
+
min_length (`int`, *optional*, defaults to 0):
|
105 |
+
The minimum length of the sequence to be generated. Corresponds to the length of the input prompt +
|
106 |
+
`min_new_tokens`. Its effect is overridden by `min_new_tokens`, if also set.
|
107 |
+
min_new_tokens (`int`, *optional*):
|
108 |
+
The minimum numbers of tokens to generate, ignoring the number of tokens in the prompt.
|
109 |
+
early_stopping (`bool` or `str`, *optional*, defaults to `False`):
|
110 |
+
Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
|
111 |
+
`True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an
|
112 |
+
heuristic is applied and the generation stops when it is very unlikely to find better candidates;
|
113 |
+
`"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
|
114 |
+
beam search algorithm).
|
115 |
+
max_time(`float`, *optional*):
|
116 |
+
The maximum amount of time you allow the computation to run for in seconds. generation will still finish
|
117 |
+
the current pass after allocated time has been passed.
|
118 |
+
|
119 |
+
> Parameters that control the generation strategy used
|
120 |
+
|
121 |
+
do_sample (`bool`, *optional*, defaults to `False`):
|
122 |
+
Whether or not to use sampling; use greedy decoding otherwise.
|
123 |
+
num_beams (`int`, *optional*, defaults to 1):
|
124 |
+
Number of beams for beam search. 1 means no beam search.
|
125 |
+
num_beam_groups (`int`, *optional*, defaults to 1):
|
126 |
+
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
|
127 |
+
[this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
|
128 |
+
penalty_alpha (`float`, *optional*):
|
129 |
+
The value balances the model confidence and the degeneration penalty in contrastive search decoding.
|
130 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
131 |
+
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
|
132 |
+
speed up decoding.
|
133 |
+
|
134 |
+
> Parameters for manipulation of the model output logits
|
135 |
+
|
136 |
+
temperature (`float`, *optional*, defaults to 1.0):
|
137 |
+
The value used to modulate the next token probabilities.
|
138 |
+
top_k (`int`, *optional*, defaults to 50):
|
139 |
+
The number of highest probability vocabulary tokens to keep for top-k-filtering.
|
140 |
+
top_p (`float`, *optional*, defaults to 1.0):
|
141 |
+
If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to
|
142 |
+
`top_p` or higher are kept for generation.
|
143 |
+
typical_p (`float`, *optional*, defaults to 1.0):
|
144 |
+
Local typicality measures how similar the conditional probability of predicting a target token next is to
|
145 |
+
the expected conditional probability of predicting a random token next, given the partial text already
|
146 |
+
generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
|
147 |
+
add up to `typical_p` or higher are kept for generation. See [this
|
148 |
+
paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
|
149 |
+
epsilon_cutoff (`float`, *optional*, defaults to 0.0):
|
150 |
+
If set to float strictly between 0 and 1, only tokens with a conditional probability greater than
|
151 |
+
`epsilon_cutoff` will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the
|
152 |
+
size of the model. See [Truncation Sampling as Language Model
|
153 |
+
Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
|
154 |
+
eta_cutoff (`float`, *optional*, defaults to 0.0):
|
155 |
+
Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between
|
156 |
+
0 and 1, a token is only considered if it is greater than either `eta_cutoff` or `sqrt(eta_cutoff) *
|
157 |
+
exp(-entropy(softmax(next_token_logits)))`. The latter term is intuitively the expected next token
|
158 |
+
probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3,
|
159 |
+
depending on the size of the model. See [Truncation Sampling as Language Model
|
160 |
+
Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
|
161 |
+
diversity_penalty (`float`, *optional*, defaults to 0.0):
|
162 |
+
This value is subtracted from a beam's score if it generates a token same as any beam from other group at a
|
163 |
+
particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled.
|
164 |
+
repetition_penalty (`float`, *optional*, defaults to 1.0):
|
165 |
+
The parameter for repetition penalty. 1.0 means no penalty. See [this
|
166 |
+
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
|
167 |
+
encoder_repetition_penalty (`float`, *optional*, defaults to 1.0):
|
168 |
+
The parameter for encoder_repetition_penalty. An exponential penalty on sequences that are not in the
|
169 |
+
original input. 1.0 means no penalty.
|
170 |
+
length_penalty (`float`, *optional*, defaults to 1.0):
|
171 |
+
Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
|
172 |
+
the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
|
173 |
+
likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
|
174 |
+
`length_penalty` < 0.0 encourages shorter sequences.
|
175 |
+
no_repeat_ngram_size (`int`, *optional*, defaults to 0):
|
176 |
+
If set to int > 0, all ngrams of that size can only occur once.
|
177 |
+
bad_words_ids(`List[List[int]]`, *optional*):
|
178 |
+
List of list of token ids that are not allowed to be generated. Check
|
179 |
+
[`~generation.NoBadWordsLogitsProcessor`] for further documentation and examples.
|
180 |
+
force_words_ids(`List[List[int]]` or `List[List[List[int]]]`, *optional*):
|
181 |
+
List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of
|
182 |
+
words that must be included, the opposite to `bad_words_ids`. If given `List[List[List[int]]]`, this
|
183 |
+
triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one
|
184 |
+
can allow different forms of each word.
|
185 |
+
renormalize_logits (`bool`, *optional*, defaults to `False`):
|
186 |
+
Whether to renormalize the logits after applying all the logits processors or warpers (including the custom
|
187 |
+
ones). It's highly recommended to set this flag to `True` as the search algorithms suppose the score logits
|
188 |
+
are normalized but some logit processors or warpers break the normalization.
|
189 |
+
constraints (`List[Constraint]`, *optional*):
|
190 |
+
Custom constraints that can be added to the generation to ensure that the output will contain the use of
|
191 |
+
certain tokens as defined by `Constraint` objects, in the most sensible way possible.
|
192 |
+
forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`):
|
193 |
+
The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
|
194 |
+
multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
|
195 |
+
language token.
|
196 |
+
forced_eos_token_id (`Union[int, List[int]]`, *optional*, defaults to `model.config.forced_eos_token_id`):
|
197 |
+
The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a
|
198 |
+
list to set multiple *end-of-sequence* tokens.
|
199 |
+
remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`):
|
200 |
+
Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method to crash.
|
201 |
+
Note that using `remove_invalid_values` can slow down generation.
|
202 |
+
exponential_decay_length_penalty (`tuple(int, float)`, *optional*):
|
203 |
+
This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been
|
204 |
+
generated. The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where
|
205 |
+
penalty starts and `decay_factor` represents the factor of exponential decay
|
206 |
+
suppress_tokens (`List[int]`, *optional*):
|
207 |
+
A list of tokens that will be suppressed at generation. The `SupressTokens` logit processor will set their
|
208 |
+
log probs to `-inf` so that they are not sampled.
|
209 |
+
begin_suppress_tokens (`List[int]`, *optional*):
|
210 |
+
A list of tokens that will be suppressed at the beginning of the generation. The `SupressBeginTokens` logit
|
211 |
+
processor will set their log probs to `-inf` so that they are not sampled.
|
212 |
+
forced_decoder_ids (`List[List[int]]`, *optional*):
|
213 |
+
A list of pairs of integers which indicates a mapping from generation indices to token indices that will be
|
214 |
+
forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token
|
215 |
+
of index 123.
|
216 |
+
sequence_bias (`Dict[Tuple[int], float]`, *optional*)):
|
217 |
+
Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the
|
218 |
+
sequence being selected, while negative biases do the opposite. Check
|
219 |
+
[`~generation.SequenceBiasLogitsProcessor`] for further documentation and examples.
|
220 |
+
guidance_scale (`float`, *optional*):
|
221 |
+
The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
|
222 |
+
Higher guidance scale encourages the model to generate samples that are more closely linked to the input
|
223 |
+
prompt, usually at the expense of poorer quality.
|
224 |
+
low_memory (`bool`, *optional*):
|
225 |
+
Switch to sequential beam search and sequential topk for contrastive search to reduce peak memory.
|
226 |
+
Used with beam search and contrastive search.
|
227 |
+
|
228 |
+
|
229 |
+
> Parameters that define the output variables of `generate`
|
230 |
+
|
231 |
+
num_return_sequences(`int`, *optional*, defaults to 1):
|
232 |
+
The number of independently computed returned sequences for each element in the batch.
|
233 |
+
output_attentions (`bool`, *optional*, defaults to `False`):
|
234 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
235 |
+
tensors for more details.
|
236 |
+
output_hidden_states (`bool`, *optional*, defaults to `False`):
|
237 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
238 |
+
more details.
|
239 |
+
output_scores (`bool`, *optional*, defaults to `False`):
|
240 |
+
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
|
241 |
+
output_logits (`bool`, *optional*):
|
242 |
+
Whether or not to return the unprocessed prediction logit scores. See `logits` under returned tensors for
|
243 |
+
more details.
|
244 |
+
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
|
245 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
246 |
+
|
247 |
+
> Special tokens that can be used at generation time
|
248 |
+
|
249 |
+
pad_token_id (`int`, *optional*):
|
250 |
+
The id of the *padding* token.
|
251 |
+
bos_token_id (`int`, *optional*):
|
252 |
+
The id of the *beginning-of-sequence* token.
|
253 |
+
eos_token_id (`Union[int, List[int]]`, *optional*):
|
254 |
+
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
|
255 |
+
|
256 |
+
> Generation parameters exclusive to encoder-decoder models
|
257 |
+
|
258 |
+
encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
|
259 |
+
If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the
|
260 |
+
`decoder_input_ids`.
|
261 |
+
decoder_start_token_id (`Union[int, List[int]]`, *optional*):
|
262 |
+
If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token or a list of length
|
263 |
+
`batch_size`. Indicating a list enables different start ids for each element in the batch
|
264 |
+
(e.g. multilingual models with different target languages in one batch)
|
265 |
+
|
266 |
+
|
267 |
+
> Generation parameters exclusive to [assistant generation](https://arxiv.org/abs/2211.17192)
|
268 |
+
|
269 |
+
num_assistant_tokens (`int`, *optional*, defaults to 5):
|
270 |
+
Defines the number of _speculative tokens_ that shall be generated by the assistant model before being
|
271 |
+
checked by the target model at each iteration. Higher values for `num_assistant_tokens` make the generation
|
272 |
+
more _speculative_: if the assistant model is performant, larger speed-ups can be reached; if the assistant
|
273 |
+
model requires many corrections, lower speed-ups are reached.
|
274 |
+
|
275 |
+
num_assistant_tokens_schedule (`str`, *optional*, defaults to `"heuristic"`):
|
276 |
+
Defines the schedule at which max assistant tokens shall be changed during inference.
|
277 |
+
- `"heuristic"`: When all speculative tokens are correct, increase `num_assistant_tokens` by 2 else
|
278 |
+
reduce by 1. `num_assistant_tokens` value is persistent over multiple generation calls with the same assistant model.
|
279 |
+
- `"heuristic_transient"`: Same as `"heuristic"` but `num_assistant_tokens` is reset to its initial value after each generation call.
|
280 |
+
- `"constant"`: `num_assistant_tokens` stays unchanged during generation
|
281 |
+
|
282 |
+
prompt_lookup_num_tokens (`int`, *optional*, defaults to `None`):
|
283 |
+
The number of tokens to be output as candidate tokens.
|
284 |
+
|
285 |
+
max_matching_ngram_size (`int`, *optional*, defaults to `None`):
|
286 |
+
The maximum ngram size to be considered for matching in the prompt. Defaults to 2 if not provided.
|
287 |
+
|
288 |
+
> Parameters specific to the caching mechanism:
|
289 |
+
|
290 |
+
cache_implementation (`str`, *optional*, defaults to `None`):
|
291 |
+
Cache class that should be used when generating.
|
292 |
+
|
293 |
+
|
294 |
+
> Wild card
|
295 |
+
|
296 |
+
generation_kwargs:
|
297 |
+
Additional generation kwargs will be forwarded to the `generate` function of the model. Kwargs that are not
|
298 |
+
present in `generate`'s signature will be used in the model forward pass.
|
299 |
+
"""
|
300 |
+
|
301 |
+
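A short usage sketch of the configuration described by this docstring; the checkpoint name is a placeholder and the sampling values are arbitrary:

```python
# Sketch of configuring generation with the parameters documented above.
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("some-causal-lm")      # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained("some-causal-lm")   # placeholder checkpoint

generation_config = GenerationConfig(
    max_new_tokens=64,
    do_sample=True,        # enables multinomial sampling
    temperature=0.7,       # only meaningful when do_sample=True (see validate() below)
    top_p=0.9,
    pad_token_id=tokenizer.eos_token_id,
)

inputs = tokenizer("Once upon a time", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=generation_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```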
def __init__(self, **kwargs):
|
302 |
+
# Parameters that control the length of the output
|
303 |
+
self.max_length = kwargs.pop("max_length", 20)
|
304 |
+
self.max_new_tokens = kwargs.pop("max_new_tokens", None)
|
305 |
+
self.min_length = kwargs.pop("min_length", 0)
|
306 |
+
self.min_new_tokens = kwargs.pop("min_new_tokens", None)
|
307 |
+
self.early_stopping = kwargs.pop("early_stopping", False)
|
308 |
+
self.max_time = kwargs.pop("max_time", None)
|
309 |
+
|
310 |
+
# Parameters that control the generation strategy used
|
311 |
+
self.do_sample = kwargs.pop("do_sample", False)
|
312 |
+
self.num_beams = kwargs.pop("num_beams", 1)
|
313 |
+
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
|
314 |
+
self.penalty_alpha = kwargs.pop("penalty_alpha", None)
|
315 |
+
self.use_cache = kwargs.pop("use_cache", True)
|
316 |
+
|
317 |
+
# Parameters for manipulation of the model output logits
|
318 |
+
self.temperature = kwargs.pop("temperature", 1.0)
|
319 |
+
self.top_k = kwargs.pop("top_k", 50)
|
320 |
+
self.top_p = kwargs.pop("top_p", 1.0)
|
321 |
+
self.typical_p = kwargs.pop("typical_p", 1.0)
|
322 |
+
self.epsilon_cutoff = kwargs.pop("epsilon_cutoff", 0.0)
|
323 |
+
self.eta_cutoff = kwargs.pop("eta_cutoff", 0.0)
|
324 |
+
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
|
325 |
+
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
|
326 |
+
self.encoder_repetition_penalty = kwargs.pop("encoder_repetition_penalty", 1.0)
|
327 |
+
self.length_penalty = kwargs.pop("length_penalty", 1.0)
|
328 |
+
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
|
329 |
+
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
|
330 |
+
self.force_words_ids = kwargs.pop("force_words_ids", None)
|
331 |
+
self.renormalize_logits = kwargs.pop("renormalize_logits", False)
|
332 |
+
self.constraints = kwargs.pop("constraints", None)
|
333 |
+
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
|
334 |
+
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
|
335 |
+
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
|
336 |
+
self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
|
337 |
+
self.suppress_tokens = kwargs.pop("suppress_tokens", None)
|
338 |
+
self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None)
|
339 |
+
self.forced_decoder_ids = kwargs.pop("forced_decoder_ids", None)
|
340 |
+
self.sequence_bias = kwargs.pop("sequence_bias", None)
|
341 |
+
self.guidance_scale = kwargs.pop("guidance_scale", None)
|
342 |
+
self.low_memory = kwargs.pop("low_memory", None)
|
343 |
+
|
344 |
+
# Parameters that define the output variables of `generate`
|
345 |
+
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
|
346 |
+
self.output_attentions = kwargs.pop("output_attentions", False)
|
347 |
+
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
|
348 |
+
self.output_scores = kwargs.pop("output_scores", False)
|
349 |
+
self.output_logits = kwargs.pop("output_logits", None)
|
350 |
+
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
|
351 |
+
|
352 |
+
# Special tokens that can be used at generation time
|
353 |
+
self.pad_token_id = kwargs.pop("pad_token_id", None)
|
354 |
+
self.bos_token_id = kwargs.pop("bos_token_id", None)
|
355 |
+
self.eos_token_id = kwargs.pop("eos_token_id", None)
|
356 |
+
|
357 |
+
# Generation parameters exclusive to encoder-decoder models
|
358 |
+
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
|
359 |
+
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
|
360 |
+
|
361 |
+
# Assistant generation
|
362 |
+
self.num_assistant_tokens = kwargs.pop("num_assistant_tokens", 5)
|
363 |
+
self.num_assistant_tokens_schedule = kwargs.pop("num_assistant_tokens_schedule", "heuristic")
|
364 |
+
|
365 |
+
# Cache implementation
|
366 |
+
self.cache_implementation = kwargs.pop("cache_implementation", None)
|
367 |
+
|
368 |
+
# Prompt lookup decoding
|
369 |
+
self.prompt_lookup_num_tokens = kwargs.pop("prompt_lookup_num_tokens", None)
|
370 |
+
self.max_matching_ngram_size = kwargs.pop("max_matching_ngram_size", None)
|
371 |
+
|
372 |
+
# Wild card
|
373 |
+
self.generation_kwargs = kwargs.pop("generation_kwargs", {})
|
374 |
+
|
375 |
+
# The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub
|
376 |
+
# interface.
|
377 |
+
self._from_model_config = kwargs.pop("_from_model_config", False)
|
378 |
+
self._commit_hash = kwargs.pop("_commit_hash", None)
|
379 |
+
self.transformers_version = kwargs.pop("transformers_version", __version__)
|
380 |
+
|
381 |
+
# Additional attributes without default values
|
382 |
+
if not self._from_model_config:
|
383 |
+
# we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a
|
384 |
+
# model's default configuration file
|
385 |
+
for key, value in kwargs.items():
|
386 |
+
try:
|
387 |
+
setattr(self, key, value)
|
388 |
+
except AttributeError as err:
|
389 |
+
logger.error(f"Can't set {key} with value {value} for {self}")
|
390 |
+
raise err
|
391 |
+
|
392 |
+
# Validate the values of the attributes
|
393 |
+
self.validate(is_init=True)
|
394 |
+
|
395 |
+
def __hash__(self):
|
396 |
+
return hash(self.to_json_string(ignore_metadata=True))
|
397 |
+
|
398 |
+
def __eq__(self, other):
|
399 |
+
if not isinstance(other, GenerationConfig):
|
400 |
+
return False
|
401 |
+
|
402 |
+
self_without_metadata = self.to_json_string(use_diff=False, ignore_metadata=True)
|
403 |
+
other_without_metadata = other.to_json_string(use_diff=False, ignore_metadata=True)
|
404 |
+
return self_without_metadata == other_without_metadata
|
405 |
+
|
406 |
+
def __repr__(self):
|
407 |
+
return f"{self.__class__.__name__} {self.to_json_string(ignore_metadata=True)}"
|
408 |
+
|
409 |
+
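The equality and hash defined above deliberately ignore the fields listed in `METADATA_FIELDS`. A small sketch of the resulting behaviour:

```python
# Two configs that differ only in metadata fields (e.g. `transformers_version`) compare equal,
# because __eq__ and __hash__ serialize with ignore_metadata=True.
from transformers import GenerationConfig

a = GenerationConfig(do_sample=True, temperature=0.7)
b = GenerationConfig(do_sample=True, temperature=0.7)
b.transformers_version = "0.0.0"  # metadata only, ignored by the comparison

print(a == b)                                                   # True
print(a == GenerationConfig(do_sample=True, temperature=0.9))   # False, real attribute differs
```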
def get_generation_mode(self, assistant_model: Optional["PreTrainedModel"] = None) -> GenerationMode:
|
410 |
+
"""
|
411 |
+
Returns the generation mode triggered by the [`GenerationConfig`] instance.
|
412 |
+
|
413 |
+
Arg:
|
414 |
+
assistant_model (`PreTrainedModel`, *optional*):
|
415 |
+
The assistant model to be used for assisted generation. If set, the generation mode will be
|
416 |
+
assisted generation.
|
417 |
+
|
418 |
+
Returns:
|
419 |
+
`GenerationMode`: The generation mode triggered by the instance.
|
420 |
+
"""
|
421 |
+
# TODO joao: find out a way of not depending on external fields (e.g. `assistant_model`), then make this a
|
422 |
+
# property and part of the `__repr__`
|
423 |
+
if self.constraints is not None or self.force_words_ids is not None:
|
424 |
+
generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH
|
425 |
+
elif self.num_beams == 1:
|
426 |
+
if self.do_sample is False:
|
427 |
+
if (
|
428 |
+
self.top_k is not None
|
429 |
+
and self.top_k > 1
|
430 |
+
and self.penalty_alpha is not None
|
431 |
+
and self.penalty_alpha > 0
|
432 |
+
):
|
433 |
+
generation_mode = GenerationMode.CONTRASTIVE_SEARCH
|
434 |
+
else:
|
435 |
+
generation_mode = GenerationMode.GREEDY_SEARCH
|
436 |
+
else:
|
437 |
+
generation_mode = GenerationMode.SAMPLE
|
438 |
+
else:
|
439 |
+
if self.num_beam_groups > 1:
|
440 |
+
generation_mode = GenerationMode.GROUP_BEAM_SEARCH
|
441 |
+
elif self.do_sample is True:
|
442 |
+
generation_mode = GenerationMode.BEAM_SAMPLE
|
443 |
+
else:
|
444 |
+
generation_mode = GenerationMode.BEAM_SEARCH
|
445 |
+
|
446 |
+
# Assisted generation may extend some generation modes
|
447 |
+
if assistant_model is not None or self.prompt_lookup_num_tokens is not None:
|
448 |
+
if generation_mode in ("greedy_search", "sample"):
|
449 |
+
generation_mode = GenerationMode.ASSISTED_GENERATION
|
450 |
+
else:
|
451 |
+
raise ValueError(
|
452 |
+
"You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate "
|
453 |
+
"is only supported with Greedy Search and Sample."
|
454 |
+
)
|
455 |
+
return generation_mode
|
456 |
+
|
457 |
+
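A sketch of how common flag combinations resolve through `get_generation_mode` above; all values are illustrative:

```python
# Flag combinations and the GenerationMode they resolve to, per the logic above.
from transformers import GenerationConfig

print(GenerationConfig().get_generation_mode())                    # greedy search
print(GenerationConfig(do_sample=True).get_generation_mode())      # multinomial sampling
print(GenerationConfig(num_beams=4).get_generation_mode())         # beam search
print(GenerationConfig(num_beams=4, num_beam_groups=2, diversity_penalty=1.0).get_generation_mode())
# group (diverse) beam search
print(GenerationConfig(penalty_alpha=0.6, top_k=4).get_generation_mode())  # contrastive search
```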
def validate(self, is_init=False):
|
458 |
+
"""
|
459 |
+
Validates the values of the attributes of the [`GenerationConfig`] instance. Raises exceptions in the presence
|
460 |
+
of parameterization that can be detected as incorrect from the configuration instance alone.
|
461 |
+
|
462 |
+
Note that some parameters not validated here are best validated at generate runtime, as they may depend on
|
463 |
+
other inputs and/or the model, such as parameters related to the generation length.
|
464 |
+
|
465 |
+
Arg:
|
466 |
+
is_init (`bool`, *optional*, defaults to `False`):
|
467 |
+
Whether the validation is performed during the initialization of the instance.
|
468 |
+
"""
|
469 |
+
|
470 |
+
# Validation of individual attributes
|
471 |
+
if self.early_stopping not in {True, False, "never"}:
|
472 |
+
raise ValueError(f"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.")
|
473 |
+
if self.max_new_tokens is not None and self.max_new_tokens <= 0:
|
474 |
+
raise ValueError(f"`max_new_tokens` must be greater than 0, but is {self.max_new_tokens}.")
|
475 |
+
|
476 |
+
# Validation of attribute relations:
|
477 |
+
fix_location = ""
|
478 |
+
if is_init:
|
479 |
+
fix_location = (
|
480 |
+
" This was detected when initializing the generation config instance, which means the corresponding "
|
481 |
+
"file may hold incorrect parameterization and should be fixed."
|
482 |
+
)
|
483 |
+
|
484 |
+
# 1. detect sampling-only parameterization when not in sampling mode
|
485 |
+
if self.do_sample is False:
|
486 |
+
greedy_wrong_parameter_msg = (
|
487 |
+
"`do_sample` is set to `False`. However, `{flag_name}` is set to `{flag_value}` -- this flag is only "
|
488 |
+
"used in sample-based generation modes. You should set `do_sample=True` or unset `{flag_name}`."
|
489 |
+
+ fix_location
|
490 |
+
)
|
491 |
+
if self.temperature is not None and self.temperature != 1.0:
|
492 |
+
warnings.warn(
|
493 |
+
greedy_wrong_parameter_msg.format(flag_name="temperature", flag_value=self.temperature),
|
494 |
+
UserWarning,
|
495 |
+
)
|
496 |
+
if self.top_p is not None and self.top_p != 1.0:
|
497 |
+
warnings.warn(
|
498 |
+
greedy_wrong_parameter_msg.format(flag_name="top_p", flag_value=self.top_p),
|
499 |
+
UserWarning,
|
500 |
+
)
|
501 |
+
if self.typical_p is not None and self.typical_p != 1.0:
|
502 |
+
warnings.warn(
|
503 |
+
greedy_wrong_parameter_msg.format(flag_name="typical_p", flag_value=self.typical_p),
|
504 |
+
UserWarning,
|
505 |
+
)
|
506 |
+
if (
|
507 |
+
self.top_k is not None and self.top_k != 50 and self.penalty_alpha is None
|
508 |
+
): # contrastive search uses top_k
|
509 |
+
warnings.warn(
|
510 |
+
greedy_wrong_parameter_msg.format(flag_name="top_k", flag_value=self.top_k),
|
511 |
+
UserWarning,
|
512 |
+
)
|
513 |
+
if self.epsilon_cutoff is not None and self.epsilon_cutoff != 0.0:
|
514 |
+
warnings.warn(
|
515 |
+
greedy_wrong_parameter_msg.format(flag_name="epsilon_cutoff", flag_value=self.epsilon_cutoff),
|
516 |
+
UserWarning,
|
517 |
+
)
|
518 |
+
if self.eta_cutoff is not None and self.eta_cutoff != 0.0:
|
519 |
+
warnings.warn(
|
520 |
+
greedy_wrong_parameter_msg.format(flag_name="eta_cutoff", flag_value=self.eta_cutoff),
|
521 |
+
UserWarning,
|
522 |
+
)
|
523 |
+
|
524 |
+
# 2. detect beam-only parameterization when not in beam mode
|
525 |
+
if self.num_beams is None:
|
526 |
+
warnings.warn("`num_beams` is set to None - defaulting to 1.", UserWarning)
|
527 |
+
self.num_beams = 1
|
528 |
+
|
529 |
+
if self.num_beams == 1:
|
530 |
+
single_beam_wrong_parameter_msg = (
|
531 |
+
"`num_beams` is set to 1. However, `{flag_name}` is set to `{flag_value}` -- this flag is only used "
|
532 |
+
"in beam-based generation modes. You should set `num_beams>1` or unset `{flag_name}`." + fix_location
|
533 |
+
)
|
534 |
+
if self.early_stopping is not False:
|
535 |
+
warnings.warn(
|
536 |
+
single_beam_wrong_parameter_msg.format(flag_name="early_stopping", flag_value=self.early_stopping),
|
537 |
+
UserWarning,
|
538 |
+
)
|
539 |
+
if self.num_beam_groups is not None and self.num_beam_groups != 1:
|
540 |
+
warnings.warn(
|
541 |
+
single_beam_wrong_parameter_msg.format(
|
542 |
+
flag_name="num_beam_groups", flag_value=self.num_beam_groups
|
543 |
+
),
|
544 |
+
UserWarning,
|
545 |
+
)
|
546 |
+
if self.diversity_penalty is not None and self.diversity_penalty != 0.0:
|
547 |
+
warnings.warn(
|
548 |
+
single_beam_wrong_parameter_msg.format(
|
549 |
+
flag_name="diversity_penalty", flag_value=self.diversity_penalty
|
550 |
+
),
|
551 |
+
UserWarning,
|
552 |
+
)
|
553 |
+
if self.length_penalty is not None and self.length_penalty != 1.0:
|
554 |
+
warnings.warn(
|
555 |
+
single_beam_wrong_parameter_msg.format(flag_name="length_penalty", flag_value=self.length_penalty),
|
556 |
+
UserWarning,
|
557 |
+
)
|
558 |
+
if self.constraints is not None:
|
559 |
+
warnings.warn(
|
560 |
+
single_beam_wrong_parameter_msg.format(flag_name="constraints", flag_value=self.constraints),
|
561 |
+
UserWarning,
|
562 |
+
)
|
563 |
+
|
564 |
+
# 3. detect incorrect parameterization specific to advanced beam modes
|
565 |
+
else:
|
566 |
+
# constrained beam search
|
567 |
+
if self.constraints is not None or self.force_words_ids is not None:
|
568 |
+
constrained_wrong_parameter_msg = (
|
569 |
+
"one of `constraints`, `force_words_ids` is not `None`, triggering constrained beam search. However, "
|
570 |
+
"`{flag_name}` is set to `{flag_value}`, which is incompatible with this generation mode. Set "
|
571 |
+
"`constraints` and `force_words_ids` to `None` or unset `{flag_name}` to continue." + fix_location
|
572 |
+
)
|
573 |
+
if self.do_sample is True:
|
574 |
+
raise ValueError(
|
575 |
+
constrained_wrong_parameter_msg.format(flag_name="do_sample", flag_value=self.do_sample)
|
576 |
+
)
|
577 |
+
if self.num_beam_groups is not None and self.num_beam_groups != 1:
|
578 |
+
raise ValueError(
|
579 |
+
constrained_wrong_parameter_msg.format(
|
580 |
+
flag_name="num_beam_groups", flag_value=self.num_beam_groups
|
581 |
+
)
|
582 |
+
)
|
583 |
+
# group beam search
|
584 |
+
if self.diversity_penalty != 0.0 or self.num_beam_groups != 1:
|
585 |
+
group_error_prefix = (
|
586 |
+
"`diversity_penalty` is not 0.0 or `num_beam_groups` is not 1, triggering group beam search. In "
|
587 |
+
"this generation mode, "
|
588 |
+
)
|
589 |
+
if self.do_sample is True:
|
590 |
+
raise ValueError(group_error_prefix + "`do_sample` must be set to `False`")
|
591 |
+
if self.num_beams % self.num_beam_groups != 0:
|
592 |
+
raise ValueError(group_error_prefix + "`num_beams` should be divisible by `num_beam_groups`")
|
593 |
+
if self.diversity_penalty == 0.0:
|
594 |
+
raise ValueError(
|
595 |
+
group_error_prefix
|
596 |
+
+ "`diversity_penalty` should be greater than `0.0`, otherwise your groups will be identical."
|
597 |
+
)
|
598 |
+
|
599 |
+
# 4. check `num_return_sequences`
|
600 |
+
if self.num_return_sequences != 1:
|
601 |
+
if self.num_beams == 1:
|
602 |
+
if self.do_sample is False:
|
603 |
+
raise ValueError(
|
604 |
+
"Greedy methods without beam search do not support `num_return_sequences` different than 1 "
|
605 |
+
f"(got {self.num_return_sequences})."
|
606 |
+
)
|
607 |
+
elif self.num_return_sequences > self.num_beams:
|
608 |
+
raise ValueError(
|
609 |
+
f"`num_return_sequences` ({self.num_return_sequences}) has to be smaller or equal to `num_beams` "
|
610 |
+
f"({self.num_beams})."
|
611 |
+
)
|
612 |
+
|
613 |
+
# 5. check common issue: passing `generate` arguments inside the generation config
|
614 |
+
generate_arguments = (
|
615 |
+
"logits_processor",
|
616 |
+
"stopping_criteria",
|
617 |
+
"prefix_allowed_tokens_fn",
|
618 |
+
"synced_gpus",
|
619 |
+
"assistant_model",
|
620 |
+
"streamer",
|
621 |
+
"negative_prompt_ids",
|
622 |
+
"negative_prompt_attention_mask",
|
623 |
+
)
|
624 |
+
for arg in generate_arguments:
|
625 |
+
if hasattr(self, arg):
|
626 |
+
raise ValueError(
|
627 |
+
f"Argument `{arg}` is not a valid argument of `GenerationConfig`. It should be passed to "
|
628 |
+
"`generate()` (or a pipeline) directly."
|
629 |
+
)
|
630 |
+
|
631 |
+
def save_pretrained(
|
632 |
+
self,
|
633 |
+
save_directory: Union[str, os.PathLike],
|
634 |
+
config_file_name: Optional[Union[str, os.PathLike]] = None,
|
635 |
+
push_to_hub: bool = False,
|
636 |
+
**kwargs,
|
637 |
+
):
|
638 |
+
r"""
|
639 |
+
Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the
|
640 |
+
[`~GenerationConfig.from_pretrained`] class method.
|
641 |
+
|
642 |
+
Args:
|
643 |
+
save_directory (`str` or `os.PathLike`):
|
644 |
+
Directory where the configuration JSON file will be saved (will be created if it does not exist).
|
645 |
+
config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
|
646 |
+
Name of the generation configuration JSON file to be saved in `save_directory`.
|
647 |
+
push_to_hub (`bool`, *optional*, defaults to `False`):
|
648 |
+
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
|
649 |
+
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
|
650 |
+
namespace).
|
651 |
+
kwargs (`Dict[str, Any]`, *optional*):
|
652 |
+
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
|
653 |
+
"""
|
654 |
+
|
655 |
+
# At save time, validate the instance -- if any warning/exception is thrown, we refuse to save the instance.
|
656 |
+
# This strictness is enforced to prevent bad configurations from being saved and re-used.
|
657 |
+
try:
|
658 |
+
with warnings.catch_warnings(record=True) as caught_warnings:
|
659 |
+
self.validate()
|
660 |
+
if len(caught_warnings) > 0:
|
661 |
+
raise ValueError(str([w.message for w in caught_warnings]))
|
662 |
+
except ValueError as exc:
|
663 |
+
raise ValueError(
|
664 |
+
"The generation config instance is invalid -- `.validate()` throws warnings and/or exceptions. "
|
665 |
+
"Fix these issues to save the configuration.\n\nThrown during validation:\n" + str(exc)
|
666 |
+
)
|
667 |
+
|
668 |
+
use_auth_token = kwargs.pop("use_auth_token", None)
|
669 |
+
|
670 |
+
if use_auth_token is not None:
|
671 |
+
warnings.warn(
|
672 |
+
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
|
673 |
+
FutureWarning,
|
674 |
+
)
|
675 |
+
if kwargs.get("token", None) is not None:
|
676 |
+
raise ValueError(
|
677 |
+
"`token` and `use_auth_token` are both specified. Please set only the argument `token`."
|
678 |
+
)
|
679 |
+
kwargs["token"] = use_auth_token
|
680 |
+
|
681 |
+
config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
|
682 |
+
|
683 |
+
if os.path.isfile(save_directory):
|
684 |
+
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
|
685 |
+
|
686 |
+
os.makedirs(save_directory, exist_ok=True)
|
687 |
+
|
688 |
+
if push_to_hub:
|
689 |
+
commit_message = kwargs.pop("commit_message", None)
|
690 |
+
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
|
691 |
+
repo_id = self._create_repo(repo_id, **kwargs)
|
692 |
+
files_timestamps = self._get_files_timestamps(save_directory)
|
693 |
+
|
694 |
+
output_config_file = os.path.join(save_directory, config_file_name)
|
695 |
+
|
696 |
+
self.to_json_file(output_config_file, use_diff=True)
|
697 |
+
logger.info(f"Configuration saved in {output_config_file}")
|
698 |
+
|
699 |
+
if push_to_hub:
|
700 |
+
self._upload_modified_files(
|
701 |
+
save_directory,
|
702 |
+
repo_id,
|
703 |
+
files_timestamps,
|
704 |
+
commit_message=commit_message,
|
705 |
+
token=kwargs.get("token"),
|
706 |
+
)
|
707 |
+
|
708 |
+
@classmethod
|
709 |
+
def from_pretrained(
|
710 |
+
cls,
|
711 |
+
pretrained_model_name: Union[str, os.PathLike],
|
712 |
+
config_file_name: Optional[Union[str, os.PathLike]] = None,
|
713 |
+
cache_dir: Optional[Union[str, os.PathLike]] = None,
|
714 |
+
force_download: bool = False,
|
715 |
+
local_files_only: bool = False,
|
716 |
+
token: Optional[Union[str, bool]] = None,
|
717 |
+
revision: str = "main",
|
718 |
+
**kwargs,
|
719 |
+
) -> "GenerationConfig":
|
720 |
+
r"""
|
721 |
+
Instantiate a [`GenerationConfig`] from a generation configuration file.
|
722 |
+
|
723 |
+
Args:
|
724 |
+
pretrained_model_name (`str` or `os.PathLike`):
|
725 |
+
This can be either:
|
726 |
+
|
727 |
+
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
|
728 |
+
huggingface.co.
|
729 |
+
- a path to a *directory* containing a configuration file saved using the
|
730 |
+
[`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
|
731 |
+
config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
|
732 |
+
Name of the generation configuration JSON file to be loaded from `pretrained_model_name`.
|
733 |
+
cache_dir (`str` or `os.PathLike`, *optional*):
|
734 |
+
Path to a directory in which a downloaded pretrained model configuration should be cached if the
|
735 |
+
standard cache should not be used.
|
736 |
+
force_download (`bool`, *optional*, defaults to `False`):
|
737 |
+
Whether or not to force to (re-)download the configuration files and override the cached versions if
|
738 |
+
they exist.
|
739 |
+
resume_download (`bool`, *optional*, defaults to `False`):
|
740 |
+
Whether or not to delete incompletely received file. Attempts to resume the download if such a file
|
741 |
+
exists.
|
742 |
+
proxies (`Dict[str, str]`, *optional*):
|
743 |
+
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
|
744 |
+
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
|
745 |
+
token (`str` or `bool`, *optional*):
|
746 |
+
The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
|
747 |
+
the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
|
748 |
+
revision (`str`, *optional*, defaults to `"main"`):
|
749 |
+
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
|
750 |
+
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
|
751 |
+
identifier allowed by git.
|
752 |
+
|
753 |
+
<Tip>
|
754 |
+
|
755 |
+
To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>".
|
756 |
+
|
757 |
+
</Tip>
|
758 |
+
|
759 |
+
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
|
760 |
+
If `False`, then this function returns just the final configuration object.
|
761 |
+
|
762 |
+
If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
|
763 |
+
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
|
764 |
+
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
|
765 |
+
subfolder (`str`, *optional*, defaults to `""`):
|
766 |
+
In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
|
767 |
+
specify the folder name here.
|
768 |
+
kwargs (`Dict[str, Any]`, *optional*):
|
769 |
+
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
|
770 |
+
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
|
771 |
+
by the `return_unused_kwargs` keyword parameter.
|
772 |
+
|
773 |
+
Returns:
|
774 |
+
[`GenerationConfig`]: The configuration object instantiated from this pretrained model.
|
775 |
+
|
776 |
+
Examples:
|
777 |
+
|
778 |
+
```python
|
779 |
+
>>> from transformers import GenerationConfig
|
780 |
+
|
781 |
+
>>> # Download configuration from huggingface.co and cache.
|
782 |
+
>>> generation_config = GenerationConfig.from_pretrained("openai-community/gpt2")
|
783 |
+
|
784 |
+
>>> # E.g. config was saved using *save_pretrained('./test/saved_model/')*
|
785 |
+
>>> generation_config.save_pretrained("./test/saved_model/")
|
786 |
+
>>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/")
|
787 |
+
|
788 |
+
>>> # You can also specify configuration names to your generation configuration file
|
789 |
+
>>> generation_config.save_pretrained("./test/saved_model/", config_file_name="my_configuration.json")
|
790 |
+
>>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/", "my_configuration.json")
|
791 |
+
|
792 |
+
>>> # If you'd like to try a minor variation to an existing configuration, you can also pass generation
|
793 |
+
>>> # arguments to `.from_pretrained()`. Be mindful that typos and unused arguments will be ignored
|
794 |
+
>>> generation_config, unused_kwargs = GenerationConfig.from_pretrained(
|
795 |
+
... "openai-community/gpt2", top_k=1, foo=False, do_sample=True, return_unused_kwargs=True
|
796 |
+
... )
|
797 |
+
>>> generation_config.top_k
|
798 |
+
1
|
799 |
+
|
800 |
+
>>> unused_kwargs
|
801 |
+
{'foo': False}
|
802 |
+
```"""
|
803 |
+
config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
|
804 |
+
|
805 |
+
resume_download = kwargs.pop("resume_download", False)
|
806 |
+
proxies = kwargs.pop("proxies", None)
|
807 |
+
use_auth_token = kwargs.pop("use_auth_token", None)
|
808 |
+
subfolder = kwargs.pop("subfolder", "")
|
809 |
+
from_pipeline = kwargs.pop("_from_pipeline", None)
|
810 |
+
from_auto_class = kwargs.pop("_from_auto", False)
|
811 |
+
commit_hash = kwargs.pop("_commit_hash", None)
|
812 |
+
|
813 |
+
if use_auth_token is not None:
|
814 |
+
warnings.warn(
|
815 |
+
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
|
816 |
+
FutureWarning,
|
817 |
+
)
|
818 |
+
if token is not None:
|
819 |
+
raise ValueError(
|
820 |
+
"`token` and `use_auth_token` are both specified. Please set only the argument `token`."
|
821 |
+
)
|
822 |
+
token = use_auth_token
|
823 |
+
|
824 |
+
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
|
825 |
+
if from_pipeline is not None:
|
826 |
+
user_agent["using_pipeline"] = from_pipeline
|
827 |
+
|
828 |
+
config_path = os.path.join(pretrained_model_name, config_file_name)
|
829 |
+
config_path = str(config_path)
|
830 |
+
|
831 |
+
is_local = os.path.exists(config_path)
|
832 |
+
if os.path.isfile(os.path.join(subfolder, config_path)):
|
833 |
+
# Special case when config_path is a local file
|
834 |
+
resolved_config_file = config_path
|
835 |
+
is_local = True
|
836 |
+
elif is_remote_url(config_path):
|
837 |
+
configuration_file = config_path
|
838 |
+
resolved_config_file = download_url(config_path)
|
839 |
+
else:
|
840 |
+
configuration_file = config_file_name
|
841 |
+
try:
|
842 |
+
# Load from local folder or from cache or download from model Hub and cache
|
843 |
+
resolved_config_file = cached_file(
|
844 |
+
pretrained_model_name,
|
845 |
+
configuration_file,
|
846 |
+
cache_dir=cache_dir,
|
847 |
+
force_download=force_download,
|
848 |
+
proxies=proxies,
|
849 |
+
resume_download=resume_download,
|
850 |
+
local_files_only=local_files_only,
|
851 |
+
token=token,
|
852 |
+
user_agent=user_agent,
|
853 |
+
revision=revision,
|
854 |
+
subfolder=subfolder,
|
855 |
+
_commit_hash=commit_hash,
|
856 |
+
)
|
857 |
+
commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
|
858 |
+
except EnvironmentError:
|
859 |
+
# Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to
|
860 |
+
# the original exception.
|
861 |
+
raise
|
862 |
+
except Exception:
|
863 |
+
# For any other exception, we throw a generic error.
|
864 |
+
raise EnvironmentError(
|
865 |
+
f"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it"
|
866 |
+
" from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
|
867 |
+
f" name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory"
|
868 |
+
f" containing a {configuration_file} file"
|
869 |
+
)
|
870 |
+
|
871 |
+
try:
|
872 |
+
# Load config dict
|
873 |
+
config_dict = cls._dict_from_json_file(resolved_config_file)
|
874 |
+
config_dict["_commit_hash"] = commit_hash
|
875 |
+
except (json.JSONDecodeError, UnicodeDecodeError):
|
876 |
+
raise EnvironmentError(
|
877 |
+
f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
|
878 |
+
)
|
879 |
+
|
880 |
+
if is_local:
|
881 |
+
logger.info(f"loading configuration file {resolved_config_file}")
|
882 |
+
else:
|
883 |
+
logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
|
884 |
+
|
885 |
+
if kwargs.get("return_unused_kwargs") is True:
|
886 |
+
config, unused_kwargs = cls.from_dict(config_dict, **kwargs)
|
887 |
+
config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
|
888 |
+
return config, unused_kwargs
|
889 |
+
else:
|
890 |
+
config = cls.from_dict(config_dict, **kwargs)
|
891 |
+
config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
|
892 |
+
return config
|
893 |
+
|
894 |
+
@classmethod
|
895 |
+
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
|
896 |
+
with open(json_file, "r", encoding="utf-8") as reader:
|
897 |
+
text = reader.read()
|
898 |
+
return json.loads(text)
|
899 |
+
|
900 |
+
@classmethod
|
901 |
+
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "GenerationConfig":
|
902 |
+
"""
|
903 |
+
Instantiates a [`GenerationConfig`] from a Python dictionary of parameters.
|
904 |
+
|
905 |
+
Args:
|
906 |
+
config_dict (`Dict[str, Any]`):
|
907 |
+
Dictionary that will be used to instantiate the configuration object.
|
908 |
+
kwargs (`Dict[str, Any]`):
|
909 |
+
Additional parameters from which to initialize the configuration object.
|
910 |
+
|
911 |
+
Returns:
|
912 |
+
[`GenerationConfig`]: The configuration object instantiated from those parameters.
|
913 |
+
"""
|
914 |
+
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
|
915 |
+
# Those arguments may be passed along for our internal telemetry.
|
916 |
+
# We remove them so they don't appear in `return_unused_kwargs`.
|
917 |
+
kwargs.pop("_from_auto", None)
|
918 |
+
kwargs.pop("_from_pipeline", None)
|
919 |
+
# The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
|
920 |
+
if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
|
921 |
+
kwargs["_commit_hash"] = config_dict["_commit_hash"]
|
922 |
+
|
923 |
+
# The line below allows model-specific config to be loaded as well through kwargs, with safety checks.
|
924 |
+
# See https://github.com/huggingface/transformers/pull/21269
|
925 |
+
config = cls(**{**config_dict, **kwargs})
|
926 |
+
unused_kwargs = config.update(**kwargs)
|
927 |
+
|
928 |
+
logger.info(f"Generate config {config}")
|
929 |
+
if return_unused_kwargs:
|
930 |
+
return config, unused_kwargs
|
931 |
+
else:
|
932 |
+
return config
|
933 |
+
|
934 |
+
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
|
935 |
+
"""
|
936 |
+
Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
|
937 |
+
converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *"float32"*
|
938 |
+
string, which can then be stored in the json format.
|
939 |
+
"""
|
940 |
+
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
|
941 |
+
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
|
942 |
+
for value in d.values():
|
943 |
+
if isinstance(value, dict):
|
944 |
+
self.dict_torch_dtype_to_str(value)
|
945 |
+
|
946 |
+
def to_diff_dict(self) -> Dict[str, Any]:
|
947 |
+
"""
|
948 |
+
Removes all attributes from config which correspond to the default config attributes for better readability and
|
949 |
+
serializes to a Python dictionary.
|
950 |
+
|
951 |
+
Returns:
|
952 |
+
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
|
953 |
+
"""
|
954 |
+
config_dict = self.to_dict()
|
955 |
+
|
956 |
+
# get the default config dict
|
957 |
+
default_config_dict = GenerationConfig().to_dict()
|
958 |
+
|
959 |
+
serializable_config_dict = {}
|
960 |
+
|
961 |
+
# only serialize values that differ from the default config
|
962 |
+
for key, value in config_dict.items():
|
963 |
+
if key not in default_config_dict or key == "transformers_version" or value != default_config_dict[key]:
|
964 |
+
serializable_config_dict[key] = value
|
965 |
+
|
966 |
+
self.dict_torch_dtype_to_str(serializable_config_dict)
|
967 |
+
return serializable_config_dict
|
968 |
+
|
969 |
+
def to_dict(self) -> Dict[str, Any]:
|
970 |
+
"""
|
971 |
+
Serializes this instance to a Python dictionary.
|
972 |
+
|
973 |
+
Returns:
|
974 |
+
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
|
975 |
+
"""
|
976 |
+
output = copy.deepcopy(self.__dict__)
|
977 |
+
|
978 |
+
# Fields to ignore at serialization time
|
979 |
+
if "_commit_hash" in output:
|
980 |
+
del output["_commit_hash"]
|
981 |
+
if "_original_object_hash" in output:
|
982 |
+
del output["_original_object_hash"]
|
983 |
+
|
984 |
+
# Transformers version when serializing this file
|
985 |
+
output["transformers_version"] = __version__
|
986 |
+
|
987 |
+
self.dict_torch_dtype_to_str(output)
|
988 |
+
return output
|
989 |
+
|
990 |
+
def to_json_string(self, use_diff: bool = True, ignore_metadata: bool = False) -> str:
|
991 |
+
"""
|
992 |
+
Serializes this instance to a JSON string.
|
993 |
+
|
994 |
+
Args:
|
995 |
+
use_diff (`bool`, *optional*, defaults to `True`):
|
996 |
+
If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
|
997 |
+
is serialized to JSON string.
|
998 |
+
ignore_metadata (`bool`, *optional*, defaults to `False`):
|
999 |
+
Whether to ignore the metadata fields present in the instance
|
1000 |
+
|
1001 |
+
Returns:
|
1002 |
+
`str`: String containing all the attributes that make up this configuration instance in JSON format.
|
1003 |
+
"""
|
1004 |
+
if use_diff is True:
|
1005 |
+
config_dict = self.to_diff_dict()
|
1006 |
+
else:
|
1007 |
+
config_dict = self.to_dict()
|
1008 |
+
|
1009 |
+
if ignore_metadata:
|
1010 |
+
for metadata_field in METADATA_FIELDS:
|
1011 |
+
config_dict.pop(metadata_field, None)
|
1012 |
+
|
1013 |
+
def convert_keys_to_string(obj):
|
1014 |
+
if isinstance(obj, dict):
|
1015 |
+
return {str(key): convert_keys_to_string(value) for key, value in obj.items()}
|
1016 |
+
elif isinstance(obj, list):
|
1017 |
+
return [convert_keys_to_string(item) for item in obj]
|
1018 |
+
else:
|
1019 |
+
return obj
|
1020 |
+
|
1021 |
+
config_dict = convert_keys_to_string(config_dict)
|
1022 |
+
|
1023 |
+
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
|
1024 |
+
|
1025 |
+
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
|
1026 |
+
"""
|
1027 |
+
Save this instance to a JSON file.
|
1028 |
+
|
1029 |
+
Args:
|
1030 |
+
json_file_path (`str` or `os.PathLike`):
|
1031 |
+
Path to the JSON file in which this configuration instance's parameters will be saved.
|
1032 |
+
use_diff (`bool`, *optional*, defaults to `True`):
|
1033 |
+
If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
|
1034 |
+
is serialized to JSON file.
|
1035 |
+
"""
|
1036 |
+
with open(json_file_path, "w", encoding="utf-8") as writer:
|
1037 |
+
writer.write(self.to_json_string(use_diff=use_diff))
|
1038 |
+
|
1039 |
+
@classmethod
|
1040 |
+
def from_model_config(cls, model_config: PretrainedConfig) -> "GenerationConfig":
|
1041 |
+
"""
|
1042 |
+
Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy
|
1043 |
+
[`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`].
|
1044 |
+
|
1045 |
+
Args:
|
1046 |
+
model_config (`PretrainedConfig`):
|
1047 |
+
The model config that will be used to instantiate the generation config.
|
1048 |
+
|
1049 |
+
Returns:
|
1050 |
+
[`GenerationConfig`]: The configuration object instantiated from those parameters.
|
1051 |
+
"""
|
1052 |
+
config_dict = model_config.to_dict()
|
1053 |
+
config_dict.pop("_from_model_config", None)
|
1054 |
+
config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True)
|
1055 |
+
|
1056 |
+
# Special case: some models have generation attributes set in the decoder. Use them if still unset in the
|
1057 |
+
# generation config.
|
1058 |
+
for decoder_name in ("decoder", "generator", "text_config"):
|
1059 |
+
if decoder_name in config_dict:
|
1060 |
+
default_generation_config = GenerationConfig()
|
1061 |
+
decoder_config = config_dict[decoder_name]
|
1062 |
+
for attr in config.to_dict().keys():
|
1063 |
+
if attr in decoder_config and getattr(config, attr) == getattr(default_generation_config, attr):
|
1064 |
+
setattr(config, attr, decoder_config[attr])
|
1065 |
+
|
1066 |
+
config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
|
1067 |
+
return config
|
1068 |
+
|
1069 |
+
def update(self, **kwargs):
|
1070 |
+
"""
|
1071 |
+
Updates attributes of this class instance with attributes from `kwargs` if they match existing atributtes,
|
1072 |
+
returning all the unused kwargs.
|
1073 |
+
|
1074 |
+
Args:
|
1075 |
+
kwargs (`Dict[str, Any]`):
|
1076 |
+
Dictionary of attributes to tentatively update this class.
|
1077 |
+
|
1078 |
+
Returns:
|
1079 |
+
`Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
|
1080 |
+
"""
|
1081 |
+
to_remove = []
|
1082 |
+
for key, value in kwargs.items():
|
1083 |
+
if hasattr(self, key):
|
1084 |
+
setattr(self, key, value)
|
1085 |
+
to_remove.append(key)
|
1086 |
+
|
1087 |
+
# Confirm that the updated instance is still valid
|
1088 |
+
self.validate()
|
1089 |
+
|
1090 |
+
# Remove all the attributes that were updated, without modifying the input dict
|
1091 |
+
unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
|
1092 |
+
return unused_kwargs
|
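For reference, a minimal usage sketch of the save/validate/load round trip implemented above (not part of the diff; the local directory name is a placeholder):

```python
from transformers import GenerationConfig

# Build a config and set a few sampling flags; `validate()` warns or raises on
# inconsistent flag combinations, as implemented above.
generation_config = GenerationConfig(do_sample=True, top_k=50, num_return_sequences=2)
generation_config.validate()

# `save_pretrained` re-validates strictly and writes `generation_config.json`.
generation_config.save_pretrained("./my_generation_config")

# `from_pretrained` loads the JSON back into an equivalent object.
reloaded = GenerationConfig.from_pretrained("./my_generation_config")
assert reloaded.top_k == 50
```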
venv/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py
ADDED
@@ -0,0 +1,544 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp
from jax.experimental import sparse

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax for each vocabulary token when using beam search.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process
    a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each
    [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution).

    Args:
        temperature (`float`):
            The value used to modulate the logits distribution.
    """

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """
    [`FlaxLogitsWarper`] that performs top-p filtering, i.e. restricting to the smallest set of top tokens whose
    cumulative probability reaches `top_p`.

    Args:
        top_p (`float`):
            If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
            higher are kept for generation.
        filter_value (`float`, *optional*, defaults to -inf):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.

    Args:
        top_k (`int`):
            The number of highest probability vocabulary tokens to keep for top-k-filtering.
        filter_value (`float`, *optional*, defaults to -inf):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.

    Args:
        bos_token_id (`int`):
            The id of the token to force as the first generated token.
    """

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.

    Args:
        max_length (`int`):
            The maximum length of the sequence to be generated.
        eos_token_id (`int`):
            The id of the token to force as the last generated token when `max_length` is reached.
    """

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.

    Args:
        min_length (`int`):
            The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
        eos_token_id (`int`):
            The id of the *end-of-sequence* token.
    """

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating using
    `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the
    beginning of the generation.

    Args:
        begin_suppress_tokens (`List[int]`):
            Tokens to not sample.
        begin_index (`int`):
            Index where the tokens are suppressed.
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs
    to be `-inf` so they are not sampled.

    Args:
        suppress_tokens (`list`):
            Tokens to not sample.
    """

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to
    token indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens
    to `-inf` so that they are sampled at their corresponding index.

    Args:
        force_token_map (`list`):
            Map giving token ids and indices where they will be forced to be sampled.
    """

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indices without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is greater than or equal to the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    r"""
    Whisper-specific processor. This processor can be used to force a list of tokens. The processor will set their log
    probs to `inf` so that they are sampled at their corresponding index.

    Args:
        generate_config (`GenerateConfig`):
            The generate config used to generate the output. The following parameters are required:
                eos_token_id (`int`, *optional*, defaults to 50257):
                    The id of the *end-of-sequence* token.
                no_timestamps_token_id (`int`, *optional*, defaults to 50363):
                    The id of the `"<|notimestamps|>"` token.
                max_initial_timestamp_index (`int`, *optional*, defaults to 1):
                    Used to set the maximum value of the initial timestamp. This is used to prevent the model from
                    predicting timestamps that are too far in the future.
    """

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores


class FlaxNoRepeatNGramLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that enforces no repetition of n-grams. See
    [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).

    Args:
        ngram_size (`int`):
            All ngrams of size `ngram_size` can only occur once.
    """

    def __init__(self, ngram_size: int):
        if not isinstance(ngram_size, int) or ngram_size <= 0:
            raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
        self.ngram_size = ngram_size

    def get_previous_ngrams(self, input_ids: jnp.ndarray, vocab_size: int, cur_len: int):
        """
        Get a matrix of size (batch_size,) + (vocab_size,)*n (for n-grams) that represents the n-grams that occurred
        previously. The BCOO representation allows storing only the few non-zero entries, instead of the full (huge)
        matrix.
        """
        batch_size, seq_len = input_ids.shape
        # number of n-grams in the whole sequence
        seq_ngrams = seq_len - (self.ngram_size - 1)
        # number of n-grams in the currently generated sequence
        cur_ngrams = cur_len - (self.ngram_size - 1)

        def body_fun(i, val):
            b = i % batch_size
            pos = i // batch_size
            return val.at[i].set(
                jnp.array(
                    [
                        b,
                    ]
                    + [jnp.array(input_ids)[b, pos + j] for j in range(self.ngram_size)]
                )
            )

        shape = (batch_size * seq_ngrams, self.ngram_size + 1)
        all_update_indices = jax.lax.fori_loop(
            0, batch_size * cur_ngrams, body_fun, jnp.zeros(shape, dtype=input_ids.dtype)
        )

        # ignore the n-grams not yet generated
        data = (jnp.arange(batch_size * seq_ngrams) < batch_size * cur_ngrams).astype("float32")

        return sparse.BCOO((data, all_update_indices), shape=(batch_size,) + (vocab_size,) * self.ngram_size)

    def get_banned_tokens_mask(self, latest_tokens: jnp.ndarray, previous_ngrams) -> jnp.ndarray:
        """
        Determines which tokens must be banned given the latest tokens and the previously seen ngrams.
        """

        @sparse.sparsify
        @jax.vmap
        def inner_fn(latest_tokens, previous_ngrams):
            return previous_ngrams[tuple(latest_tokens)]

        return sparse.bcoo_todense(inner_fn(latest_tokens, previous_ngrams))

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def true_fn():
            _, vocab_size = scores.shape
            # store the previously seen n-grams
            previous_ngrams = self.get_previous_ngrams(input_ids, vocab_size, cur_len)

            # get the n-1 last tokens that prefix the n-gram being generated
            latest_tokens = jnp.zeros((input_ids.shape[0], self.ngram_size - 1), dtype=input_ids.dtype)
            latest_tokens = jax.lax.dynamic_update_slice(
                latest_tokens,
                jax.lax.dynamic_slice(
                    input_ids, (0, cur_len - (self.ngram_size - 1)), (input_ids.shape[0], (self.ngram_size - 1))
                ),
                (0, 0),
            )

            # compute the banned tokens, i.e. all the tokens that, when added to the latest tokens, lead to an n-gram that was previously generated
            banned_tokens_indices_mask = self.get_banned_tokens_mask(latest_tokens, previous_ngrams).astype("bool")
            return jnp.where(banned_tokens_indices_mask, -float("inf"), scores)

        output = jax.lax.cond((cur_len >= self.ngram_size - 1), true_fn, lambda: scores)
        return output
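To illustrate how the classes above fit together, here is a minimal, self-contained sketch (not part of the diff) that chains a few processors with `FlaxLogitsProcessorList`; the toy shapes and token ids are placeholders:

```python
import jax.numpy as jnp

from transformers.generation.flax_logits_process import (
    FlaxLogitsProcessorList,
    FlaxMinLengthLogitsProcessor,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

# Toy batch: 2 sequences of length 3 over a 10-token vocabulary.
input_ids = jnp.zeros((2, 3), dtype=jnp.int32)
scores = jnp.ones((2, 10), dtype=jnp.float32)

processors = FlaxLogitsProcessorList(
    [
        FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2),  # blocks EOS until length 5
        FlaxTemperatureLogitsWarper(temperature=0.7),  # rescales the logits
        FlaxTopKLogitsWarper(top_k=4),  # keeps only the 4 highest-scoring tokens
    ]
)

# The list applies each processor in order, using the (input_ids, scores, cur_len) signature shown above.
filtered_scores = processors(input_ids, scores, cur_len=3)
print(filtered_scores.shape)  # (2, 10)
```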