applied-ai-018 committed
Commit f664c07 · verified · 1 parent: ee92357

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/commands/__init__.py +27 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/commands/add_new_model.py +259 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py +1713 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/commands/convert.py +165 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/commands/download.py +56 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/commands/env.py +143 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/commands/lfs.py +226 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/commands/pt_to_tf.py +425 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/commands/run.py +110 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/commands/serving.py +228 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/commands/train.py +158 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/commands/transformers_cli.py +59 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/commands/user.py +197 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/__init__.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_flax_albert.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/__init__.py +51 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/__init__.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/configuration_dbrx.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/modeling_dbrx.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/configuration_dbrx.py +257 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/modeling_dbrx.py +1523 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__init__.py +75 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/configuration_detr.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/convert_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/convert_detr_to_pytorch.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/feature_extraction_detr.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/image_processing_detr.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/modeling_detr.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/configuration_detr.py +284 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py +278 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/convert_detr_to_pytorch.py +386 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/feature_extraction_detr.py +43 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/image_processing_detr.py +1965 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/detr/modeling_detr.py +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__init__.py +70 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/__init__.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/convert_jukebox.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/configuration_jukebox.py +613 -0
llmeval-env/lib/python3.10/site-packages/transformers/commands/__init__.py ADDED
@@ -0,0 +1,27 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from argparse import ArgumentParser
+
+
+class BaseTransformersCLICommand(ABC):
+    @staticmethod
+    @abstractmethod
+    def register_subcommand(parser: ArgumentParser):
+        raise NotImplementedError()
+
+    @abstractmethod
+    def run(self):
+        raise NotImplementedError()
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (833 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc ADDED
Binary file (7.12 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc ADDED
Binary file (4.65 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc ADDED
Binary file (2.03 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc ADDED
Binary file (4.13 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc ADDED
Binary file (7.29 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc ADDED
Binary file (12.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc ADDED
Binary file (3.45 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc ADDED
Binary file (6.84 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc ADDED
Binary file (1.37 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/commands/add_new_model.py ADDED
@@ -0,0 +1,259 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import shutil
+import warnings
+from argparse import ArgumentParser, Namespace
+from pathlib import Path
+from typing import List
+
+from ..utils import logging
+from . import BaseTransformersCLICommand
+
+
+try:
+    from cookiecutter.main import cookiecutter
+
+    _has_cookiecutter = True
+except ImportError:
+    _has_cookiecutter = False
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+def add_new_model_command_factory(args: Namespace):
+    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
+
+
+class AddNewModelCommand(BaseTransformersCLICommand):
+    @staticmethod
+    def register_subcommand(parser: ArgumentParser):
+        add_new_model_parser = parser.add_parser("add-new-model")
+        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
+        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
+        add_new_model_parser.add_argument(
+            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
+        )
+        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
+
+    def __init__(self, testing: bool, testing_file: str, path=None, *args):
+        self._testing = testing
+        self._testing_file = testing_file
+        self._path = path
+
+    def run(self):
+        warnings.warn(
+            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
+            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
+            "checks, you should use `transformers-cli add-new-model-like` instead."
+        )
+        if not _has_cookiecutter:
+            raise ImportError(
+                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
+                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
+            )
+        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
+        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
+        if len(directories) > 0:
+            raise ValueError(
+                "Several directories starting with `cookiecutter-template-` in current working directory. "
+                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
+                "change your working directory."
+            )
+
+        path_to_transformer_root = (
+            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
+        )
+        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
+
+        # Execute cookiecutter
+        if not self._testing:
+            cookiecutter(str(path_to_cookiecutter))
+        else:
+            with open(self._testing_file, "r") as configuration_file:
+                testing_configuration = json.load(configuration_file)
+
+            cookiecutter(
+                str(path_to_cookiecutter if self._path is None else self._path),
+                no_input=True,
+                extra_context=testing_configuration,
+            )
+
+        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
+
+        # Retrieve configuration
+        with open(directory + "/configuration.json", "r") as configuration_file:
+            configuration = json.load(configuration_file)
+
+        lowercase_model_name = configuration["lowercase_modelname"]
+        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
+        os.remove(f"{directory}/configuration.json")
+
+        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
+        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
+        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
+
+        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
+        os.makedirs(model_dir, exist_ok=True)
+        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
+
+        # Tests require submodules as they have parent imports
+        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
+            pass
+
+        shutil.move(
+            f"{directory}/__init__.py",
+            f"{model_dir}/__init__.py",
+        )
+        shutil.move(
+            f"{directory}/configuration_{lowercase_model_name}.py",
+            f"{model_dir}/configuration_{lowercase_model_name}.py",
+        )
+
+        def remove_copy_lines(path):
+            with open(path, "r") as f:
+                lines = f.readlines()
+            with open(path, "w") as f:
+                for line in lines:
+                    if "# Copied from transformers." not in line:
+                        f.write(line)
+
+        if output_pytorch:
+            if not self._testing:
+                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
+
+            shutil.move(
+                f"{directory}/modeling_{lowercase_model_name}.py",
+                f"{model_dir}/modeling_{lowercase_model_name}.py",
+            )
+
+            shutil.move(
+                f"{directory}/test_modeling_{lowercase_model_name}.py",
+                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
+            )
+        else:
+            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
+            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
+
+        if output_tensorflow:
+            if not self._testing:
+                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+
+            shutil.move(
+                f"{directory}/modeling_tf_{lowercase_model_name}.py",
+                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
+            )
+
+            shutil.move(
+                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
+                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
+            )
+        else:
+            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
+
+        if output_flax:
+            if not self._testing:
+                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+
+            shutil.move(
+                f"{directory}/modeling_flax_{lowercase_model_name}.py",
+                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
+            )
+
+            shutil.move(
+                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
+                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
+            )
+        else:
+            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
+
+        shutil.move(
+            f"{directory}/{lowercase_model_name}.md",
+            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
+        )
+
+        shutil.move(
+            f"{directory}/tokenization_{lowercase_model_name}.py",
+            f"{model_dir}/tokenization_{lowercase_model_name}.py",
+        )
+
+        shutil.move(
+            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
+            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
+        )
+
+        from os import fdopen, remove
+        from shutil import copymode, move
+        from tempfile import mkstemp
+
+        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
+            # Create temp file
+            fh, abs_path = mkstemp()
+            line_found = False
+            with fdopen(fh, "w") as new_file:
+                with open(original_file) as old_file:
+                    for line in old_file:
+                        new_file.write(line)
+                        if line_to_copy_below in line:
+                            line_found = True
+                            for line_to_copy in lines_to_copy:
+                                new_file.write(line_to_copy)
+
+            if not line_found:
+                raise ValueError(f"Line {line_to_copy_below} was not found in file.")
+
+            # Copy the file permissions from the old file to the new file
+            copymode(original_file, abs_path)
+            # Remove original file
+            remove(original_file)
+            # Move new file
+            move(abs_path, original_file)
+
+        def skip_units(line):
+            return (
+                ("generating PyTorch" in line and not output_pytorch)
+                or ("generating TensorFlow" in line and not output_tensorflow)
+                or ("generating Flax" in line and not output_flax)
+            )
+
+        def replace_in_files(path_to_datafile):
+            with open(path_to_datafile) as datafile:
+                lines_to_copy = []
+                skip_file = False
+                skip_snippet = False
+                for line in datafile:
+                    if "# To replace in: " in line and "##" not in line:
+                        file_to_replace_in = line.split('"')[1]
+                        skip_file = skip_units(line)
+                    elif "# Below: " in line and "##" not in line:
+                        line_to_copy_below = line.split('"')[1]
+                        skip_snippet = skip_units(line)
+                    elif "# End." in line and "##" not in line:
+                        if not skip_file and not skip_snippet:
+                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
+
+                        lines_to_copy = []
+                    elif "# Replace with" in line and "##" not in line:
+                        lines_to_copy = []
+                    elif "##" not in line:
+                        lines_to_copy.append(line)
+
+            remove(path_to_datafile)
+
+        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
+        os.rmdir(directory)
llmeval-env/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py ADDED
@@ -0,0 +1,1713 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import difflib
16
+ import json
17
+ import os
18
+ import re
19
+ from argparse import ArgumentParser, Namespace
20
+ from dataclasses import dataclass
21
+ from datetime import date
22
+ from itertools import chain
23
+ from pathlib import Path
24
+ from typing import Any, Callable, Dict, List, Optional, Pattern, Tuple, Union
25
+
26
+ import yaml
27
+
28
+ from ..models import auto as auto_module
29
+ from ..models.auto.configuration_auto import model_type_to_module_name
30
+ from ..utils import is_flax_available, is_tf_available, is_torch_available, logging
31
+ from . import BaseTransformersCLICommand
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ CURRENT_YEAR = date.today().year
38
+ TRANSFORMERS_PATH = Path(__file__).parent.parent
39
+ REPO_PATH = TRANSFORMERS_PATH.parent.parent
40
+
41
+
42
+ @dataclass
43
+ class ModelPatterns:
44
+ """
45
+ Holds the basic information about a new model for the add-new-model-like command.
46
+
47
+ Args:
48
+ model_name (`str`): The model name.
49
+ checkpoint (`str`): The checkpoint to use for doc examples.
50
+ model_type (`str`, *optional*):
51
+ The model type, the identifier used internally in the library like `bert` or `xlm-roberta`. Will default to
52
+ `model_name` lowercased with spaces replaced with minuses (-).
53
+ model_lower_cased (`str`, *optional*):
54
+ The lowercased version of the model name, to use for the module name or function names. Will default to
55
+ `model_name` lowercased with spaces and minuses replaced with underscores.
56
+ model_camel_cased (`str`, *optional*):
57
+ The camel-cased version of the model name, to use for the class names. Will default to `model_name`
58
+ camel-cased (with spaces and minuses both considered as word separators.
59
+ model_upper_cased (`str`, *optional*):
60
+ The uppercased version of the model name, to use for the constant names. Will default to `model_name`
61
+ uppercased with spaces and minuses replaced with underscores.
62
+ config_class (`str`, *optional*):
63
+ The tokenizer class associated with this model. Will default to `"{model_camel_cased}Config"`.
64
+ tokenizer_class (`str`, *optional*):
65
+ The tokenizer class associated with this model (leave to `None` for models that don't use a tokenizer).
66
+ image_processor_class (`str`, *optional*):
67
+ The image processor class associated with this model (leave to `None` for models that don't use an image
68
+ processor).
69
+ feature_extractor_class (`str`, *optional*):
70
+ The feature extractor class associated with this model (leave to `None` for models that don't use a feature
71
+ extractor).
72
+ processor_class (`str`, *optional*):
73
+ The processor class associated with this model (leave to `None` for models that don't use a processor).
74
+ """
75
+
76
+ model_name: str
77
+ checkpoint: str
78
+ model_type: Optional[str] = None
79
+ model_lower_cased: Optional[str] = None
80
+ model_camel_cased: Optional[str] = None
81
+ model_upper_cased: Optional[str] = None
82
+ config_class: Optional[str] = None
83
+ tokenizer_class: Optional[str] = None
84
+ image_processor_class: Optional[str] = None
85
+ feature_extractor_class: Optional[str] = None
86
+ processor_class: Optional[str] = None
87
+
88
+ def __post_init__(self):
89
+ if self.model_type is None:
90
+ self.model_type = self.model_name.lower().replace(" ", "-")
91
+ if self.model_lower_cased is None:
92
+ self.model_lower_cased = self.model_name.lower().replace(" ", "_").replace("-", "_")
93
+ if self.model_camel_cased is None:
94
+ # Split the model name on - and space
95
+ words = self.model_name.split(" ")
96
+ words = list(chain(*[w.split("-") for w in words]))
97
+ # Make sure each word is capitalized
98
+ words = [w[0].upper() + w[1:] for w in words]
99
+ self.model_camel_cased = "".join(words)
100
+ if self.model_upper_cased is None:
101
+ self.model_upper_cased = self.model_name.upper().replace(" ", "_").replace("-", "_")
102
+ if self.config_class is None:
103
+ self.config_class = f"{self.model_camel_cased}Config"
104
+
105
+
106
+ ATTRIBUTE_TO_PLACEHOLDER = {
107
+ "config_class": "[CONFIG_CLASS]",
108
+ "tokenizer_class": "[TOKENIZER_CLASS]",
109
+ "image_processor_class": "[IMAGE_PROCESSOR_CLASS]",
110
+ "feature_extractor_class": "[FEATURE_EXTRACTOR_CLASS]",
111
+ "processor_class": "[PROCESSOR_CLASS]",
112
+ "checkpoint": "[CHECKPOINT]",
113
+ "model_type": "[MODEL_TYPE]",
114
+ "model_upper_cased": "[MODEL_UPPER_CASED]",
115
+ "model_camel_cased": "[MODEL_CAMELCASED]",
116
+ "model_lower_cased": "[MODEL_LOWER_CASED]",
117
+ "model_name": "[MODEL_NAME]",
118
+ }
119
+
120
+
121
+ def is_empty_line(line: str) -> bool:
122
+ """
123
+ Determines whether a line is empty or not.
124
+ """
125
+ return len(line) == 0 or line.isspace()
126
+
127
+
128
+ def find_indent(line: str) -> int:
129
+ """
130
+ Returns the number of spaces that start a line indent.
131
+ """
132
+ search = re.search(r"^(\s*)(?:\S|$)", line)
133
+ if search is None:
134
+ return 0
135
+ return len(search.groups()[0])
136
+
137
+
138
+ def parse_module_content(content: str) -> List[str]:
139
+ """
140
+ Parse the content of a module in the list of objects it defines.
141
+
142
+ Args:
143
+ content (`str`): The content to parse
144
+
145
+ Returns:
146
+ `List[str]`: The list of objects defined in the module.
147
+ """
148
+ objects = []
149
+ current_object = []
150
+ lines = content.split("\n")
151
+ # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
152
+ end_markers = [")", "]", "}", '"""']
153
+
154
+ for line in lines:
155
+ # End of an object
156
+ is_valid_object = len(current_object) > 0
157
+ if is_valid_object and len(current_object) == 1:
158
+ is_valid_object = not current_object[0].startswith("# Copied from")
159
+ if not is_empty_line(line) and find_indent(line) == 0 and is_valid_object:
160
+ # Closing parts should be included in current object
161
+ if line in end_markers:
162
+ current_object.append(line)
163
+ objects.append("\n".join(current_object))
164
+ current_object = []
165
+ else:
166
+ objects.append("\n".join(current_object))
167
+ current_object = [line]
168
+ else:
169
+ current_object.append(line)
170
+
171
+ # Add last object
172
+ if len(current_object) > 0:
173
+ objects.append("\n".join(current_object))
174
+
175
+ return objects
176
+
177
+
178
+ def extract_block(content: str, indent_level: int = 0) -> str:
179
+ """Return the first block in `content` with the indent level `indent_level`.
180
+
181
+ The first line in `content` should be indented at `indent_level` level, otherwise an error will be thrown.
182
+
183
+ This method will immediately stop the search when a (non-empty) line with indent level less than `indent_level` is
184
+ encountered.
185
+
186
+ Args:
187
+ content (`str`): The content to parse
188
+ indent_level (`int`, *optional*, default to 0): The indent level of the blocks to search for
189
+
190
+ Returns:
191
+ `str`: The first block in `content` with the indent level `indent_level`.
192
+ """
193
+ current_object = []
194
+ lines = content.split("\n")
195
+ # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
196
+ end_markers = [")", "]", "}", '"""']
197
+
198
+ for idx, line in enumerate(lines):
199
+ if idx == 0 and indent_level > 0 and not is_empty_line(line) and find_indent(line) != indent_level:
200
+ raise ValueError(
201
+ f"When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. Got "
202
+ f"{find_indent(line)} instead."
203
+ )
204
+
205
+ if find_indent(line) < indent_level and not is_empty_line(line):
206
+ break
207
+
208
+ # End of an object
209
+ is_valid_object = len(current_object) > 0
210
+ if (
211
+ not is_empty_line(line)
212
+ and not line.endswith(":")
213
+ and find_indent(line) == indent_level
214
+ and is_valid_object
215
+ ):
216
+ # Closing parts should be included in current object
217
+ if line.lstrip() in end_markers:
218
+ current_object.append(line)
219
+ return "\n".join(current_object)
220
+ else:
221
+ current_object.append(line)
222
+
223
+ # Add last object
224
+ if len(current_object) > 0:
225
+ return "\n".join(current_object)
226
+
227
+
228
+ def add_content_to_text(
229
+ text: str,
230
+ content: str,
231
+ add_after: Optional[Union[str, Pattern]] = None,
232
+ add_before: Optional[Union[str, Pattern]] = None,
233
+ exact_match: bool = False,
234
+ ) -> str:
235
+ """
236
+ A utility to add some content inside a given text.
237
+
238
+ Args:
239
+ text (`str`): The text in which we want to insert some content.
240
+ content (`str`): The content to add.
241
+ add_after (`str` or `Pattern`):
242
+ The pattern to test on a line of `text`, the new content is added after the first instance matching it.
243
+ add_before (`str` or `Pattern`):
244
+ The pattern to test on a line of `text`, the new content is added before the first instance matching it.
245
+ exact_match (`bool`, *optional*, defaults to `False`):
246
+ A line is considered a match with `add_after` or `add_before` if it matches exactly when `exact_match=True`,
247
+ otherwise, if `add_after`/`add_before` is present in the line.
248
+
249
+ <Tip warning={true}>
250
+
251
+ The arguments `add_after` and `add_before` are mutually exclusive, and one exactly needs to be provided.
252
+
253
+ </Tip>
254
+
255
+ Returns:
256
+ `str`: The text with the new content added if a match was found.
257
+ """
258
+ if add_after is None and add_before is None:
259
+ raise ValueError("You need to pass either `add_after` or `add_before`")
260
+ if add_after is not None and add_before is not None:
261
+ raise ValueError("You can't pass both `add_after` or `add_before`")
262
+ pattern = add_after if add_before is None else add_before
263
+
264
+ def this_is_the_line(line):
265
+ if isinstance(pattern, Pattern):
266
+ return pattern.search(line) is not None
267
+ elif exact_match:
268
+ return pattern == line
269
+ else:
270
+ return pattern in line
271
+
272
+ new_lines = []
273
+ for line in text.split("\n"):
274
+ if this_is_the_line(line):
275
+ if add_before is not None:
276
+ new_lines.append(content)
277
+ new_lines.append(line)
278
+ if add_after is not None:
279
+ new_lines.append(content)
280
+ else:
281
+ new_lines.append(line)
282
+
283
+ return "\n".join(new_lines)
284
+
285
+
286
+ def add_content_to_file(
287
+ file_name: Union[str, os.PathLike],
288
+ content: str,
289
+ add_after: Optional[Union[str, Pattern]] = None,
290
+ add_before: Optional[Union[str, Pattern]] = None,
291
+ exact_match: bool = False,
292
+ ):
293
+ """
294
+ A utility to add some content inside a given file.
295
+
296
+ Args:
297
+ file_name (`str` or `os.PathLike`): The name of the file in which we want to insert some content.
298
+ content (`str`): The content to add.
299
+ add_after (`str` or `Pattern`):
300
+ The pattern to test on a line of `text`, the new content is added after the first instance matching it.
301
+ add_before (`str` or `Pattern`):
302
+ The pattern to test on a line of `text`, the new content is added before the first instance matching it.
303
+ exact_match (`bool`, *optional*, defaults to `False`):
304
+ A line is considered a match with `add_after` or `add_before` if it matches exactly when `exact_match=True`,
305
+ otherwise, if `add_after`/`add_before` is present in the line.
306
+
307
+ <Tip warning={true}>
308
+
309
+ The arguments `add_after` and `add_before` are mutually exclusive, and one exactly needs to be provided.
310
+
311
+ </Tip>
312
+ """
313
+ with open(file_name, "r", encoding="utf-8") as f:
314
+ old_content = f.read()
315
+
316
+ new_content = add_content_to_text(
317
+ old_content, content, add_after=add_after, add_before=add_before, exact_match=exact_match
318
+ )
319
+
320
+ with open(file_name, "w", encoding="utf-8") as f:
321
+ f.write(new_content)
322
+
323
+
324
+ def replace_model_patterns(
325
+ text: str, old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns
326
+ ) -> Tuple[str, str]:
327
+ """
328
+ Replace all patterns present in a given text.
329
+
330
+ Args:
331
+ text (`str`): The text to treat.
332
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
333
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
334
+
335
+ Returns:
336
+ `Tuple(str, str)`: A tuple of with the treated text and the replacement actually done in it.
337
+ """
338
+ # The order is crucially important as we will check and replace in that order. For instance the config probably
339
+ # contains the camel-cased named, but will be treated before.
340
+ attributes_to_check = ["config_class"]
341
+ # Add relevant preprocessing classes
342
+ for attr in ["tokenizer_class", "image_processor_class", "feature_extractor_class", "processor_class"]:
343
+ if getattr(old_model_patterns, attr) is not None and getattr(new_model_patterns, attr) is not None:
344
+ attributes_to_check.append(attr)
345
+
346
+ # Special cases for checkpoint and model_type
347
+ if old_model_patterns.checkpoint not in [old_model_patterns.model_type, old_model_patterns.model_lower_cased]:
348
+ attributes_to_check.append("checkpoint")
349
+ if old_model_patterns.model_type != old_model_patterns.model_lower_cased:
350
+ attributes_to_check.append("model_type")
351
+ else:
352
+ text = re.sub(
353
+ rf'(\s*)model_type = "{old_model_patterns.model_type}"',
354
+ r'\1model_type = "[MODEL_TYPE]"',
355
+ text,
356
+ )
357
+
358
+ # Special case when the model camel cased and upper cased names are the same for the old model (like for GPT2) but
359
+ # not the new one. We can't just do a replace in all the text and will need a special regex
360
+ if old_model_patterns.model_upper_cased == old_model_patterns.model_camel_cased:
361
+ old_model_value = old_model_patterns.model_upper_cased
362
+ if re.search(rf"{old_model_value}_[A-Z_]*[^A-Z_]", text) is not None:
363
+ text = re.sub(rf"{old_model_value}([A-Z_]*)([^a-zA-Z_])", r"[MODEL_UPPER_CASED]\1\2", text)
364
+ else:
365
+ attributes_to_check.append("model_upper_cased")
366
+
367
+ attributes_to_check.extend(["model_camel_cased", "model_lower_cased", "model_name"])
368
+
369
+ # Now let's replace every other attribute by their placeholder
370
+ for attr in attributes_to_check:
371
+ text = text.replace(getattr(old_model_patterns, attr), ATTRIBUTE_TO_PLACEHOLDER[attr])
372
+
373
+ # Finally we can replace the placeholder byt the new values.
374
+ replacements = []
375
+ for attr, placeholder in ATTRIBUTE_TO_PLACEHOLDER.items():
376
+ if placeholder in text:
377
+ replacements.append((getattr(old_model_patterns, attr), getattr(new_model_patterns, attr)))
378
+ text = text.replace(placeholder, getattr(new_model_patterns, attr))
379
+
380
+ # If we have two inconsistent replacements, we don't return anything (ex: GPT2->GPT_NEW and GPT2->GPTNew)
381
+ old_replacement_values = [old for old, new in replacements]
382
+ if len(set(old_replacement_values)) != len(old_replacement_values):
383
+ return text, ""
384
+
385
+ replacements = simplify_replacements(replacements)
386
+ replacements = [f"{old}->{new}" for old, new in replacements]
387
+ return text, ",".join(replacements)
388
+
389
+
390
+ def simplify_replacements(replacements):
391
+ """
392
+ Simplify a list of replacement patterns to make sure there are no needless ones.
393
+
394
+ For instance in the sequence "Bert->BertNew, BertConfig->BertNewConfig, bert->bert_new", the replacement
395
+ "BertConfig->BertNewConfig" is implied by "Bert->BertNew" so not needed.
396
+
397
+ Args:
398
+ replacements (`List[Tuple[str, str]]`): List of patterns (old, new)
399
+
400
+ Returns:
401
+ `List[Tuple[str, str]]`: The list of patterns simplified.
402
+ """
403
+ if len(replacements) <= 1:
404
+ # Nothing to simplify
405
+ return replacements
406
+
407
+ # Next let's sort replacements by length as a replacement can only "imply" another replacement if it's shorter.
408
+ replacements.sort(key=lambda x: len(x[0]))
409
+
410
+ idx = 0
411
+ while idx < len(replacements):
412
+ old, new = replacements[idx]
413
+ # Loop through all replacements after
414
+ j = idx + 1
415
+ while j < len(replacements):
416
+ old_2, new_2 = replacements[j]
417
+ # If the replacement is implied by the current one, we can drop it.
418
+ if old_2.replace(old, new) == new_2:
419
+ replacements.pop(j)
420
+ else:
421
+ j += 1
422
+ idx += 1
423
+
424
+ return replacements
425
+
426
+
427
+ def get_module_from_file(module_file: Union[str, os.PathLike]) -> str:
428
+ """
429
+ Returns the module name corresponding to a module file.
430
+ """
431
+ full_module_path = Path(module_file).absolute()
432
+ module_parts = full_module_path.with_suffix("").parts
433
+
434
+ # Find the first part named transformers, starting from the end.
435
+ idx = len(module_parts) - 1
436
+ while idx >= 0 and module_parts[idx] != "transformers":
437
+ idx -= 1
438
+ if idx < 0:
439
+ raise ValueError(f"{module_file} is not a transformers module.")
440
+
441
+ return ".".join(module_parts[idx:])
442
+
443
+
444
+ SPECIAL_PATTERNS = {
445
+ "_CHECKPOINT_FOR_DOC =": "checkpoint",
446
+ "_CONFIG_FOR_DOC =": "config_class",
447
+ "_TOKENIZER_FOR_DOC =": "tokenizer_class",
448
+ "_IMAGE_PROCESSOR_FOR_DOC =": "image_processor_class",
449
+ "_FEAT_EXTRACTOR_FOR_DOC =": "feature_extractor_class",
450
+ "_PROCESSOR_FOR_DOC =": "processor_class",
451
+ }
452
+
453
+
454
+ _re_class_func = re.compile(r"^(?:class|def)\s+([^\s:\(]+)\s*(?:\(|\:)", flags=re.MULTILINE)
455
+
456
+
457
+ def remove_attributes(obj, target_attr):
458
+ """Remove `target_attr` in `obj`."""
459
+ lines = obj.split(os.linesep)
460
+
461
+ target_idx = None
462
+ for idx, line in enumerate(lines):
463
+ # search for assignment
464
+ if line.lstrip().startswith(f"{target_attr} = "):
465
+ target_idx = idx
466
+ break
467
+ # search for function/method definition
468
+ elif line.lstrip().startswith(f"def {target_attr}("):
469
+ target_idx = idx
470
+ break
471
+
472
+ # target not found
473
+ if target_idx is None:
474
+ return obj
475
+
476
+ line = lines[target_idx]
477
+ indent_level = find_indent(line)
478
+ # forward pass to find the ending of the block (including empty lines)
479
+ parsed = extract_block("\n".join(lines[target_idx:]), indent_level)
480
+ num_lines = len(parsed.split("\n"))
481
+ for idx in range(num_lines):
482
+ lines[target_idx + idx] = None
483
+
484
+ # backward pass to find comments or decorator
485
+ for idx in range(target_idx - 1, -1, -1):
486
+ line = lines[idx]
487
+ if (line.lstrip().startswith("#") or line.lstrip().startswith("@")) and find_indent(line) == indent_level:
488
+ lines[idx] = None
489
+ else:
490
+ break
491
+
492
+ new_obj = os.linesep.join([x for x in lines if x is not None])
493
+
494
+ return new_obj
495
+
496
+
497
+ def duplicate_module(
498
+ module_file: Union[str, os.PathLike],
499
+ old_model_patterns: ModelPatterns,
500
+ new_model_patterns: ModelPatterns,
501
+ dest_file: Optional[str] = None,
502
+ add_copied_from: bool = True,
503
+ attrs_to_remove: List[str] = None,
504
+ ):
505
+ """
506
+ Create a new module from an existing one and adapting all function and classes names from old patterns to new ones.
507
+
508
+ Args:
509
+ module_file (`str` or `os.PathLike`): Path to the module to duplicate.
510
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
511
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
512
+ dest_file (`str` or `os.PathLike`, *optional*): Path to the new module.
513
+ add_copied_from (`bool`, *optional*, defaults to `True`):
514
+ Whether or not to add `# Copied from` statements in the duplicated module.
515
+ """
516
+ if dest_file is None:
517
+ dest_file = str(module_file).replace(
518
+ old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
519
+ )
520
+
521
+ with open(module_file, "r", encoding="utf-8") as f:
522
+ content = f.read()
523
+
524
+ content = re.sub(r"# Copyright (\d+)\s", f"# Copyright {CURRENT_YEAR} ", content)
525
+ objects = parse_module_content(content)
526
+
527
+ # Loop and treat all objects
528
+ new_objects = []
529
+ for obj in objects:
530
+ special_pattern = False
531
+ for pattern, attr in SPECIAL_PATTERNS.items():
532
+ if pattern in obj:
533
+ obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))
534
+ new_objects.append(obj)
535
+ special_pattern = True
536
+ break
537
+
538
+ if special_pattern:
539
+ continue
540
+
541
+ # Regular classes functions
542
+ old_obj = obj
543
+ obj, replacement = replace_model_patterns(obj, old_model_patterns, new_model_patterns)
544
+ has_copied_from = re.search(r"^#\s+Copied from", obj, flags=re.MULTILINE) is not None
545
+ if add_copied_from and not has_copied_from and _re_class_func.search(obj) is not None and len(replacement) > 0:
546
+ # Copied from statement must be added just before the class/function definition, which may not be the
547
+ # first line because of decorators.
548
+ module_name = get_module_from_file(module_file)
549
+ old_object_name = _re_class_func.search(old_obj).groups()[0]
550
+ obj = add_content_to_text(
551
+ obj, f"# Copied from {module_name}.{old_object_name} with {replacement}", add_before=_re_class_func
552
+ )
553
+ # In all cases, we remove Copied from statement with indent on methods.
554
+ obj = re.sub("\n[ ]+# Copied from [^\n]*\n", "\n", obj)
555
+
556
+ new_objects.append(obj)
557
+
558
+ content = "\n".join(new_objects)
559
+ # Remove some attributes that we don't want to copy to the new file(s)
560
+ if attrs_to_remove is not None:
561
+ for attr in attrs_to_remove:
562
+ content = remove_attributes(content, target_attr=attr)
563
+
564
+ with open(dest_file, "w", encoding="utf-8") as f:
565
+ f.write(content)
566
+
567
+
568
+ def filter_framework_files(
569
+ files: List[Union[str, os.PathLike]], frameworks: Optional[List[str]] = None
570
+ ) -> List[Union[str, os.PathLike]]:
571
+ """
572
+ Filter a list of files to only keep the ones corresponding to a list of frameworks.
573
+
574
+ Args:
575
+ files (`List[Union[str, os.PathLike]]`): The list of files to filter.
576
+ frameworks (`List[str]`, *optional*): The list of allowed frameworks.
577
+
578
+ Returns:
579
+ `List[Union[str, os.PathLike]]`: The list of filtered files.
580
+ """
581
+ if frameworks is None:
582
+ frameworks = get_default_frameworks()
583
+
584
+ framework_to_file = {}
585
+ others = []
586
+ for f in files:
587
+ parts = Path(f).name.split("_")
588
+ if "modeling" not in parts:
589
+ others.append(f)
590
+ continue
591
+ if "tf" in parts:
592
+ framework_to_file["tf"] = f
593
+ elif "flax" in parts:
594
+ framework_to_file["flax"] = f
595
+ else:
596
+ framework_to_file["pt"] = f
597
+
598
+ return [framework_to_file[f] for f in frameworks if f in framework_to_file] + others
599
+
600
+
601
+ def get_model_files(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, Union[Path, List[Path]]]:
602
+ """
603
+ Retrieves all the files associated to a model.
604
+
605
+ Args:
606
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
607
+ frameworks (`List[str]`, *optional*):
608
+ If passed, will only keep the model files corresponding to the passed frameworks.
609
+
610
+ Returns:
611
+ `Dict[str, Union[Path, List[Path]]]`: A dictionary with the following keys:
612
+ - **doc_file** -- The documentation file for the model.
613
+ - **model_files** -- All the files in the model module.
614
+ - **test_files** -- The test files for the model.
615
+ """
616
+ module_name = model_type_to_module_name(model_type)
617
+
618
+ model_module = TRANSFORMERS_PATH / "models" / module_name
619
+ model_files = list(model_module.glob("*.py"))
620
+ model_files = filter_framework_files(model_files, frameworks=frameworks)
621
+
622
+ doc_file = REPO_PATH / "docs" / "source" / "en" / "model_doc" / f"{model_type}.md"
623
+
624
+ # Basic pattern for test files
625
+ test_files = [
626
+ f"test_modeling_{module_name}.py",
627
+ f"test_modeling_tf_{module_name}.py",
628
+ f"test_modeling_flax_{module_name}.py",
629
+ f"test_tokenization_{module_name}.py",
630
+ f"test_image_processing_{module_name}.py",
631
+ f"test_feature_extraction_{module_name}.py",
632
+ f"test_processor_{module_name}.py",
633
+ ]
634
+ test_files = filter_framework_files(test_files, frameworks=frameworks)
635
+ # Add the test directory
636
+ test_files = [REPO_PATH / "tests" / "models" / module_name / f for f in test_files]
637
+ # Filter by existing files
638
+ test_files = [f for f in test_files if f.exists()]
639
+
640
+ return {"doc_file": doc_file, "model_files": model_files, "module_name": module_name, "test_files": test_files}
641
+
642
+
643
+ _re_checkpoint_for_doc = re.compile(r"^_CHECKPOINT_FOR_DOC\s+=\s+(\S*)\s*$", flags=re.MULTILINE)
644
+
645
+
646
+ def find_base_model_checkpoint(
647
+ model_type: str, model_files: Optional[Dict[str, Union[Path, List[Path]]]] = None
648
+ ) -> str:
649
+ """
650
+ Finds the model checkpoint used in the docstrings for a given model.
651
+
652
+ Args:
653
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
654
+ model_files (`Dict[str, Union[Path, List[Path]]`, *optional*):
655
+ The files associated to `model_type`. Can be passed to speed up the function, otherwise will be computed.
656
+
657
+ Returns:
658
+ `str`: The checkpoint used.
659
+ """
660
+ if model_files is None:
661
+ model_files = get_model_files(model_type)
662
+ module_files = model_files["model_files"]
663
+ for fname in module_files:
664
+ if "modeling" not in str(fname):
665
+ continue
666
+
667
+ with open(fname, "r", encoding="utf-8") as f:
668
+ content = f.read()
669
+ if _re_checkpoint_for_doc.search(content) is not None:
670
+ checkpoint = _re_checkpoint_for_doc.search(content).groups()[0]
671
+ # Remove quotes
672
+ checkpoint = checkpoint.replace('"', "")
673
+ checkpoint = checkpoint.replace("'", "")
674
+ return checkpoint
675
+
676
+ # TODO: Find some kind of fallback if there is no _CHECKPOINT_FOR_DOC in any of the modeling file.
677
+ return ""
678
+
679
+
680
+ def get_default_frameworks():
681
+ """
682
+ Returns the list of frameworks (PyTorch, TensorFlow, Flax) that are installed in the environment.
683
+ """
684
+ frameworks = []
685
+ if is_torch_available():
686
+ frameworks.append("pt")
687
+ if is_tf_available():
688
+ frameworks.append("tf")
689
+ if is_flax_available():
690
+ frameworks.append("flax")
691
+ return frameworks
692
+
693
+
694
+ _re_model_mapping = re.compile("MODEL_([A-Z_]*)MAPPING_NAMES")
695
+
696
+
697
+ def retrieve_model_classes(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, List[str]]:
698
+ """
699
+ Retrieve the model classes associated to a given model.
700
+
701
+ Args:
702
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
703
+ frameworks (`List[str]`, *optional*):
704
+ The frameworks to look for. Will default to `["pt", "tf", "flax"]`, passing a smaller list will restrict
705
+ the classes returned.
706
+
707
+ Returns:
708
+ `Dict[str, List[str]]`: A dictionary with one key per framework and the list of model classes associated to
709
+ that framework as values.
710
+ """
711
+ if frameworks is None:
712
+ frameworks = get_default_frameworks()
713
+
714
+ modules = {
715
+ "pt": auto_module.modeling_auto if is_torch_available() else None,
716
+ "tf": auto_module.modeling_tf_auto if is_tf_available() else None,
717
+ "flax": auto_module.modeling_flax_auto if is_flax_available() else None,
718
+ }
719
+
720
+ model_classes = {}
721
+ for framework in frameworks:
722
+ new_model_classes = []
723
+ if modules[framework] is None:
724
+ raise ValueError(f"You selected {framework} in the frameworks, but it is not installed.")
725
+ model_mappings = [attr for attr in dir(modules[framework]) if _re_model_mapping.search(attr) is not None]
726
+ for model_mapping_name in model_mappings:
727
+ model_mapping = getattr(modules[framework], model_mapping_name)
728
+ if model_type in model_mapping:
729
+ new_model_classes.append(model_mapping[model_type])
730
+
731
+ if len(new_model_classes) > 0:
732
+ # Remove duplicates
733
+ model_classes[framework] = list(set(new_model_classes))
734
+
735
+ return model_classes
736
+
737
+
738
+ def retrieve_info_for_model(model_type, frameworks: Optional[List[str]] = None):
739
+ """
740
+ Retrieves all the information from a given model_type.
741
+
742
+ Args:
743
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
744
+ frameworks (`List[str]`, *optional*):
745
+ If passed, will only keep the info corresponding to the passed frameworks.
746
+
747
+ Returns:
748
+ `Dict`: A dictionary with the following keys:
749
+ - **frameworks** (`List[str]`): The list of frameworks that back this model type.
750
+ - **model_classes** (`Dict[str, List[str]]`): The model classes implemented for that model type.
751
+ - **model_files** (`Dict[str, Union[Path, List[Path]]]`): The files associated with that model type.
752
+ - **model_patterns** (`ModelPatterns`): The various patterns for the model.
753
+ """
754
+ if model_type not in auto_module.MODEL_NAMES_MAPPING:
755
+ raise ValueError(f"{model_type} is not a valid model type.")
756
+
757
+ model_name = auto_module.MODEL_NAMES_MAPPING[model_type]
758
+ config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type]
759
+ if model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES:
760
+ tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type]
761
+ tokenizer_class = tokenizer_classes[0] if tokenizer_classes[0] is not None else tokenizer_classes[1]
762
+ else:
763
+ tokenizer_class = None
764
+ image_processor_class = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None)
765
+ feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None)
766
+ processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None)
767
+
768
+ model_files = get_model_files(model_type, frameworks=frameworks)
769
+ model_camel_cased = config_class.replace("Config", "")
770
+
771
+ available_frameworks = []
772
+ for fname in model_files["model_files"]:
773
+ if "modeling_tf" in str(fname):
774
+ available_frameworks.append("tf")
775
+ elif "modeling_flax" in str(fname):
776
+ available_frameworks.append("flax")
777
+ elif "modeling" in str(fname):
778
+ available_frameworks.append("pt")
779
+
780
+ if frameworks is None:
781
+ frameworks = get_default_frameworks()
782
+
783
+ frameworks = [f for f in frameworks if f in available_frameworks]
784
+
785
+ model_classes = retrieve_model_classes(model_type, frameworks=frameworks)
786
+
787
+ model_upper_cased = model_camel_cased.upper()
788
+ model_patterns = ModelPatterns(
789
+ model_name,
790
+ checkpoint=find_base_model_checkpoint(model_type, model_files=model_files),
791
+ model_type=model_type,
792
+ model_camel_cased=model_camel_cased,
793
+ model_lower_cased=model_files["module_name"],
794
+ model_upper_cased=model_upper_cased,
795
+ config_class=config_class,
796
+ tokenizer_class=tokenizer_class,
797
+ image_processor_class=image_processor_class,
798
+ feature_extractor_class=feature_extractor_class,
799
+ processor_class=processor_class,
800
+ )
801
+
802
+ return {
803
+ "frameworks": frameworks,
804
+ "model_classes": model_classes,
805
+ "model_files": model_files,
806
+ "model_patterns": model_patterns,
807
+ }
808
+
809
+
810
+ def clean_frameworks_in_init(
811
+ init_file: Union[str, os.PathLike], frameworks: Optional[List[str]] = None, keep_processing: bool = True
812
+ ):
813
+ """
814
+ Removes all the import lines that don't belong to a given list of frameworks or concern tokenizers/feature
815
+ extractors/image processors/processors in an init.
816
+
817
+ Args:
818
+ init_file (`str` or `os.PathLike`): The path to the init to treat.
819
+ frameworks (`List[str]`, *optional*):
820
+ If passed, this will remove all imports that are subject to a framework not in frameworks
821
+ keep_processing (`bool`, *optional*, defaults to `True`):
822
+ Whether or not to keep the preprocessing (tokenizer, feature extractor, image processor, processor) imports
823
+ in the init.
824
+ """
825
+ if frameworks is None:
826
+ frameworks = get_default_frameworks()
827
+
828
+ names = {"pt": "torch"}
829
+ to_remove = [names.get(f, f) for f in ["pt", "tf", "flax"] if f not in frameworks]
830
+ if not keep_processing:
831
+ to_remove.extend(["sentencepiece", "tokenizers", "vision"])
832
+
833
+ if len(to_remove) == 0:
834
+ # Nothing to do
835
+ return
836
+
837
+ remove_pattern = "|".join(to_remove)
838
+ re_conditional_imports = re.compile(rf"^\s*if not is_({remove_pattern})_available\(\):\s*$")
839
+ re_try = re.compile(r"\s*try:")
840
+ re_else = re.compile(r"\s*else:")
841
+ re_is_xxx_available = re.compile(rf"is_({remove_pattern})_available")
842
+
843
+ with open(init_file, "r", encoding="utf-8") as f:
844
+ content = f.read()
845
+
846
+ lines = content.split("\n")
847
+ new_lines = []
848
+ idx = 0
849
+ while idx < len(lines):
850
+ # Conditional imports in try-except-else blocks
851
+ if (re_conditional_imports.search(lines[idx]) is not None) and (re_try.search(lines[idx - 1]) is not None):
852
+ # Remove the preceding `try:`
853
+ new_lines.pop()
854
+ idx += 1
855
+ # Iterate until `else:`
856
+ while is_empty_line(lines[idx]) or re_else.search(lines[idx]) is None:
857
+ idx += 1
858
+ idx += 1
859
+ indent = find_indent(lines[idx])
860
+ while find_indent(lines[idx]) >= indent or is_empty_line(lines[idx]):
861
+ idx += 1
862
+ # Remove the import from utils
863
+ elif re_is_xxx_available.search(lines[idx]) is not None:
864
+ line = lines[idx]
865
+ for framework in to_remove:
866
+ line = line.replace(f", is_{framework}_available", "")
867
+ line = line.replace(f"is_{framework}_available, ", "")
868
+ line = line.replace(f"is_{framework}_available,", "")
869
+ line = line.replace(f"is_{framework}_available", "")
870
+
871
+ if len(line.strip()) > 0:
872
+ new_lines.append(line)
873
+ idx += 1
874
+ # Otherwise we keep the line, unless it's a processing-class import we don't want to keep.
875
+ elif keep_processing or (
876
+ re.search(r'^\s*"(tokenization|processing|feature_extraction|image_processing)', lines[idx]) is None
877
+ and re.search(r"^\s*from .(tokenization|processing|feature_extraction|image_processing)", lines[idx])
878
+ is None
879
+ ):
880
+ new_lines.append(lines[idx])
881
+ idx += 1
882
+ else:
883
+ idx += 1
884
+
885
+ with open(init_file, "w", encoding="utf-8") as f:
886
+ f.write("\n".join(new_lines))
887
+
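As a hypothetical usage sketch (the path below is an assumption for illustration), this is how the helper could be called to keep only the PyTorch branch of a freshly duplicated model init and drop the processing imports:

```python
# Hypothetical call: strip TF/Flax branches and tokenizer/processor imports from a model init.
clean_frameworks_in_init(
    "src/transformers/models/my_new_model/__init__.py",  # assumed path, for illustration only
    frameworks=["pt"],
    keep_processing=False,
)
```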
888
+
889
+ def add_model_to_main_init(
890
+ old_model_patterns: ModelPatterns,
891
+ new_model_patterns: ModelPatterns,
892
+ frameworks: Optional[List[str]] = None,
893
+ with_processing: bool = True,
894
+ ):
895
+ """
896
+ Add a model to the main init of Transformers.
897
+
898
+ Args:
899
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
900
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
901
+ frameworks (`List[str]`, *optional*):
902
+ If specified, only the models implemented in those frameworks will be added.
903
+ with_processing (`bool`, *optional*, defaults to `True`):
904
+ Whether the tokenizer/feature extractor/processor of the model should also be added to the init or not.
905
+ """
906
+ with open(TRANSFORMERS_PATH / "__init__.py", "r", encoding="utf-8") as f:
907
+ content = f.read()
908
+
909
+ lines = content.split("\n")
910
+ idx = 0
911
+ new_lines = []
912
+ framework = None
913
+ while idx < len(lines):
914
+ new_framework = False
915
+ if not is_empty_line(lines[idx]) and find_indent(lines[idx]) == 0:
916
+ framework = None
917
+ elif lines[idx].lstrip().startswith("if not is_torch_available"):
918
+ framework = "pt"
919
+ new_framework = True
920
+ elif lines[idx].lstrip().startswith("if not is_tf_available"):
921
+ framework = "tf"
922
+ new_framework = True
923
+ elif lines[idx].lstrip().startswith("if not is_flax_available"):
924
+ framework = "flax"
925
+ new_framework = True
926
+
927
+ if new_framework:
928
+ # For a new framework, we need to skip until the else: block to get where the imports are.
929
+ while lines[idx].strip() != "else:":
930
+ new_lines.append(lines[idx])
931
+ idx += 1
932
+
933
+ # Skip if we are in a framework not wanted.
934
+ if framework is not None and frameworks is not None and framework not in frameworks:
935
+ new_lines.append(lines[idx])
936
+ idx += 1
937
+ elif re.search(rf'models.{old_model_patterns.model_lower_cased}( |")', lines[idx]) is not None:
938
+ block = [lines[idx]]
939
+ indent = find_indent(lines[idx])
940
+ idx += 1
941
+ while find_indent(lines[idx]) > indent:
942
+ block.append(lines[idx])
943
+ idx += 1
944
+ if lines[idx].strip() in [")", "]", "],"]:
945
+ block.append(lines[idx])
946
+ idx += 1
947
+ block = "\n".join(block)
948
+ new_lines.append(block)
949
+
950
+ add_block = True
951
+ if not with_processing:
952
+ processing_classes = [
953
+ old_model_patterns.tokenizer_class,
954
+ old_model_patterns.image_processor_class,
955
+ old_model_patterns.feature_extractor_class,
956
+ old_model_patterns.processor_class,
957
+ ]
958
+ # Only keep the ones that are not None
959
+ processing_classes = [c for c in processing_classes if c is not None]
960
+ for processing_class in processing_classes:
961
+ block = block.replace(f' "{processing_class}",', "")
962
+ block = block.replace(f', "{processing_class}"', "")
963
+ block = block.replace(f" {processing_class},", "")
964
+ block = block.replace(f", {processing_class}", "")
965
+
966
+ if processing_class in block:
967
+ add_block = False
968
+ if add_block:
969
+ new_lines.append(replace_model_patterns(block, old_model_patterns, new_model_patterns)[0])
970
+ else:
971
+ new_lines.append(lines[idx])
972
+ idx += 1
973
+
974
+ with open(TRANSFORMERS_PATH / "__init__.py", "w", encoding="utf-8") as f:
975
+ f.write("\n".join(new_lines))
976
+
977
+
978
+ def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns):
979
+ """
980
+ Add a tokenizer to the relevant mappings in the auto module.
981
+
982
+ Args:
983
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
984
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
985
+ """
986
+ if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None:
987
+ return
988
+
989
+ with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "r", encoding="utf-8") as f:
990
+ content = f.read()
991
+
992
+ lines = content.split("\n")
993
+ idx = 0
994
+ # First we get to the TOKENIZER_MAPPING_NAMES block.
995
+ while not lines[idx].startswith(" TOKENIZER_MAPPING_NAMES = OrderedDict("):
996
+ idx += 1
997
+ idx += 1
998
+
999
+ # That block ends right before this line:
1000
+ while not lines[idx].startswith("TOKENIZER_MAPPING = _LazyAutoMapping"):
1001
+ # Either all the tokenizer block is defined on one line, in which case, it ends with "),"
1002
+ if lines[idx].endswith(","):
1003
+ block = lines[idx]
1004
+ # Otherwise it takes several lines until we get to a "),"
1005
+ else:
1006
+ block = []
1007
+ while not lines[idx].startswith(" ),"):
1008
+ block.append(lines[idx])
1009
+ idx += 1
1010
+ block = "\n".join(block)
1011
+ idx += 1
1012
+
1013
+ # If we find the model type and tokenizer class in that block, we have the old model tokenizer block
1014
+ if f'"{old_model_patterns.model_type}"' in block and old_model_patterns.tokenizer_class in block:
1015
+ break
1016
+
1017
+ new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)
1018
+ new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)
1019
+
1020
+ new_lines = lines[:idx] + [new_block] + lines[idx:]
1021
+ with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "w", encoding="utf-8") as f:
1022
+ f.write("\n".join(new_lines))
1023
+
1024
+
1025
+ AUTO_CLASSES_PATTERNS = {
1026
+ "configuration_auto.py": [
1027
+ ' ("{model_type}", "{model_name}"),',
1028
+ ' ("{model_type}", "{config_class}"),',
1029
+ ' ("{model_type}", "{pretrained_archive_map}"),',
1030
+ ],
1031
+ "feature_extraction_auto.py": [' ("{model_type}", "{feature_extractor_class}"),'],
1032
+ "image_processing_auto.py": [' ("{model_type}", "{image_processor_class}"),'],
1033
+ "modeling_auto.py": [' ("{model_type}", "{any_pt_class}"),'],
1034
+ "modeling_tf_auto.py": [' ("{model_type}", "{any_tf_class}"),'],
1035
+ "modeling_flax_auto.py": [' ("{model_type}", "{any_flax_class}"),'],
1036
+ "processing_auto.py": [' ("{model_type}", "{processor_class}"),'],
1037
+ }
1038
+
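To illustrate how these templates end up as concrete mapping lines (the names below are invented, and `str.format` is used here only as a shortcut for the string replacements done in the function below), a filled-in `configuration_auto.py` entry would look like this:

```python
# Illustration only: filling one configuration_auto.py template with hypothetical names.
pattern = ' ("{model_type}", "{config_class}"),'
line = pattern.format(model_type="my-new-model", config_class="MyNewModelConfig")
print(line)  # ' ("my-new-model", "MyNewModelConfig"),'
```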
1039
+
1040
+ def add_model_to_auto_classes(
1041
+ old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, model_classes: Dict[str, List[str]]
1042
+ ):
1043
+ """
1044
+ Add a model to the relevant mappings in the auto module.
1045
+
1046
+ Args:
1047
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
1048
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
1049
+ model_classes (`Dict[str, List[str]]`): A dictionary framework to list of model classes implemented.
1050
+ """
1051
+ for filename in AUTO_CLASSES_PATTERNS:
1052
+ # Extend patterns with all model classes if necessary
1053
+ new_patterns = []
1054
+ for pattern in AUTO_CLASSES_PATTERNS[filename]:
1055
+ if re.search("any_([a-z]*)_class", pattern) is not None:
1056
+ framework = re.search("any_([a-z]*)_class", pattern).groups()[0]
1057
+ if framework in model_classes:
1058
+ new_patterns.extend(
1059
+ [
1060
+ pattern.replace("{" + f"any_{framework}_class" + "}", cls)
1061
+ for cls in model_classes[framework]
1062
+ ]
1063
+ )
1064
+ elif "{config_class}" in pattern:
1065
+ new_patterns.append(pattern.replace("{config_class}", old_model_patterns.config_class))
1066
+ elif "{image_processor_class}" in pattern:
1067
+ if (
1068
+ old_model_patterns.image_processor_class is not None
1069
+ and new_model_patterns.image_processor_class is not None
1070
+ ):
1071
+ new_patterns.append(
1072
+ pattern.replace("{image_processor_class}", old_model_patterns.image_processor_class)
1073
+ )
1074
+ elif "{feature_extractor_class}" in pattern:
1075
+ if (
1076
+ old_model_patterns.feature_extractor_class is not None
1077
+ and new_model_patterns.feature_extractor_class is not None
1078
+ ):
1079
+ new_patterns.append(
1080
+ pattern.replace("{feature_extractor_class}", old_model_patterns.feature_extractor_class)
1081
+ )
1082
+ elif "{processor_class}" in pattern:
1083
+ if old_model_patterns.processor_class is not None and new_model_patterns.processor_class is not None:
1084
+ new_patterns.append(pattern.replace("{processor_class}", old_model_patterns.processor_class))
1085
+ else:
1086
+ new_patterns.append(pattern)
1087
+
1088
+ # Loop through all patterns.
1089
+ for pattern in new_patterns:
1090
+ full_name = TRANSFORMERS_PATH / "models" / "auto" / filename
1091
+ old_model_line = pattern
1092
+ new_model_line = pattern
1093
+ for attr in ["model_type", "model_name"]:
1094
+ old_model_line = old_model_line.replace("{" + attr + "}", getattr(old_model_patterns, attr))
1095
+ new_model_line = new_model_line.replace("{" + attr + "}", getattr(new_model_patterns, attr))
1096
+ new_model_line = new_model_line.replace(
1097
+ old_model_patterns.model_camel_cased, new_model_patterns.model_camel_cased
1098
+ )
1099
+
1100
+ add_content_to_file(full_name, new_model_line, add_after=old_model_line)
1101
+
1102
+ # Tokenizers require special handling
1103
+ insert_tokenizer_in_auto_module(old_model_patterns, new_model_patterns)
1104
+
1105
+
1106
+ DOC_OVERVIEW_TEMPLATE = """## Overview
1107
+
1108
+ The {model_name} model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
1109
+ <INSERT SHORT SUMMARY HERE>
1110
+
1111
+ The abstract from the paper is the following:
1112
+
1113
+ *<INSERT PAPER ABSTRACT HERE>*
1114
+
1115
+ Tips:
1116
+
1117
+ <INSERT TIPS ABOUT MODEL HERE>
1118
+
1119
+ This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
1120
+ The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).
1121
+
1122
+ """
1123
+
1124
+
1125
+ def duplicate_doc_file(
1126
+ doc_file: Union[str, os.PathLike],
1127
+ old_model_patterns: ModelPatterns,
1128
+ new_model_patterns: ModelPatterns,
1129
+ dest_file: Optional[Union[str, os.PathLike]] = None,
1130
+ frameworks: Optional[List[str]] = None,
1131
+ ):
1132
+ """
1133
+ Duplicates a documentation file and adapts it for a new model.
1134
+
1135
+ Args:
1136
+ doc_file (`str` or `os.PathLike`): Path to the doc file to duplicate.
1137
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
1138
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
1139
+ dest_file (`str` or `os.PathLike`, *optional*): Path to the new doc file.
1140
+ Will default to a file named `{new_model_patterns.model_type}.md` in the same folder as `doc_file`.
1141
+ frameworks (`List[str]`, *optional*):
1142
+ If passed, will only keep the model classes corresponding to this list of frameworks in the new doc file.
1143
+ """
1144
+ with open(doc_file, "r", encoding="utf-8") as f:
1145
+ content = f.read()
1146
+
1147
+ content = re.sub(r"<!--\s*Copyright (\d+)\s", f"<!--Copyright {CURRENT_YEAR} ", content)
1148
+ if frameworks is None:
1149
+ frameworks = get_default_frameworks()
1150
+ if dest_file is None:
1151
+ dest_file = Path(doc_file).parent / f"{new_model_patterns.model_type}.md"
1152
+
1153
+ # Parse the doc file in blocks. One block per section/header
1154
+ lines = content.split("\n")
1155
+ blocks = []
1156
+ current_block = []
1157
+
1158
+ for line in lines:
1159
+ if line.startswith("#"):
1160
+ blocks.append("\n".join(current_block))
1161
+ current_block = [line]
1162
+ else:
1163
+ current_block.append(line)
1164
+ blocks.append("\n".join(current_block))
1165
+
1166
+ new_blocks = []
1167
+ in_classes = False
1168
+ for block in blocks:
1169
+ # Copyright
1170
+ if not block.startswith("#"):
1171
+ new_blocks.append(block)
1172
+ # Main title
1173
+ elif re.search(r"^#\s+\S+", block) is not None:
1174
+ new_blocks.append(f"# {new_model_patterns.model_name}\n")
1175
+ # The config starts the part of the doc with the classes.
1176
+ elif not in_classes and old_model_patterns.config_class in block.split("\n")[0]:
1177
+ in_classes = True
1178
+ new_blocks.append(DOC_OVERVIEW_TEMPLATE.format(model_name=new_model_patterns.model_name))
1179
+ new_block, _ = replace_model_patterns(block, old_model_patterns, new_model_patterns)
1180
+ new_blocks.append(new_block)
1181
+ # In classes
1182
+ elif in_classes:
1183
+ in_classes = True
1184
+ block_title = block.split("\n")[0]
1185
+ block_class = re.search(r"^#+\s+(\S.*)$", block_title).groups()[0]
1186
+ new_block, _ = replace_model_patterns(block, old_model_patterns, new_model_patterns)
1187
+
1188
+ if "Tokenizer" in block_class:
1189
+ # We only add the tokenizer if necessary
1190
+ if old_model_patterns.tokenizer_class != new_model_patterns.tokenizer_class:
1191
+ new_blocks.append(new_block)
1192
+ elif "ImageProcessor" in block_class:
1193
+ # We only add the image processor if necessary
1194
+ if old_model_patterns.image_processor_class != new_model_patterns.image_processor_class:
1195
+ new_blocks.append(new_block)
1196
+ elif "FeatureExtractor" in block_class:
1197
+ # We only add the feature extractor if necessary
1198
+ if old_model_patterns.feature_extractor_class != new_model_patterns.feature_extractor_class:
1199
+ new_blocks.append(new_block)
1200
+ elif "Processor" in block_class:
1201
+ # We only add the processor if necessary
1202
+ if old_model_patterns.processor_class != new_model_patterns.processor_class:
1203
+ new_blocks.append(new_block)
1204
+ elif block_class.startswith("Flax"):
1205
+ # We only add Flax models if in the selected frameworks
1206
+ if "flax" in frameworks:
1207
+ new_blocks.append(new_block)
1208
+ elif block_class.startswith("TF"):
1209
+ # We only add TF models if in the selected frameworks
1210
+ if "tf" in frameworks:
1211
+ new_blocks.append(new_block)
1212
+ elif len(block_class.split(" ")) == 1:
1213
+ # We only add PyTorch models if in the selected frameworks
1214
+ if "pt" in frameworks:
1215
+ new_blocks.append(new_block)
1216
+ else:
1217
+ new_blocks.append(new_block)
1218
+
1219
+ with open(dest_file, "w", encoding="utf-8") as f:
1220
+ f.write("\n".join(new_blocks))
1221
+
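A hedged usage sketch (the path and pattern objects below are assumptions): duplicating an existing doc page for a new model while keeping only the PyTorch classes might look like this, where `old_patterns` and `new_patterns` are assumed `ModelPatterns` instances:

```python
# Hypothetical sketch; `old_patterns` and `new_patterns` are assumed ModelPatterns instances.
duplicate_doc_file(
    "docs/source/en/model_doc/bert.md",  # assumed source doc file
    old_model_patterns=old_patterns,
    new_model_patterns=new_patterns,
    frameworks=["pt"],
)
```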
1222
+
1223
+ def insert_model_in_doc_toc(old_model_patterns, new_model_patterns):
1224
+ """
1225
+ Insert the new model in the doc TOC, in the same section as the old model.
1226
+
1227
+ Args:
1228
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
1229
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
1230
+ """
1231
+ toc_file = REPO_PATH / "docs" / "source" / "en" / "_toctree.yml"
1232
+ with open(toc_file, "r", encoding="utf8") as f:
1233
+ content = yaml.safe_load(f)
1234
+
1235
+ # Get to the model API doc
1236
+ api_idx = 0
1237
+ while content[api_idx]["title"] != "API":
1238
+ api_idx += 1
1239
+ api_doc = content[api_idx]["sections"]
1240
+
1241
+ model_idx = 0
1242
+ while api_doc[model_idx]["title"] != "Models":
1243
+ model_idx += 1
1244
+ model_doc = api_doc[model_idx]["sections"]
1245
+
1246
+ # Find the base model in the Toc
1247
+ old_model_type = old_model_patterns.model_type
1248
+ section_idx = 0
1249
+ while section_idx < len(model_doc):
1250
+ sections = [entry["local"] for entry in model_doc[section_idx]["sections"]]
1251
+ if f"model_doc/{old_model_type}" in sections:
1252
+ break
1253
+
1254
+ section_idx += 1
1255
+
1256
+ if section_idx == len(model_doc):
1257
+ old_model = old_model_patterns.model_name
1258
+ new_model = new_model_patterns.model_name
1259
+ print(f"Did not find {old_model} in the table of contents, so you will need to add {new_model} manually.")
1260
+ return
1261
+
1262
+ # Add the new model in the same toc
1263
+ toc_entry = {"local": f"model_doc/{new_model_patterns.model_type}", "title": new_model_patterns.model_name}
1264
+ model_doc[section_idx]["sections"].append(toc_entry)
1265
+ model_doc[section_idx]["sections"] = sorted(model_doc[section_idx]["sections"], key=lambda s: s["title"].lower())
1266
+ api_doc[model_idx]["sections"] = model_doc
1267
+ content[api_idx]["sections"] = api_doc
1268
+
1269
+ with open(toc_file, "w", encoding="utf-8") as f:
1270
+ f.write(yaml.dump(content, allow_unicode=True))
1271
+
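For reference, this is roughly what the appended TOC entry serializes to (the model names are made up; the dump mirrors the `yaml.dump` call in the function above):

```python
import yaml

# Hypothetical entry for a new model type "my-new-model".
toc_entry = {"local": "model_doc/my-new-model", "title": "MyNewModel"}
print(yaml.dump([toc_entry], allow_unicode=True))
# - local: model_doc/my-new-model
#   title: MyNewModel
```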
1272
+
1273
+ def create_new_model_like(
1274
+ model_type: str,
1275
+ new_model_patterns: ModelPatterns,
1276
+ add_copied_from: bool = True,
1277
+ frameworks: Optional[List[str]] = None,
1278
+ old_checkpoint: Optional[str] = None,
1279
+ ):
1280
+ """
1281
+ Creates a new model module like a given model of the Transformers library.
1282
+
1283
+ Args:
1284
+ model_type (`str`): The model type to duplicate (like "bert" or "gpt2")
1285
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
1286
+ add_copied_from (`bool`, *optional*, defaults to `True`):
1287
+ Whether or not to add "Copied from" statements to all classes in the new model modeling files.
1288
+ frameworks (`List[str]`, *optional*):
1289
+ If passed, will limit the duplicate to the frameworks specified.
1290
+ old_checkpoint (`str`, *optional*):
1291
+ The name of the base checkpoint for the old model. Should be passed along when it can't be automatically
1292
+ recovered from the `model_type`.
1293
+ """
1294
+ # Retrieve all the old model info.
1295
+ model_info = retrieve_info_for_model(model_type, frameworks=frameworks)
1296
+ model_files = model_info["model_files"]
1297
+ old_model_patterns = model_info["model_patterns"]
1298
+ if old_checkpoint is not None:
1299
+ old_model_patterns.checkpoint = old_checkpoint
1300
+ if len(old_model_patterns.checkpoint) == 0:
1301
+ raise ValueError(
1302
+ "The old model checkpoint could not be recovered from the model type. Please pass it to the "
1303
+ "`old_checkpoint` argument."
1304
+ )
1305
+
1306
+ keep_old_processing = True
1307
+ for processing_attr in ["image_processor_class", "feature_extractor_class", "processor_class", "tokenizer_class"]:
1308
+ if getattr(old_model_patterns, processing_attr) != getattr(new_model_patterns, processing_attr):
1309
+ keep_old_processing = False
1310
+
1311
+ model_classes = model_info["model_classes"]
1312
+
1313
+ # 1. We create the module for our new model.
1314
+ old_module_name = model_files["module_name"]
1315
+ module_folder = TRANSFORMERS_PATH / "models" / new_model_patterns.model_lower_cased
1316
+ os.makedirs(module_folder, exist_ok=True)
1317
+
1318
+ files_to_adapt = model_files["model_files"]
1319
+ if keep_old_processing:
1320
+ files_to_adapt = [
1321
+ f
1322
+ for f in files_to_adapt
1323
+ if "tokenization" not in str(f)
1324
+ and "processing" not in str(f)
1325
+ and "feature_extraction" not in str(f)
1326
+ and "image_processing" not in str(f)
1327
+ ]
1328
+
1329
+ os.makedirs(module_folder, exist_ok=True)
1330
+ for module_file in files_to_adapt:
1331
+ new_module_name = module_file.name.replace(
1332
+ old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
1333
+ )
1334
+ dest_file = module_folder / new_module_name
1335
+ duplicate_module(
1336
+ module_file,
1337
+ old_model_patterns,
1338
+ new_model_patterns,
1339
+ dest_file=dest_file,
1340
+ add_copied_from=add_copied_from and "modeling" in new_module_name,
1341
+ )
1342
+
1343
+ clean_frameworks_in_init(
1344
+ module_folder / "__init__.py", frameworks=frameworks, keep_processing=not keep_old_processing
1345
+ )
1346
+
1347
+ # 2. We add our new model to the models init and the main init
1348
+ add_content_to_file(
1349
+ TRANSFORMERS_PATH / "models" / "__init__.py",
1350
+ f" {new_model_patterns.model_lower_cased},",
1351
+ add_after=f" {old_module_name},",
1352
+ exact_match=True,
1353
+ )
1354
+ add_model_to_main_init(
1355
+ old_model_patterns, new_model_patterns, frameworks=frameworks, with_processing=not keep_old_processing
1356
+ )
1357
+
1358
+ # 3. Add test files
1359
+ files_to_adapt = model_files["test_files"]
1360
+ if keep_old_processing:
1361
+ files_to_adapt = [
1362
+ f
1363
+ for f in files_to_adapt
1364
+ if "tokenization" not in str(f)
1365
+ and "processor" not in str(f)
1366
+ and "feature_extraction" not in str(f)
1367
+ and "image_processing" not in str(f)
1368
+ ]
1369
+
1370
+ def disable_fx_test(filename: Path) -> bool:
1371
+ with open(filename) as fp:
1372
+ content = fp.read()
1373
+ new_content = re.sub(r"fx_compatible\s*=\s*True", "fx_compatible = False", content)
1374
+ with open(filename, "w") as fp:
1375
+ fp.write(new_content)
1376
+ return content != new_content
1377
+
1378
+ disabled_fx_test = False
1379
+
1380
+ tests_folder = REPO_PATH / "tests" / "models" / new_model_patterns.model_lower_cased
1381
+ os.makedirs(tests_folder, exist_ok=True)
1382
+ with open(tests_folder / "__init__.py", "w"):
1383
+ pass
1384
+
1385
+ for test_file in files_to_adapt:
1386
+ new_test_file_name = test_file.name.replace(
1387
+ old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
1388
+ )
1389
+ dest_file = test_file.parent.parent / new_model_patterns.model_lower_cased / new_test_file_name
1390
+ duplicate_module(
1391
+ test_file,
1392
+ old_model_patterns,
1393
+ new_model_patterns,
1394
+ dest_file=dest_file,
1395
+ add_copied_from=False,
1396
+ attrs_to_remove=["pipeline_model_mapping", "is_pipeline_test_to_skip"],
1397
+ )
1398
+ disabled_fx_test = disabled_fx_test | disable_fx_test(dest_file)
1399
+
1400
+ if disabled_fx_test:
1401
+ print(
1402
+ "The tests for symbolic tracing with torch.fx were disabled, you can add those once symbolic tracing works"
1403
+ " for your new model."
1404
+ )
1405
+
1406
+ # 4. Add model to auto classes
1407
+ add_model_to_auto_classes(old_model_patterns, new_model_patterns, model_classes)
1408
+
1409
+ # 5. Add doc file
1410
+ doc_file = REPO_PATH / "docs" / "source" / "en" / "model_doc" / f"{old_model_patterns.model_type}.md"
1411
+ duplicate_doc_file(doc_file, old_model_patterns, new_model_patterns, frameworks=frameworks)
1412
+ insert_model_in_doc_toc(old_model_patterns, new_model_patterns)
1413
+
1414
+ # 6. Warn the user about duplicate patterns
1415
+ if old_model_patterns.model_type == old_model_patterns.checkpoint:
1416
+ print(
1417
+ "The model you picked has the same name for the model type and the checkpoint name "
1418
+ f"({old_model_patterns.model_type}). As a result, it's possible some places where the new checkpoint "
1419
+ f"should be, you have {new_model_patterns.model_type} instead. You should search for all instances of "
1420
+ f"{new_model_patterns.model_type} in the new files and check they're not badly used as checkpoints."
1421
+ )
1422
+ elif old_model_patterns.model_lower_cased == old_model_patterns.checkpoint:
1423
+ print(
1424
+ "The model you picked has the same name for the lowercased model name and the checkpoint name "
1425
+ f"({old_model_patterns.model_lower_cased}). As a result, it's possible some places where the new "
1426
+ f"checkpoint should be, you have {new_model_patterns.model_lower_cased} instead. You should search for "
1427
+ f"all instances of {new_model_patterns.model_lower_cased} in the new files and check they're not badly "
1428
+ "used as checkpoints."
1429
+ )
1430
+ if (
1431
+ old_model_patterns.model_type == old_model_patterns.model_lower_cased
1432
+ and new_model_patterns.model_type != new_model_patterns.model_lower_cased
1433
+ ):
1434
+ print(
1435
+ "The model you picked has the same name for the model type and the lowercased model name "
1436
+ f"({old_model_patterns.model_lower_cased}). As a result, it's possible some places where the new "
1437
+ f"model type should be, you have {new_model_patterns.model_lower_cased} instead. You should search for "
1438
+ f"all instances of {new_model_patterns.model_lower_cased} in the new files and check they're not badly "
1439
+ "used as the model type."
1440
+ )
1441
+
1442
+ if not keep_old_processing and old_model_patterns.tokenizer_class is not None:
1443
+ print(
1444
+ "The constants at the start of the newly created tokenizer file need to be manually fixed. If your new "
1445
+ "model has a fast tokenizer, you will also need to manually add the converter in the "
1446
+ "`SLOW_TO_FAST_CONVERTERS` constant of `convert_slow_tokenizer.py`."
1447
+ )
1448
+
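Putting the pieces together, a hedged end-to-end sketch (the model name, type, and checkpoint below are placeholders, not real models) of duplicating GPT-2 into a new PyTorch-only model could look like this:

```python
# Hypothetical sketch: all names below are placeholders, not real models or checkpoints.
new_patterns = ModelPatterns(
    "MyGPT",                          # model name used in docs and class prefixes
    checkpoint="my-org/my-gpt-base",  # assumed Hub checkpoint identifier
    model_type="my-gpt",
)

create_new_model_like(
    model_type="gpt2",
    new_model_patterns=new_patterns,
    add_copied_from=True,
    frameworks=["pt"],
)
```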
1449
+
1450
+ def add_new_model_like_command_factory(args: Namespace):
1451
+ return AddNewModelLikeCommand(config_file=args.config_file, path_to_repo=args.path_to_repo)
1452
+
1453
+
1454
+ class AddNewModelLikeCommand(BaseTransformersCLICommand):
1455
+ @staticmethod
1456
+ def register_subcommand(parser: ArgumentParser):
1457
+ add_new_model_like_parser = parser.add_parser("add-new-model-like")
1458
+ add_new_model_like_parser.add_argument(
1459
+ "--config_file", type=str, help="A file with all the information for this model creation."
1460
+ )
1461
+ add_new_model_like_parser.add_argument(
1462
+ "--path_to_repo", type=str, help="When not using an editable install, the path to the Transformers repo."
1463
+ )
1464
+ add_new_model_like_parser.set_defaults(func=add_new_model_like_command_factory)
1465
+
1466
+ def __init__(self, config_file=None, path_to_repo=None, *args):
1467
+ if config_file is not None:
1468
+ with open(config_file, "r", encoding="utf-8") as f:
1469
+ config = json.load(f)
1470
+ self.old_model_type = config["old_model_type"]
1471
+ self.model_patterns = ModelPatterns(**config["new_model_patterns"])
1472
+ self.add_copied_from = config.get("add_copied_from", True)
1473
+ self.frameworks = config.get("frameworks", get_default_frameworks())
1474
+ self.old_checkpoint = config.get("old_checkpoint", None)
1475
+ else:
1476
+ (
1477
+ self.old_model_type,
1478
+ self.model_patterns,
1479
+ self.add_copied_from,
1480
+ self.frameworks,
1481
+ self.old_checkpoint,
1482
+ ) = get_user_input()
1483
+
1484
+ self.path_to_repo = path_to_repo
1485
+
1486
+ def run(self):
1487
+ if self.path_to_repo is not None:
1488
+ # Adapt constants
1489
+ global TRANSFORMERS_PATH
1490
+ global REPO_PATH
1491
+
1492
+ REPO_PATH = Path(self.path_to_repo)
1493
+ TRANSFORMERS_PATH = REPO_PATH / "src" / "transformers"
1494
+
1495
+ create_new_model_like(
1496
+ model_type=self.old_model_type,
1497
+ new_model_patterns=self.model_patterns,
1498
+ add_copied_from=self.add_copied_from,
1499
+ frameworks=self.frameworks,
1500
+ old_checkpoint=self.old_checkpoint,
1501
+ )
1502
+
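For the `--config_file` path, the keys read in `AddNewModelLikeCommand.__init__` above suggest a JSON layout along these lines (a sketch with invented values; the `new_model_patterns` entry is assumed to contain valid `ModelPatterns` keyword arguments):

```python
import json

# Hypothetical config consumed via `transformers-cli add-new-model-like --config_file ...`.
config = {
    "old_model_type": "roberta",
    "new_model_patterns": {
        "model_name": "MyRoberta",              # assumed ModelPatterns kwargs
        "checkpoint": "my-org/my-roberta-base",
    },
    "add_copied_from": True,
    "frameworks": ["pt"],
}

with open("add_my_roberta.json", "w", encoding="utf-8") as f:
    json.dump(config, f, indent=2)
```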
1503
+
1504
+ def get_user_field(
1505
+ question: str,
1506
+ default_value: Optional[str] = None,
1507
+ is_valid_answer: Optional[Callable] = None,
1508
+ convert_to: Optional[Callable] = None,
1509
+ fallback_message: Optional[str] = None,
1510
+ ) -> Any:
1511
+ """
1512
+ A utility function that asks a question to the user to get an answer, potentially looping until it gets a valid
1513
+ answer.
1514
+
1515
+ Args:
1516
+ question (`str`): The question to ask the user.
1517
+ default_value (`str`, *optional*): A potential default value that will be used when the answer is empty.
1518
+ is_valid_answer (`Callable`, *optional*):
1519
+ If set, the question will be asked until this function returns `True` on the provided answer.
1520
+ convert_to (`Callable`, *optional*):
1521
+ If set, the answer will be passed to this function. If this function raises an error on the provided
1522
+ answer, the question will be asked again.
1523
+ fallback_message (`str`, *optional*):
1524
+ A message that will be displayed each time the question is asked again to the user.
1525
+
1526
+ Returns:
1527
+ `Any`: The answer provided by the user (or the default), passed through the potential conversion function.
1528
+ """
1529
+ if not question.endswith(" "):
1530
+ question = question + " "
1531
+ if default_value is not None:
1532
+ question = f"{question} [{default_value}] "
1533
+
1534
+ valid_answer = False
1535
+ while not valid_answer:
1536
+ answer = input(question)
1537
+ if default_value is not None and len(answer) == 0:
1538
+ answer = default_value
1539
+ if is_valid_answer is not None:
1540
+ valid_answer = is_valid_answer(answer)
1541
+ elif convert_to is not None:
1542
+ try:
1543
+ answer = convert_to(answer)
1544
+ valid_answer = True
1545
+ except Exception:
1546
+ valid_answer = False
1547
+ else:
1548
+ valid_answer = True
1549
+
1550
+ if not valid_answer:
1551
+ print(fallback_message)
1552
+
1553
+ return answer
1554
+
1555
+
1556
+ def convert_to_bool(x: str) -> bool:
1557
+ """
1558
+ Converts a string to a bool.
1559
+ """
1560
+ if x.lower() in ["1", "y", "yes", "true"]:
1561
+ return True
1562
+ if x.lower() in ["0", "n", "no", "false"]:
1563
+ return False
1564
+ raise ValueError(f"{x} is not a value that can be converted to a bool.")
1565
+
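A small usage sketch combining the two helpers above (the question text is illustrative):

```python
# Keeps prompting until the answer can be converted to a bool.
add_tf = get_user_field(
    "Should a TensorFlow version be created as well (yes/no)?",
    default_value="no",
    convert_to=convert_to_bool,
    fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
)
```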
1566
+
1567
+ def get_user_input():
1568
+ """
1569
+ Ask the user for the necessary inputs to add the new model.
1570
+ """
1571
+ model_types = list(auto_module.configuration_auto.MODEL_NAMES_MAPPING.keys())
1572
+
1573
+ # Get old model type
1574
+ valid_model_type = False
1575
+ while not valid_model_type:
1576
+ old_model_type = input(
1577
+ "What is the model you would like to duplicate? Please provide the lowercase `model_type` (e.g. roberta): "
1578
+ )
1579
+ if old_model_type in model_types:
1580
+ valid_model_type = True
1581
+ else:
1582
+ print(f"{old_model_type} is not a valid model type.")
1583
+ near_choices = difflib.get_close_matches(old_model_type, model_types)
1584
+ if len(near_choices) >= 1:
1585
+ if len(near_choices) > 1:
1586
+ near_choices = " or ".join(near_choices)
1587
+ print(f"Did you mean {near_choices}?")
1588
+
1589
+ old_model_info = retrieve_info_for_model(old_model_type)
1590
+ old_tokenizer_class = old_model_info["model_patterns"].tokenizer_class
1591
+ old_image_processor_class = old_model_info["model_patterns"].image_processor_class
1592
+ old_feature_extractor_class = old_model_info["model_patterns"].feature_extractor_class
1593
+ old_processor_class = old_model_info["model_patterns"].processor_class
1594
+ old_frameworks = old_model_info["frameworks"]
1595
+
1596
+ old_checkpoint = None
1597
+ if len(old_model_info["model_patterns"].checkpoint) == 0:
1598
+ old_checkpoint = get_user_field(
1599
+ "We couldn't find the name of the base checkpoint for that model, please enter it here."
1600
+ )
1601
+
1602
+ model_name = get_user_field(
1603
+ "What is the name (with no special casing) for your new model in the paper (e.g. RoBERTa)? "
1604
+ )
1605
+ default_patterns = ModelPatterns(model_name, model_name)
1606
+
1607
+ model_type = get_user_field(
1608
+ "What identifier would you like to use for the `model_type` of this model? ",
1609
+ default_value=default_patterns.model_type,
1610
+ )
1611
+ model_lower_cased = get_user_field(
1612
+ "What lowercase name would you like to use for the module (folder) of this model? ",
1613
+ default_value=default_patterns.model_lower_cased,
1614
+ )
1615
+ model_camel_cased = get_user_field(
1616
+ "What prefix (camel-cased) would you like to use for the model classes of this model (e.g. Roberta)? ",
1617
+ default_value=default_patterns.model_camel_cased,
1618
+ )
1619
+ model_upper_cased = get_user_field(
1620
+ "What prefix (upper-cased) would you like to use for the constants relative to this model? ",
1621
+ default_value=default_patterns.model_upper_cased,
1622
+ )
1623
+ config_class = get_user_field(
1624
+ "What will be the name of the config class for this model? ", default_value=f"{model_camel_cased}Config"
1625
+ )
1626
+ checkpoint = get_user_field(
1627
+ "Please give a checkpoint identifier (on the model Hub) for this new model (e.g. FacebookAI/roberta-base): "
1628
+ )
1629
+
1630
+ old_processing_classes = [
1631
+ c
1632
+ for c in [old_image_processor_class, old_feature_extractor_class, old_tokenizer_class, old_processor_class]
1633
+ if c is not None
1634
+ ]
1635
+ old_processing_classes = ", ".join(old_processing_classes)
1636
+ keep_processing = get_user_field(
1637
+ f"Will your new model use the same processing class as {old_model_type} ({old_processing_classes}) (yes/no)? ",
1638
+ convert_to=convert_to_bool,
1639
+ fallback_message="Please answer yes/no, y/n, true/false or 1/0. ",
1640
+ )
1641
+ if keep_processing:
1642
+ image_processor_class = old_image_processor_class
1643
+ feature_extractor_class = old_feature_extractor_class
1644
+ processor_class = old_processor_class
1645
+ tokenizer_class = old_tokenizer_class
1646
+ else:
1647
+ if old_tokenizer_class is not None:
1648
+ tokenizer_class = get_user_field(
1649
+ "What will be the name of the tokenizer class for this model? ",
1650
+ default_value=f"{model_camel_cased}Tokenizer",
1651
+ )
1652
+ else:
1653
+ tokenizer_class = None
1654
+ if old_image_processor_class is not None:
1655
+ image_processor_class = get_user_field(
1656
+ "What will be the name of the image processor class for this model? ",
1657
+ default_value=f"{model_camel_cased}ImageProcessor",
1658
+ )
1659
+ else:
1660
+ image_processor_class = None
1661
+ if old_feature_extractor_class is not None:
1662
+ feature_extractor_class = get_user_field(
1663
+ "What will be the name of the feature extractor class for this model? ",
1664
+ default_value=f"{model_camel_cased}FeatureExtractor",
1665
+ )
1666
+ else:
1667
+ feature_extractor_class = None
1668
+ if old_processor_class is not None:
1669
+ processor_class = get_user_field(
1670
+ "What will be the name of the processor class for this model? ",
1671
+ default_value=f"{model_camel_cased}Processor",
1672
+ )
1673
+ else:
1674
+ processor_class = None
1675
+
1676
+ model_patterns = ModelPatterns(
1677
+ model_name,
1678
+ checkpoint,
1679
+ model_type=model_type,
1680
+ model_lower_cased=model_lower_cased,
1681
+ model_camel_cased=model_camel_cased,
1682
+ model_upper_cased=model_upper_cased,
1683
+ config_class=config_class,
1684
+ tokenizer_class=tokenizer_class,
1685
+ image_processor_class=image_processor_class,
1686
+ feature_extractor_class=feature_extractor_class,
1687
+ processor_class=processor_class,
1688
+ )
1689
+
1690
+ add_copied_from = get_user_field(
1691
+ "Should we add # Copied from statements when creating the new modeling file (yes/no)? ",
1692
+ convert_to=convert_to_bool,
1693
+ default_value="yes",
1694
+ fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
1695
+ )
1696
+
1697
+ all_frameworks = get_user_field(
1698
+ "Should we add a version of your new model in all the frameworks implemented by"
1699
+ f" {old_model_type} ({old_frameworks}) (yes/no)? ",
1700
+ convert_to=convert_to_bool,
1701
+ default_value="yes",
1702
+ fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
1703
+ )
1704
+ if all_frameworks:
1705
+ frameworks = None
1706
+ else:
1707
+ frameworks = get_user_field(
1708
+ "Please enter the list of frameworks you want (pt, tf, flax) separated by spaces",
1709
+ is_valid_answer=lambda x: all(p in ["pt", "tf", "flax"] for p in x.split(" ")),
1710
+ )
1711
+ frameworks = list(set(frameworks.split(" ")))
1712
+
1713
+ return (old_model_type, model_patterns, add_copied_from, frameworks, old_checkpoint)
llmeval-env/lib/python3.10/site-packages/transformers/commands/convert.py ADDED
@@ -0,0 +1,165 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from argparse import ArgumentParser, Namespace
16
+
17
+ from ..utils import logging
18
+ from . import BaseTransformersCLICommand
19
+
20
+
21
+ def convert_command_factory(args: Namespace):
22
+ """
23
+ Factory function used to convert a model's TensorFlow 1.0 checkpoint to a PyTorch checkpoint.
24
+
25
+ Returns: ConvertCommand
26
+ """
27
+ return ConvertCommand(
28
+ args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
29
+ )
30
+
31
+
32
+ IMPORT_ERROR_MESSAGE = """
33
+ transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
34
+ TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
35
+ """
36
+
37
+
38
+ class ConvertCommand(BaseTransformersCLICommand):
39
+ @staticmethod
40
+ def register_subcommand(parser: ArgumentParser):
41
+ """
42
+ Register this command to argparse so it's available for the transformer-cli
43
+
44
+ Args:
45
+ parser: Root parser to register command-specific arguments
46
+ """
47
+ train_parser = parser.add_parser(
48
+ "convert",
49
+ help="CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.",
50
+ )
51
+ train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
52
+ train_parser.add_argument(
53
+ "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
54
+ )
55
+ train_parser.add_argument(
56
+ "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
57
+ )
58
+ train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
59
+ train_parser.add_argument(
60
+ "--finetuning_task_name",
61
+ type=str,
62
+ default=None,
63
+ help="Optional fine-tuning task name if the TF model was a finetuned model.",
64
+ )
65
+ train_parser.set_defaults(func=convert_command_factory)
66
+
67
+ def __init__(
68
+ self,
69
+ model_type: str,
70
+ tf_checkpoint: str,
71
+ pytorch_dump_output: str,
72
+ config: str,
73
+ finetuning_task_name: str,
74
+ *args,
75
+ ):
76
+ self._logger = logging.get_logger("transformers-cli/converting")
77
+
78
+ self._logger.info(f"Loading model {model_type}")
79
+ self._model_type = model_type
80
+ self._tf_checkpoint = tf_checkpoint
81
+ self._pytorch_dump_output = pytorch_dump_output
82
+ self._config = config
83
+ self._finetuning_task_name = finetuning_task_name
84
+
85
+ def run(self):
86
+ if self._model_type == "albert":
87
+ try:
88
+ from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
89
+ convert_tf_checkpoint_to_pytorch,
90
+ )
91
+ except ImportError:
92
+ raise ImportError(IMPORT_ERROR_MESSAGE)
93
+
94
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
95
+ elif self._model_type == "bert":
96
+ try:
97
+ from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
98
+ convert_tf_checkpoint_to_pytorch,
99
+ )
100
+ except ImportError:
101
+ raise ImportError(IMPORT_ERROR_MESSAGE)
102
+
103
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
104
+ elif self._model_type == "funnel":
105
+ try:
106
+ from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
107
+ convert_tf_checkpoint_to_pytorch,
108
+ )
109
+ except ImportError:
110
+ raise ImportError(IMPORT_ERROR_MESSAGE)
111
+
112
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
113
+ elif self._model_type == "t5":
114
+ try:
115
+ from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
116
+ except ImportError:
117
+ raise ImportError(IMPORT_ERROR_MESSAGE)
118
+
119
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
120
+ elif self._model_type == "gpt":
121
+ from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
122
+ convert_openai_checkpoint_to_pytorch,
123
+ )
124
+
125
+ convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
126
+ elif self._model_type == "gpt2":
127
+ try:
128
+ from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
129
+ convert_gpt2_checkpoint_to_pytorch,
130
+ )
131
+ except ImportError:
132
+ raise ImportError(IMPORT_ERROR_MESSAGE)
133
+
134
+ convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
135
+ elif self._model_type == "xlnet":
136
+ try:
137
+ from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
138
+ convert_xlnet_checkpoint_to_pytorch,
139
+ )
140
+ except ImportError:
141
+ raise ImportError(IMPORT_ERROR_MESSAGE)
142
+
143
+ convert_xlnet_checkpoint_to_pytorch(
144
+ self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
145
+ )
146
+ elif self._model_type == "xlm":
147
+ from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
148
+ convert_xlm_checkpoint_to_pytorch,
149
+ )
150
+
151
+ convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
152
+ elif self._model_type == "lxmert":
153
+ from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
154
+ convert_lxmert_checkpoint_to_pytorch,
155
+ )
156
+
157
+ convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
158
+ elif self._model_type == "rembert":
159
+ from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
160
+ convert_rembert_tf_checkpoint_to_pytorch,
161
+ )
162
+
163
+ convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
164
+ else:
165
+ raise ValueError("--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, xlm, xlnet]")
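As a hedged illustration of how this subcommand is typically invoked (all paths are placeholders), the call can be driven from Python via `subprocess`:

```python
import subprocess

# Hypothetical invocation; requires transformers-cli and TensorFlow to be installed.
subprocess.run(
    [
        "transformers-cli", "convert",
        "--model_type", "bert",
        "--tf_checkpoint", "/path/to/bert_model.ckpt",      # placeholder path
        "--config", "/path/to/bert_config.json",            # placeholder path
        "--pytorch_dump_output", "/path/to/pytorch_model",  # placeholder path
    ],
    check=True,
)
```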
llmeval-env/lib/python3.10/site-packages/transformers/commands/download.py ADDED
@@ -0,0 +1,56 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from argparse import ArgumentParser
16
+
17
+ from . import BaseTransformersCLICommand
18
+
19
+
20
+ def download_command_factory(args):
21
+ return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
22
+
23
+
24
+ class DownloadCommand(BaseTransformersCLICommand):
25
+ @staticmethod
26
+ def register_subcommand(parser: ArgumentParser):
27
+ download_parser = parser.add_parser("download")
28
+ download_parser.add_argument(
29
+ "--cache-dir", type=str, default=None, help="Path to location to store the models"
30
+ )
31
+ download_parser.add_argument(
32
+ "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
33
+ )
34
+ download_parser.add_argument(
35
+ "--trust-remote-code",
36
+ action="store_true",
37
+ help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
38
+ )
39
+ download_parser.add_argument("model", type=str, help="Name of the model to download")
40
+ download_parser.set_defaults(func=download_command_factory)
41
+
42
+ def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
43
+ self._model = model
44
+ self._cache = cache
45
+ self._force = force
46
+ self._trust_remote_code = trust_remote_code
47
+
48
+ def run(self):
49
+ from ..models.auto import AutoModel, AutoTokenizer
50
+
51
+ AutoModel.from_pretrained(
52
+ self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
53
+ )
54
+ AutoTokenizer.from_pretrained(
55
+ self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
56
+ )
llmeval-env/lib/python3.10/site-packages/transformers/commands/env.py ADDED
@@ -0,0 +1,143 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib.util
16
+ import os
17
+ import platform
18
+ from argparse import ArgumentParser
19
+
20
+ import huggingface_hub
21
+
22
+ from .. import __version__ as version
23
+ from ..utils import (
24
+ is_accelerate_available,
25
+ is_flax_available,
26
+ is_safetensors_available,
27
+ is_tf_available,
28
+ is_torch_available,
29
+ )
30
+ from . import BaseTransformersCLICommand
31
+
32
+
33
+ def info_command_factory(_):
34
+ return EnvironmentCommand()
35
+
36
+
37
+ def download_command_factory(args):
38
+ return EnvironmentCommand(args.accelerate_config_file)
39
+
40
+
41
+ class EnvironmentCommand(BaseTransformersCLICommand):
42
+ @staticmethod
43
+ def register_subcommand(parser: ArgumentParser):
44
+ download_parser = parser.add_parser("env")
45
+ download_parser.set_defaults(func=info_command_factory)
46
+ download_parser.add_argument(
47
+ "--accelerate-config_file",
48
+ default=None,
49
+ help="The accelerate config file to use for the default values in the launching script.",
50
+ )
51
+ download_parser.set_defaults(func=download_command_factory)
52
+
53
+ def __init__(self, accelerate_config_file, *args) -> None:
54
+ self._accelerate_config_file = accelerate_config_file
55
+
56
+ def run(self):
57
+ safetensors_version = "not installed"
58
+ if is_safetensors_available():
59
+ import safetensors
60
+
61
+ safetensors_version = safetensors.__version__
62
+ elif importlib.util.find_spec("safetensors") is not None:
63
+ import safetensors
64
+
65
+ safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
66
+
67
+ accelerate_version = "not installed"
68
+ accelerate_config = accelerate_config_str = "not found"
69
+ if is_accelerate_available():
70
+ import accelerate
71
+ from accelerate.commands.config import default_config_file, load_config_from_file
72
+
73
+ accelerate_version = accelerate.__version__
74
+ # Get the default from the config file.
75
+ if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
76
+ accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
77
+
78
+ accelerate_config_str = (
79
+ "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
80
+ if isinstance(accelerate_config, dict)
81
+ else f"\t{accelerate_config}"
82
+ )
83
+
84
+ pt_version = "not installed"
85
+ pt_cuda_available = "NA"
86
+ if is_torch_available():
87
+ import torch
88
+
89
+ pt_version = torch.__version__
90
+ pt_cuda_available = torch.cuda.is_available()
91
+
92
+ tf_version = "not installed"
93
+ tf_cuda_available = "NA"
94
+ if is_tf_available():
95
+ import tensorflow as tf
96
+
97
+ tf_version = tf.__version__
98
+ try:
99
+ # deprecated in v2.1
100
+ tf_cuda_available = tf.test.is_gpu_available()
101
+ except AttributeError:
102
+ # returns list of devices, convert to bool
103
+ tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
104
+
105
+ flax_version = "not installed"
106
+ jax_version = "not installed"
107
+ jaxlib_version = "not installed"
108
+ jax_backend = "NA"
109
+ if is_flax_available():
110
+ import flax
111
+ import jax
112
+ import jaxlib
113
+
114
+ flax_version = flax.__version__
115
+ jax_version = jax.__version__
116
+ jaxlib_version = jaxlib.__version__
117
+ jax_backend = jax.lib.xla_bridge.get_backend().platform
118
+
119
+ info = {
120
+ "`transformers` version": version,
121
+ "Platform": platform.platform(),
122
+ "Python version": platform.python_version(),
123
+ "Huggingface_hub version": huggingface_hub.__version__,
124
+ "Safetensors version": f"{safetensors_version}",
125
+ "Accelerate version": f"{accelerate_version}",
126
+ "Accelerate config": f"{accelerate_config_str}",
127
+ "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
128
+ "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
129
+ "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
130
+ "Jax version": f"{jax_version}",
131
+ "JaxLib version": f"{jaxlib_version}",
132
+ "Using GPU in script?": "<fill in>",
133
+ "Using distributed or parallel set-up in script?": "<fill in>",
134
+ }
135
+
136
+ print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
137
+ print(self.format_dict(info))
138
+
139
+ return info
140
+
141
+ @staticmethod
142
+ def format_dict(d):
143
+ return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
llmeval-env/lib/python3.10/site-packages/transformers/commands/lfs.py ADDED
@@ -0,0 +1,226 @@
1
+ """
2
+ Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs.
3
+
4
+ Inspired by: github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py
5
+
6
+ Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
7
+
8
+
9
+ To launch debugger while developing:
10
+
11
+ ```
+ [lfs "customtransfer.multipart"]
+ path = /path/to/transformers/.env/bin/python
+ args = -m debugpy --listen 5678 --wait-for-client /path/to/transformers/src/transformers/commands/transformers_cli.py lfs-multipart-upload
+ ```"""
14
+
15
+ import json
16
+ import os
17
+ import subprocess
18
+ import sys
19
+ import warnings
20
+ from argparse import ArgumentParser
21
+ from contextlib import AbstractContextManager
22
+ from typing import Dict, List, Optional
23
+
24
+ import requests
25
+
26
+ from ..utils import logging
27
+ from . import BaseTransformersCLICommand
28
+
29
+
30
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
31
+
32
+
33
+ LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload"
34
+
35
+
36
+ class LfsCommands(BaseTransformersCLICommand):
37
+ """
38
+ Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. This lets users upload
39
+ large files >5GB 🔥. Spec for LFS custom transfer agent is:
40
+ https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
41
+
42
+ This introduces two commands to the CLI:
43
+
44
+ 1. $ transformers-cli lfs-enable-largefiles
45
+
46
+ This should be executed once for each model repo that contains a model file >5GB. It's documented in the error
47
+ message you get if you just try to git push a 5GB file without having enabled it before.
48
+
49
+ 2. $ transformers-cli lfs-multipart-upload
50
+
51
+ This command is called by lfs directly and is not meant to be called by the user.
52
+ """
53
+
54
+ @staticmethod
55
+ def register_subcommand(parser: ArgumentParser):
56
+ enable_parser = parser.add_parser(
57
+ "lfs-enable-largefiles",
58
+ help=(
59
+ "Deprecated: use `huggingface-cli` instead. Configure your repository to enable upload of files > 5GB."
60
+ ),
61
+ )
62
+ enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")
63
+ enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args))
64
+
65
+ upload_parser = parser.add_parser(
66
+ LFS_MULTIPART_UPLOAD_COMMAND,
67
+ help=(
68
+ "Deprecated: use `huggingface-cli` instead. "
69
+ "Command will get called by git-lfs, do not call it directly."
70
+ ),
71
+ )
72
+ upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args))
73
+
74
+
75
+ class LfsEnableCommand:
76
+ def __init__(self, args):
77
+ self.args = args
78
+
79
+ def run(self):
80
+ warnings.warn(
81
+ "Managing repositories through transformers-cli is deprecated. Please use `huggingface-cli` instead."
82
+ )
83
+ local_path = os.path.abspath(self.args.path)
84
+ if not os.path.isdir(local_path):
85
+ print("This does not look like a valid git repo.")
86
+ exit(1)
87
+ subprocess.run(
88
+ "git config lfs.customtransfer.multipart.path transformers-cli".split(), check=True, cwd=local_path
89
+ )
90
+ subprocess.run(
91
+ f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
92
+ check=True,
93
+ cwd=local_path,
94
+ )
95
+ print("Local repo set up for largefiles")
96
+
97
+
98
+ def write_msg(msg: Dict):
99
+ """Write out the message in Line delimited JSON."""
100
+ msg = json.dumps(msg) + "\n"
101
+ sys.stdout.write(msg)
102
+ sys.stdout.flush()
103
+
104
+
105
+ def read_msg() -> Optional[Dict]:
106
+ """Read Line delimited JSON from stdin."""
107
+ msg = json.loads(sys.stdin.readline().strip())
108
+
109
+ if "terminate" in (msg.get("type"), msg.get("event")):
110
+ # terminate message received
111
+ return None
112
+
113
+ if msg.get("event") not in ("download", "upload"):
114
+ logger.critical("Received unexpected message")
115
+ sys.exit(1)
116
+
117
+ return msg
118
+
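To make the line-delimited JSON protocol concrete, a progress event emitted by the agent would look like the following (the oid and byte counts are invented):

```python
# Hypothetical progress message written to stdout for git-lfs to consume.
write_msg(
    {
        "event": "progress",
        "oid": "abc123",          # placeholder object id
        "bytesSoFar": 4194304,
        "bytesSinceLast": 4194304,
    }
)
```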
119
+
120
+ class FileSlice(AbstractContextManager):
121
+ """
122
+ File-like object that only reads a slice of a file
123
+
124
+ Inspired by stackoverflow.com/a/29838711/593036
125
+ """
126
+
127
+ def __init__(self, filepath: str, seek_from: int, read_limit: int):
128
+ self.filepath = filepath
129
+ self.seek_from = seek_from
130
+ self.read_limit = read_limit
131
+ self.n_seen = 0
132
+
133
+ def __enter__(self):
134
+ self.f = open(self.filepath, "rb")
135
+ self.f.seek(self.seek_from)
136
+ return self
137
+
138
+ def __len__(self):
139
+ total_length = os.fstat(self.f.fileno()).st_size
140
+ return min(self.read_limit, total_length - self.seek_from)
141
+
142
+ def read(self, n=-1):
143
+ if self.n_seen >= self.read_limit:
144
+ return b""
145
+ remaining_amount = self.read_limit - self.n_seen
146
+ data = self.f.read(remaining_amount if n < 0 else min(n, remaining_amount))
147
+ self.n_seen += len(data)
148
+ return data
149
+
150
+ def __iter__(self):
151
+ yield self.read(n=4 * 1024 * 1024)
152
+
153
+ def __exit__(self, *args):
154
+ self.f.close()
155
+
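A brief usage sketch of `FileSlice` (the file name is a placeholder): it exposes one chunk of a larger file as a file-like object, which is what the upload loop below relies on when streaming each part to its presigned URL.

```python
# Read at most 512 bytes starting at offset 1024 of a local file.
with FileSlice("some_large_file.bin", seek_from=1024, read_limit=512) as chunk:
    data = chunk.read()
    print(len(chunk), len(data))  # both at most 512
```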
156
+
157
+ class LfsUploadCommand:
158
+ def __init__(self, args):
159
+ self.args = args
160
+
161
+ def run(self):
162
+ # Immediately after invoking a custom transfer process, git-lfs
163
+ # sends initiation data to the process over stdin.
164
+ # This tells the process useful information about the configuration.
165
+ init_msg = json.loads(sys.stdin.readline().strip())
166
+ if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
167
+ write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
168
+ sys.exit(1)
169
+
170
+ # The transfer process should use the information it needs from the
171
+ # initiation structure, and also perform any one-off setup tasks it
172
+ # needs to do. It should then respond on stdout with a simple empty
173
+ # confirmation structure, as follows:
174
+ write_msg({})
175
+
176
+ # After the initiation exchange, git-lfs will send any number of
177
+ # transfer requests to the stdin of the transfer process, in a serial sequence.
178
+ while True:
179
+ msg = read_msg()
180
+ if msg is None:
181
+ # When all transfers have been processed, git-lfs will send
182
+ # a terminate event to the stdin of the transfer process.
183
+ # On receiving this message the transfer process should
184
+ # clean up and terminate. No response is expected.
185
+ sys.exit(0)
186
+
187
+ oid = msg["oid"]
188
+ filepath = msg["path"]
189
+ completion_url = msg["action"]["href"]
190
+ header = msg["action"]["header"]
191
+ chunk_size = int(header.pop("chunk_size"))
192
+ presigned_urls: List[str] = list(header.values())
193
+
194
+ parts = []
195
+ for i, presigned_url in enumerate(presigned_urls):
196
+ with FileSlice(filepath, seek_from=i * chunk_size, read_limit=chunk_size) as data:
197
+ r = requests.put(presigned_url, data=data)
198
+ r.raise_for_status()
199
+ parts.append(
200
+ {
201
+ "etag": r.headers.get("etag"),
202
+ "partNumber": i + 1,
203
+ }
204
+ )
205
+ # In order to support progress reporting while data is uploading / downloading,
206
+ # the transfer process should post messages to stdout
207
+ write_msg(
208
+ {
209
+ "event": "progress",
210
+ "oid": oid,
211
+ "bytesSoFar": (i + 1) * chunk_size,
212
+ "bytesSinceLast": chunk_size,
213
+ }
214
+ )
215
+ # Not precise but that's ok.
216
+
217
+ r = requests.post(
218
+ completion_url,
219
+ json={
220
+ "oid": oid,
221
+ "parts": parts,
222
+ },
223
+ )
224
+ r.raise_for_status()
225
+
226
+ write_msg({"event": "complete", "oid": oid})
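For reference, a minimal sketch of the line-delimited JSON exchange that the LfsUploadCommand above implements (message shapes follow the git-lfs custom transfer agent protocol; the oid, file path, URLs and chunk size below are illustrative placeholders, not real values):

    # Messages read from / written to stdin-stdout by the upload agent above.
    init_msg = {"event": "init", "operation": "upload"}            # read once at the start of run()
    upload_msg = {                                                 # one per file, handled in the while loop
        "event": "upload",
        "oid": "abc123",                                           # placeholder object id
        "path": "/tmp/model.safetensors",                          # placeholder local file path
        "action": {
            "href": "https://example.invalid/complete-multipart",  # completion_url posted to at the end
            "header": {
                "chunk_size": "5242880",                           # popped first; remaining values are
                "part-1": "https://example.invalid/part-1",        # presigned part URLs (placeholders)
            },
        },
    }
    terminate_msg = {"event": "terminate"}                         # makes read_msg() return None, exits cleanly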
llmeval-env/lib/python3.10/site-packages/transformers/commands/pt_to_tf.py ADDED
@@ -0,0 +1,425 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ import os
17
+ from argparse import ArgumentParser, Namespace
18
+ from importlib import import_module
19
+
20
+ import huggingface_hub
21
+ import numpy as np
22
+ from packaging import version
23
+
24
+ from .. import (
25
+ FEATURE_EXTRACTOR_MAPPING,
26
+ IMAGE_PROCESSOR_MAPPING,
27
+ PROCESSOR_MAPPING,
28
+ TOKENIZER_MAPPING,
29
+ AutoConfig,
30
+ AutoFeatureExtractor,
31
+ AutoImageProcessor,
32
+ AutoProcessor,
33
+ AutoTokenizer,
34
+ is_datasets_available,
35
+ is_tf_available,
36
+ is_torch_available,
37
+ )
38
+ from ..utils import TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, logging
39
+ from . import BaseTransformersCLICommand
40
+
41
+
42
+ if is_tf_available():
43
+ import tensorflow as tf
44
+
45
+ tf.config.experimental.enable_tensor_float_32_execution(False)
46
+
47
+ if is_torch_available():
48
+ import torch
49
+
50
+ if is_datasets_available():
51
+ from datasets import load_dataset
52
+
53
+
54
+ MAX_ERROR = 5e-5 # larger error tolerance than in our internal tests, to avoid flaky user-facing errors
55
+
56
+
57
+ def convert_command_factory(args: Namespace):
58
+ """
59
+ Factory function used to convert a PyTorch model checkpoint into a TensorFlow 2 checkpoint.
60
+
61
+ Returns: PTtoTFCommand
62
+ """
63
+ return PTtoTFCommand(
64
+ args.model_name,
65
+ args.local_dir,
66
+ args.max_error,
67
+ args.new_weights,
68
+ args.no_pr,
69
+ args.push,
70
+ args.extra_commit_description,
71
+ args.override_model_class,
72
+ )
73
+
74
+
75
+ class PTtoTFCommand(BaseTransformersCLICommand):
76
+ @staticmethod
77
+ def register_subcommand(parser: ArgumentParser):
78
+ """
79
+ Register this command to argparse so it's available for transformers-cli
80
+
81
+ Args:
82
+ parser: Root parser to register command-specific arguments
83
+ """
84
+ train_parser = parser.add_parser(
85
+ "pt-to-tf",
86
+ help=(
87
+ "CLI tool to convert a transformers model from a PyTorch checkpoint to a TensorFlow checkpoint."
88
+ " Can also be used to validate existing weights without opening PRs, with --no-pr."
89
+ ),
90
+ )
91
+ train_parser.add_argument(
92
+ "--model-name",
93
+ type=str,
94
+ required=True,
95
+ help="The model name, including owner/organization, as seen on the hub.",
96
+ )
97
+ train_parser.add_argument(
98
+ "--local-dir",
99
+ type=str,
100
+ default="",
101
+ help="Optional local directory of the model repository. Defaults to /tmp/{model_name}",
102
+ )
103
+ train_parser.add_argument(
104
+ "--max-error",
105
+ type=float,
106
+ default=MAX_ERROR,
107
+ help=(
108
+ f"Maximum error tolerance. Defaults to {MAX_ERROR}. This flag should be avoided, use at your own risk."
109
+ ),
110
+ )
111
+ train_parser.add_argument(
112
+ "--new-weights",
113
+ action="store_true",
114
+ help="Optional flag to create new TensorFlow weights, even if they already exist.",
115
+ )
116
+ train_parser.add_argument(
117
+ "--no-pr", action="store_true", help="Optional flag to NOT open a PR with converted weights."
118
+ )
119
+ train_parser.add_argument(
120
+ "--push",
121
+ action="store_true",
122
+ help="Optional flag to push the weights directly to `main` (requires permissions)",
123
+ )
124
+ train_parser.add_argument(
125
+ "--extra-commit-description",
126
+ type=str,
127
+ default="",
128
+ help="Optional additional commit description to use when opening a PR (e.g. to tag the owner).",
129
+ )
130
+ train_parser.add_argument(
131
+ "--override-model-class",
132
+ type=str,
133
+ default=None,
134
+ help="If you think you know better than the auto-detector, you can specify the model class here. "
135
+ "Can be either an AutoModel class or a specific model class like BertForSequenceClassification.",
136
+ )
137
+ train_parser.set_defaults(func=convert_command_factory)
138
+
139
+ @staticmethod
140
+ def find_pt_tf_differences(pt_outputs, tf_outputs):
141
+ """
142
+ Compares the TensorFlow and PyTorch outputs, returning a dictionary with all tensor differences.
143
+ """
144
+ # 1. All output attributes must be the same
145
+ pt_out_attrs = set(pt_outputs.keys())
146
+ tf_out_attrs = set(tf_outputs.keys())
147
+ if pt_out_attrs != tf_out_attrs:
148
+ raise ValueError(
149
+ f"The model outputs have different attributes, aborting. (Pytorch: {pt_out_attrs}, TensorFlow:"
150
+ f" {tf_out_attrs})"
151
+ )
152
+
153
+ # 2. For each output attribute, computes the difference
154
+ def _find_pt_tf_differences(pt_out, tf_out, differences, attr_name=""):
155
+ # If the current attribute is a tensor, it is a leaf and we make the comparison. Otherwise, we will dig in
156
+ # recursively, keeping the name of the attribute.
157
+ if isinstance(pt_out, torch.Tensor):
158
+ tensor_difference = np.max(np.abs(pt_out.numpy() - tf_out.numpy()))
159
+ differences[attr_name] = tensor_difference
160
+ else:
161
+ root_name = attr_name
162
+ for i, pt_item in enumerate(pt_out):
163
+ # If it is a named attribute, we keep the name. Otherwise, just its index.
164
+ if isinstance(pt_item, str):
165
+ branch_name = root_name + pt_item
166
+ tf_item = tf_out[pt_item]
167
+ pt_item = pt_out[pt_item]
168
+ else:
169
+ branch_name = root_name + f"[{i}]"
170
+ tf_item = tf_out[i]
171
+ differences = _find_pt_tf_differences(pt_item, tf_item, differences, branch_name)
172
+
173
+ return differences
174
+
175
+ return _find_pt_tf_differences(pt_outputs, tf_outputs, {})
176
+
177
+ def __init__(
178
+ self,
179
+ model_name: str,
180
+ local_dir: str,
181
+ max_error: float,
182
+ new_weights: bool,
183
+ no_pr: bool,
184
+ push: bool,
185
+ extra_commit_description: str,
186
+ override_model_class: str,
187
+ *args,
188
+ ):
189
+ self._logger = logging.get_logger("transformers-cli/pt_to_tf")
190
+ self._model_name = model_name
191
+ self._local_dir = local_dir if local_dir else os.path.join("/tmp", model_name)
192
+ self._max_error = max_error
193
+ self._new_weights = new_weights
194
+ self._no_pr = no_pr
195
+ self._push = push
196
+ self._extra_commit_description = extra_commit_description
197
+ self._override_model_class = override_model_class
198
+
199
+ def get_inputs(self, pt_model, tf_dummy_inputs, config):
200
+ """
201
+ Returns the right inputs for the model, based on its signature.
202
+ """
203
+
204
+ def _get_audio_input():
205
+ ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
206
+ speech_samples = ds.sort("id").select(range(2))[:2]["audio"]
207
+ raw_samples = [x["array"] for x in speech_samples]
208
+ return raw_samples
209
+
210
+ model_config_class = type(pt_model.config)
211
+ if model_config_class in PROCESSOR_MAPPING:
212
+ processor = AutoProcessor.from_pretrained(self._local_dir)
213
+ if model_config_class in TOKENIZER_MAPPING and processor.tokenizer.pad_token is None:
214
+ processor.tokenizer.pad_token = processor.tokenizer.eos_token
215
+ elif model_config_class in IMAGE_PROCESSOR_MAPPING:
216
+ processor = AutoImageProcessor.from_pretrained(self._local_dir)
217
+ elif model_config_class in FEATURE_EXTRACTOR_MAPPING:
218
+ processor = AutoFeatureExtractor.from_pretrained(self._local_dir)
219
+ elif model_config_class in TOKENIZER_MAPPING:
220
+ processor = AutoTokenizer.from_pretrained(self._local_dir)
221
+ if processor.pad_token is None:
222
+ processor.pad_token = processor.eos_token
223
+ else:
224
+ raise ValueError(f"Unknown data processing type (model config type: {model_config_class})")
225
+
226
+ model_forward_signature = set(inspect.signature(pt_model.forward).parameters.keys())
227
+ processor_inputs = {}
228
+ if "input_ids" in model_forward_signature:
229
+ processor_inputs.update(
230
+ {
231
+ "text": ["Hi there!", "I am a batch with more than one row and different input lengths."],
232
+ "padding": True,
233
+ "truncation": True,
234
+ }
235
+ )
236
+ if "pixel_values" in model_forward_signature:
237
+ sample_images = load_dataset("cifar10", "plain_text", split="test")[:2]["img"]
238
+ processor_inputs.update({"images": sample_images})
239
+ if "input_features" in model_forward_signature:
240
+ feature_extractor_signature = inspect.signature(processor.feature_extractor).parameters
241
+ # Pad to the largest input length by default but take feature extractor default
242
+ # padding value if it exists e.g. "max_length" and is not False or None
243
+ if "padding" in feature_extractor_signature:
244
+ default_strategy = feature_extractor_signature["padding"].default
245
+ if default_strategy is not False and default_strategy is not None:
246
+ padding_strategy = default_strategy
247
+ else:
248
+ padding_strategy = True
249
+ else:
250
+ padding_strategy = True
251
+ processor_inputs.update({"audio": _get_audio_input(), "padding": padding_strategy})
252
+ if "input_values" in model_forward_signature: # Wav2Vec2 audio input
253
+ processor_inputs.update({"audio": _get_audio_input(), "padding": True})
254
+ pt_input = processor(**processor_inputs, return_tensors="pt")
255
+ tf_input = processor(**processor_inputs, return_tensors="tf")
256
+
257
+ # Extra input requirements, in addition to the input modality
258
+ if (
259
+ config.is_encoder_decoder
260
+ or (hasattr(pt_model, "encoder") and hasattr(pt_model, "decoder"))
261
+ or "decoder_input_ids" in tf_dummy_inputs
262
+ ):
263
+ decoder_input_ids = np.asarray([[1], [1]], dtype=int) * (pt_model.config.decoder_start_token_id or 0)
264
+ pt_input.update({"decoder_input_ids": torch.tensor(decoder_input_ids)})
265
+ tf_input.update({"decoder_input_ids": tf.convert_to_tensor(decoder_input_ids)})
266
+
267
+ return pt_input, tf_input
268
+
269
+ def run(self):
270
+ # hub version 0.9.0 introduced the possibility of programmatically opening PRs with normal write tokens.
271
+ if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"):
272
+ raise ImportError(
273
+ "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub"
274
+ " installation."
275
+ )
276
+ else:
277
+ from huggingface_hub import Repository, create_commit
278
+ from huggingface_hub._commit_api import CommitOperationAdd
279
+
280
+ # Fetch remote data
281
+ repo = Repository(local_dir=self._local_dir, clone_from=self._model_name)
282
+
283
+ # Load config and get the appropriate architecture -- the latter is needed to convert the head's weights
284
+ config = AutoConfig.from_pretrained(self._local_dir)
285
+ architectures = config.architectures
286
+ if self._override_model_class is not None:
287
+ if self._override_model_class.startswith("TF"):
288
+ architectures = [self._override_model_class[2:]]
289
+ else:
290
+ architectures = [self._override_model_class]
291
+ try:
292
+ pt_class = getattr(import_module("transformers"), architectures[0])
293
+ except AttributeError:
294
+ raise ValueError(f"Model class {self._override_model_class} not found in transformers.")
295
+ try:
296
+ tf_class = getattr(import_module("transformers"), "TF" + architectures[0])
297
+ except AttributeError:
298
+ raise ValueError(f"TF model class TF{self._override_model_class} not found in transformers.")
299
+ elif architectures is None: # No architecture defined -- use auto classes
300
+ pt_class = getattr(import_module("transformers"), "AutoModel")
301
+ tf_class = getattr(import_module("transformers"), "TFAutoModel")
302
+ self._logger.warning("No detected architecture, using AutoModel/TFAutoModel")
303
+ else: # Architecture defined -- use it
304
+ if len(architectures) > 1:
305
+ raise ValueError(f"More than one architecture was found, aborting. (architectures = {architectures})")
306
+ self._logger.warning(f"Detected architecture: {architectures[0]}")
307
+ pt_class = getattr(import_module("transformers"), architectures[0])
308
+ try:
309
+ tf_class = getattr(import_module("transformers"), "TF" + architectures[0])
310
+ except AttributeError:
311
+ raise AttributeError(f"The TensorFlow equivalent of {architectures[0]} doesn't exist in transformers.")
312
+
313
+ # Check the TF dummy inputs to see what keys we need in the forward pass
314
+ tf_from_pt_model = tf_class.from_config(config)
315
+ tf_dummy_inputs = tf_from_pt_model.dummy_inputs
316
+
317
+ del tf_from_pt_model # Try to keep only one model in memory at a time
318
+
319
+ # Load the model and get some basic inputs
320
+ pt_model = pt_class.from_pretrained(self._local_dir)
321
+ pt_model.eval()
322
+
323
+ pt_input, tf_input = self.get_inputs(pt_model, tf_dummy_inputs, config)
324
+
325
+ with torch.no_grad():
326
+ pt_outputs = pt_model(**pt_input, output_hidden_states=True)
327
+ del pt_model # will no longer be used, and may have a large memory footprint
328
+
329
+ tf_from_pt_model = tf_class.from_pretrained(self._local_dir, from_pt=True)
330
+ tf_from_pt_outputs = tf_from_pt_model(**tf_input, output_hidden_states=True, training=False)
331
+
332
+ # Confirms that cross loading PT weights into TF worked.
333
+ crossload_differences = self.find_pt_tf_differences(pt_outputs, tf_from_pt_outputs)
334
+ output_differences = {k: v for k, v in crossload_differences.items() if "hidden" not in k}
335
+ hidden_differences = {k: v for k, v in crossload_differences.items() if "hidden" in k}
336
+ if len(output_differences) == 0 and architectures is not None:
337
+ raise ValueError(
338
+ f"Something went wrong -- the config file has architectures ({architectures}), but no model head"
339
+ " output was found. All outputs start with 'hidden'"
340
+ )
341
+ max_crossload_output_diff = max(output_differences.values()) if output_differences else 0.0
342
+ max_crossload_hidden_diff = max(hidden_differences.values())
343
+ if max_crossload_output_diff > self._max_error or max_crossload_hidden_diff > self._max_error:
344
+ raise ValueError(
345
+ "The cross-loaded TensorFlow model has different outputs, something went wrong!\n"
346
+ + f"\nList of maximum output differences above the threshold ({self._max_error}):\n"
347
+ + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > self._max_error])
348
+ + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_error}):\n"
349
+ + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_error])
350
+ )
351
+
352
+ # Save the weights in a TF format (if needed) and confirms that the results are still good
353
+ tf_weights_path = os.path.join(self._local_dir, TF2_WEIGHTS_NAME)
354
+ tf_weights_index_path = os.path.join(self._local_dir, TF2_WEIGHTS_INDEX_NAME)
355
+ if (not os.path.exists(tf_weights_path) and not os.path.exists(tf_weights_index_path)) or self._new_weights:
356
+ tf_from_pt_model.save_pretrained(self._local_dir)
357
+ del tf_from_pt_model # will no longer be used, and may have a large memory footprint
358
+
359
+ tf_model = tf_class.from_pretrained(self._local_dir)
360
+ tf_outputs = tf_model(**tf_input, output_hidden_states=True)
361
+
362
+ conversion_differences = self.find_pt_tf_differences(pt_outputs, tf_outputs)
363
+ output_differences = {k: v for k, v in conversion_differences.items() if "hidden" not in k}
364
+ hidden_differences = {k: v for k, v in conversion_differences.items() if "hidden" in k}
365
+ if len(output_differences) == 0 and architectures is not None:
366
+ raise ValueError(
367
+ f"Something went wrong -- the config file has architectures ({architectures}), but no model head"
368
+ " output was found. All outputs start with 'hidden'"
369
+ )
370
+ max_conversion_output_diff = max(output_differences.values()) if output_differences else 0.0
371
+ max_conversion_hidden_diff = max(hidden_differences.values())
372
+ if max_conversion_output_diff > self._max_error or max_conversion_hidden_diff > self._max_error:
373
+ raise ValueError(
374
+ "The converted TensorFlow model has different outputs, something went wrong!\n"
375
+ + f"\nList of maximum output differences above the threshold ({self._max_error}):\n"
376
+ + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > self._max_error])
377
+ + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_error}):\n"
378
+ + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_error])
379
+ )
380
+
381
+ commit_message = "Update TF weights" if self._new_weights else "Add TF weights"
382
+ if self._push:
383
+ repo.git_add(auto_lfs_track=True)
384
+ repo.git_commit(commit_message)
385
+ repo.git_push(blocking=True) # this prints a progress bar with the upload
386
+ self._logger.warning(f"TF weights pushed into {self._model_name}")
387
+ elif not self._no_pr:
388
+ self._logger.warning("Uploading the weights into a new PR...")
389
+ commit_description = (
390
+ "Model converted by the [`transformers`' `pt_to_tf`"
391
+ " CLI](https://github.com/huggingface/transformers/blob/main/src/transformers/commands/pt_to_tf.py). "
392
+ "All converted model outputs and hidden layers were validated against their PyTorch counterparts.\n\n"
393
+ f"Maximum crossload output difference={max_crossload_output_diff:.3e}; "
394
+ f"Maximum crossload hidden layer difference={max_crossload_hidden_diff:.3e};\n"
395
+ f"Maximum conversion output difference={max_conversion_output_diff:.3e}; "
396
+ f"Maximum conversion hidden layer difference={max_conversion_hidden_diff:.3e};\n"
397
+ )
398
+ if self._max_error > MAX_ERROR:
399
+ commit_description += (
400
+ f"\n\nCAUTION: The maximum admissible error was manually increased to {self._max_error}!"
401
+ )
402
+ if self._extra_commit_description:
403
+ commit_description += "\n\n" + self._extra_commit_description
404
+
405
+ # sharded model -> adds all related files (index and .h5 shards)
406
+ if os.path.exists(tf_weights_index_path):
407
+ operations = [
408
+ CommitOperationAdd(path_in_repo=TF2_WEIGHTS_INDEX_NAME, path_or_fileobj=tf_weights_index_path)
409
+ ]
410
+ for shard_path in tf.io.gfile.glob(self._local_dir + "/tf_model-*.h5"):
411
+ operations += [
412
+ CommitOperationAdd(path_in_repo=os.path.basename(shard_path), path_or_fileobj=shard_path)
413
+ ]
414
+ else:
415
+ operations = [CommitOperationAdd(path_in_repo=TF2_WEIGHTS_NAME, path_or_fileobj=tf_weights_path)]
416
+
417
+ hub_pr_url = create_commit(
418
+ repo_id=self._model_name,
419
+ operations=operations,
420
+ commit_message=commit_message,
421
+ commit_description=commit_description,
422
+ repo_type="model",
423
+ create_pr=True,
424
+ ).pr_url
425
+ self._logger.warning(f"PR open in {hub_pr_url}")
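A hedged usage sketch of the command defined above; the repository id is a placeholder and the Namespace fields simply mirror the arguments registered in register_subcommand, so this is not a definitive recipe:

    from argparse import Namespace
    from transformers.commands.pt_to_tf import convert_command_factory

    # Equivalent CLI call: transformers-cli pt-to-tf --model-name your-org/your-model --no-pr
    args = Namespace(
        model_name="your-org/your-model",   # placeholder hub repo id
        local_dir="",                       # defaults to /tmp/{model_name}
        max_error=5e-5,
        new_weights=False,
        no_pr=True,                         # only validate, do not open a PR
        push=False,
        extra_commit_description="",
        override_model_class=None,
    )
    # Clones the repo, compares PT vs TF outputs, and saves TF weights if they are missing.
    convert_command_factory(args).run()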
llmeval-env/lib/python3.10/site-packages/transformers/commands/run.py ADDED
@@ -0,0 +1,110 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from argparse import ArgumentParser
16
+
17
+ from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
18
+ from ..utils import logging
19
+ from . import BaseTransformersCLICommand
20
+
21
+
22
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
23
+
24
+
25
+ def try_infer_format_from_ext(path: str):
26
+ if not path:
27
+ return "pipe"
28
+
29
+ for ext in PipelineDataFormat.SUPPORTED_FORMATS:
30
+ if path.endswith(ext):
31
+ return ext
32
+
33
+ raise Exception(
34
+ f"Unable to determine file format from file extension {path}. "
35
+ f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
36
+ )
37
+
38
+
39
+ def run_command_factory(args):
40
+ nlp = pipeline(
41
+ task=args.task,
42
+ model=args.model if args.model else None,
43
+ config=args.config,
44
+ tokenizer=args.tokenizer,
45
+ device=args.device,
46
+ )
47
+ format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
48
+ reader = PipelineDataFormat.from_str(
49
+ format=format,
50
+ output_path=args.output,
51
+ input_path=args.input,
52
+ column=args.column if args.column else nlp.default_input_names,
53
+ overwrite=args.overwrite,
54
+ )
55
+ return RunCommand(nlp, reader)
56
+
57
+
58
+ class RunCommand(BaseTransformersCLICommand):
59
+ def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
60
+ self._nlp = nlp
61
+ self._reader = reader
62
+
63
+ @staticmethod
64
+ def register_subcommand(parser: ArgumentParser):
65
+ run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
66
+ run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
67
+ run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
68
+ run_parser.add_argument("--output", type=str, help="Path to the file where results will be written.")
69
+ run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
70
+ run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
71
+ run_parser.add_argument(
72
+ "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
73
+ )
74
+ run_parser.add_argument(
75
+ "--column",
76
+ type=str,
77
+ help="Name of the column to use as input. (For multi-column inputs such as QA, use column1,column2)",
78
+ )
79
+ run_parser.add_argument(
80
+ "--format",
81
+ type=str,
82
+ default="infer",
83
+ choices=PipelineDataFormat.SUPPORTED_FORMATS,
84
+ help="Input format to read from",
85
+ )
86
+ run_parser.add_argument(
87
+ "--device",
88
+ type=int,
89
+ default=-1,
90
+ help="Device to run on: -1 for CPU, >= 0 for the corresponding GPU (default: -1)",
91
+ )
92
+ run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
93
+ run_parser.set_defaults(func=run_command_factory)
94
+
95
+ def run(self):
96
+ nlp, outputs = self._nlp, []
97
+
98
+ for entry in self._reader:
99
+ output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
100
+ if isinstance(output, dict):
101
+ outputs.append(output)
102
+ else:
103
+ outputs += output
104
+
105
+ # Saving data
106
+ if self._nlp.binary_output:
107
+ binary_path = self._reader.save_binary(outputs)
108
+ logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
109
+ else:
110
+ self._reader.save(outputs)
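As a usage sketch (the file names and task below are placeholders), the same pipeline run can be driven either from the shell or programmatically through the factory above:

    from argparse import Namespace
    from transformers.commands.run import run_command_factory

    # Equivalent CLI call:
    #   transformers-cli run --task text-classification --input reviews.csv --format csv \
    #       --column text --output preds.json --overwrite
    args = Namespace(
        task="text-classification", model=None, config=None, tokenizer=None, device=-1,
        input="reviews.csv", output="preds.json", format="csv", column="text", overwrite=True,
    )
    # Reads rows from the CSV, runs the pipeline on each, and saves the results to preds.json.
    run_command_factory(args).run()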
llmeval-env/lib/python3.10/site-packages/transformers/commands/serving.py ADDED
@@ -0,0 +1,228 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from argparse import ArgumentParser, Namespace
16
+ from typing import Any, List, Optional
17
+
18
+ from ..pipelines import Pipeline, get_supported_tasks, pipeline
19
+ from ..utils import logging
20
+ from . import BaseTransformersCLICommand
21
+
22
+
23
+ try:
24
+ from fastapi import Body, FastAPI, HTTPException
25
+ from fastapi.routing import APIRoute
26
+ from pydantic import BaseModel
27
+ from starlette.responses import JSONResponse
28
+ from uvicorn import run
29
+
30
+ _serve_dependencies_installed = True
31
+ except (ImportError, AttributeError):
32
+ BaseModel = object
33
+
34
+ def Body(*x, **y):
35
+ pass
36
+
37
+ _serve_dependencies_installed = False
38
+
39
+
40
+ logger = logging.get_logger("transformers-cli/serving")
41
+
42
+
43
+ def serve_command_factory(args: Namespace):
44
+ """
45
+ Factory function used to instantiate serving server from provided command line arguments.
46
+
47
+ Returns: ServeCommand
48
+ """
49
+ nlp = pipeline(
50
+ task=args.task,
51
+ model=args.model if args.model else None,
52
+ config=args.config,
53
+ tokenizer=args.tokenizer,
54
+ device=args.device,
55
+ )
56
+ return ServeCommand(nlp, args.host, args.port, args.workers)
57
+
58
+
59
+ class ServeModelInfoResult(BaseModel):
60
+ """
61
+ Expose model information
62
+ """
63
+
64
+ infos: dict
65
+
66
+
67
+ class ServeTokenizeResult(BaseModel):
68
+ """
69
+ Tokenize result model
70
+ """
71
+
72
+ tokens: List[str]
73
+ tokens_ids: Optional[List[int]]
74
+
75
+
76
+ class ServeDeTokenizeResult(BaseModel):
77
+ """
78
+ DeTokenize result model
79
+ """
80
+
81
+ text: str
82
+
83
+
84
+ class ServeForwardResult(BaseModel):
85
+ """
86
+ Forward result model
87
+ """
88
+
89
+ output: Any
90
+
91
+
92
+ class ServeCommand(BaseTransformersCLICommand):
93
+ @staticmethod
94
+ def register_subcommand(parser: ArgumentParser):
95
+ """
96
+ Register this command to argparse so it's available for transformers-cli
97
+
98
+ Args:
99
+ parser: Root parser to register command-specific arguments
100
+ """
101
+ serve_parser = parser.add_parser(
102
+ "serve", help="CLI tool to run inference requests through a REST endpoint."
103
+ )
104
+ serve_parser.add_argument(
105
+ "--task",
106
+ type=str,
107
+ choices=get_supported_tasks(),
108
+ help="The task to run the pipeline on",
109
+ )
110
+ serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
111
+ serve_parser.add_argument("--port", type=int, default=8888, help="Port the server will listen on.")
112
+ serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
113
+ serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
114
+ serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
115
+ serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
116
+ serve_parser.add_argument(
117
+ "--device",
118
+ type=int,
119
+ default=-1,
120
+ help="Device to run on: -1 for CPU, >= 0 for the corresponding GPU (default: -1)",
121
+ )
122
+ serve_parser.set_defaults(func=serve_command_factory)
123
+
124
+ def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
125
+ self._pipeline = pipeline
126
+
127
+ self.host = host
128
+ self.port = port
129
+ self.workers = workers
130
+
131
+ if not _serve_dependencies_installed:
132
+ raise RuntimeError(
133
+ "Using serve command requires FastAPI and uvicorn. "
134
+ 'Please install transformers with [serving]: pip install "transformers[serving]". '
135
+ "Or install FastAPI and uvicorn separately."
136
+ )
137
+ else:
138
+ logger.info(f"Serving model over {host}:{port}")
139
+ self._app = FastAPI(
140
+ routes=[
141
+ APIRoute(
142
+ "/",
143
+ self.model_info,
144
+ response_model=ServeModelInfoResult,
145
+ response_class=JSONResponse,
146
+ methods=["GET"],
147
+ ),
148
+ APIRoute(
149
+ "/tokenize",
150
+ self.tokenize,
151
+ response_model=ServeTokenizeResult,
152
+ response_class=JSONResponse,
153
+ methods=["POST"],
154
+ ),
155
+ APIRoute(
156
+ "/detokenize",
157
+ self.detokenize,
158
+ response_model=ServeDeTokenizeResult,
159
+ response_class=JSONResponse,
160
+ methods=["POST"],
161
+ ),
162
+ APIRoute(
163
+ "/forward",
164
+ self.forward,
165
+ response_model=ServeForwardResult,
166
+ response_class=JSONResponse,
167
+ methods=["POST"],
168
+ ),
169
+ ],
170
+ timeout=600,
171
+ )
172
+
173
+ def run(self):
174
+ run(self._app, host=self.host, port=self.port, workers=self.workers)
175
+
176
+ def model_info(self):
177
+ return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
178
+
179
+ def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
180
+ """
181
+ Tokenize the provided input and optionally return the corresponding token ids: - **text_input**: String to
182
+ tokenize - **return_ids**: Boolean flag indicating whether the tokens should be converted to their integer
183
+ mapping.
184
+ """
185
+ try:
186
+ tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
187
+
188
+ if return_ids:
189
+ tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
190
+ return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
191
+ else:
192
+ return ServeTokenizeResult(tokens=tokens_txt)
193
+
194
+ except Exception as e:
195
+ raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
196
+
197
+ def detokenize(
198
+ self,
199
+ tokens_ids: List[int] = Body(None, embed=True),
200
+ skip_special_tokens: bool = Body(False, embed=True),
201
+ cleanup_tokenization_spaces: bool = Body(True, embed=True),
202
+ ):
203
+ """
204
+ Detokenize the provided token ids into readable text: - **tokens_ids**: List of token ids -
205
+ **skip_special_tokens**: Flag indicating whether to skip special tokens when decoding - **cleanup_tokenization_spaces**:
206
+ Flag indicating to remove all leading/trailing spaces and intermediate ones.
207
+ """
208
+ try:
209
+ decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
210
+ return ServeDeTokenizeResult(model="", text=decoded_str)
211
+ except Exception as e:
212
+ raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
213
+
214
+ async def forward(self, inputs=Body(None, embed=True)):
215
+ """
216
+ Forward the provided **inputs** through the pipeline and return the raw output.
217
+ """
218
+
219
+ # Check we don't have empty string
220
+ if len(inputs) == 0:
221
+ return ServeForwardResult(output=[], attention=[])
222
+
223
+ try:
224
+ # Forward through the model
225
+ output = self._pipeline(inputs)
226
+ return ServeForwardResult(output=output)
227
+ except Exception as e:
228
+ raise HTTPException(500, {"error": str(e)})
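A hedged sketch of how the endpoints registered above are queried once the server is running (the host, port and example inputs are placeholders); note that each JSON key matches a Body(..., embed=True) parameter of the corresponding route:

    import requests

    # Server started with e.g.: transformers-cli serve --task text-classification --port 8888
    base = "http://localhost:8888"
    print(requests.get(f"{base}/").json())                              # model config as a dict
    print(requests.post(f"{base}/tokenize",
                        json={"text_input": "Hello world", "return_ids": True}).json())
    print(requests.post(f"{base}/forward",
                        json={"inputs": "A great movie!"}).json())      # raw pipeline output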
llmeval-env/lib/python3.10/site-packages/transformers/commands/train.py ADDED
@@ -0,0 +1,158 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ from argparse import ArgumentParser, Namespace
17
+
18
+ from ..data import SingleSentenceClassificationProcessor as Processor
19
+ from ..pipelines import TextClassificationPipeline
20
+ from ..utils import is_tf_available, is_torch_available, logging
21
+ from . import BaseTransformersCLICommand
22
+
23
+
24
+ if not is_tf_available() and not is_torch_available():
25
+ raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
26
+
27
+ # TF training parameters
28
+ USE_XLA = False
29
+ USE_AMP = False
30
+
31
+
32
+ def train_command_factory(args: Namespace):
33
+ """
34
+ Factory function used to instantiate training command from provided command line arguments.
35
+
36
+ Returns: TrainCommand
37
+ """
38
+ return TrainCommand(args)
39
+
40
+
41
+ class TrainCommand(BaseTransformersCLICommand):
42
+ @staticmethod
43
+ def register_subcommand(parser: ArgumentParser):
44
+ """
45
+ Register this command to argparse so it's available for the transformer-cli
46
+
47
+ Args:
48
+ parser: Root parser to register command-specific arguments
49
+ """
50
+ train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
51
+
52
+ train_parser.add_argument(
53
+ "--train_data",
54
+ type=str,
55
+ required=True,
56
+ help="Path to the training (and optionally evaluation) dataset, as a CSV with tab-separated labels and sentences.",
57
+ )
58
+ train_parser.add_argument(
59
+ "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
60
+ )
61
+ train_parser.add_argument(
62
+ "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
63
+ )
64
+ train_parser.add_argument(
65
+ "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
66
+ )
67
+ train_parser.add_argument(
68
+ "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
69
+ )
70
+
71
+ train_parser.add_argument("--validation_data", type=str, default="", help="Path to the validation dataset.")
72
+ train_parser.add_argument(
73
+ "--validation_split",
74
+ type=float,
75
+ default=0.1,
76
+ help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
77
+ )
78
+
79
+ train_parser.add_argument("--output", type=str, default="./", help="Path to save the trained model.")
80
+
81
+ train_parser.add_argument(
82
+ "--task", type=str, default="text_classification", help="Task to train the model on."
83
+ )
84
+ train_parser.add_argument(
85
+ "--model", type=str, default="google-bert/bert-base-uncased", help="Model's name or path to stored model."
86
+ )
87
+ train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
88
+ train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
89
+ train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
90
+ train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
91
+ train_parser.set_defaults(func=train_command_factory)
92
+
93
+ def __init__(self, args: Namespace):
94
+ self.logger = logging.get_logger("transformers-cli/training")
95
+
96
+ self.framework = "tf" if is_tf_available() else "torch"
97
+
98
+ os.makedirs(args.output, exist_ok=True)
99
+ self.output = args.output
100
+
101
+ self.column_label = args.column_label
102
+ self.column_text = args.column_text
103
+ self.column_id = args.column_id
104
+
105
+ self.logger.info(f"Loading {args.task} pipeline for {args.model}")
106
+ if args.task == "text_classification":
107
+ self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
108
+ elif args.task == "token_classification":
109
+ raise NotImplementedError
110
+ elif args.task == "question_answering":
111
+ raise NotImplementedError
112
+
113
+ self.logger.info(f"Loading dataset from {args.train_data}")
114
+ self.train_dataset = Processor.create_from_csv(
115
+ args.train_data,
116
+ column_label=args.column_label,
117
+ column_text=args.column_text,
118
+ column_id=args.column_id,
119
+ skip_first_row=args.skip_first_row,
120
+ )
121
+ self.valid_dataset = None
122
+ if args.validation_data:
123
+ self.logger.info(f"Loading validation dataset from {args.validation_data}")
124
+ self.valid_dataset = Processor.create_from_csv(
125
+ args.validation_data,
126
+ column_label=args.column_label,
127
+ column_text=args.column_text,
128
+ column_id=args.column_id,
129
+ skip_first_row=args.skip_first_row,
130
+ )
131
+
132
+ self.validation_split = args.validation_split
133
+ self.train_batch_size = args.train_batch_size
134
+ self.valid_batch_size = args.valid_batch_size
135
+ self.learning_rate = args.learning_rate
136
+ self.adam_epsilon = args.adam_epsilon
137
+
138
+ def run(self):
139
+ if self.framework == "tf":
140
+ return self.run_tf()
141
+ return self.run_torch()
142
+
143
+ def run_torch(self):
144
+ raise NotImplementedError
145
+
146
+ def run_tf(self):
147
+ self.pipeline.fit(
148
+ self.train_dataset,
149
+ validation_data=self.valid_dataset,
150
+ validation_split=self.validation_split,
151
+ learning_rate=self.learning_rate,
152
+ adam_epsilon=self.adam_epsilon,
153
+ train_batch_size=self.train_batch_size,
154
+ valid_batch_size=self.valid_batch_size,
155
+ )
156
+
157
+ # Save trained pipeline
158
+ self.pipeline.save_pretrained(self.output)
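A minimal sketch of how the (legacy) train command above is driven; the CSV path is a placeholder and the remaining fields mirror the argparse defaults registered in register_subcommand. Whether the underlying pipeline still supports the fit() path depends on the installed backend, so treat this as illustrative only:

    from argparse import Namespace
    from transformers.commands.train import train_command_factory

    # Equivalent CLI call: transformers-cli train --train_data train.csv --output ./trained
    args = Namespace(
        train_data="train.csv", column_label=0, column_text=1, column_id=2, skip_first_row=True,
        validation_data="", validation_split=0.1, output="./trained",
        task="text_classification", model="google-bert/bert-base-uncased",
        train_batch_size=32, valid_batch_size=64, learning_rate=3e-5, adam_epsilon=1e-8,
    )
    # The TF path calls pipeline.fit(...) and then save_pretrained(output).
    train_command_factory(args).run()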
llmeval-env/lib/python3.10/site-packages/transformers/commands/transformers_cli.py ADDED
@@ -0,0 +1,59 @@
1
+ #!/usr/bin/env python
2
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from argparse import ArgumentParser
17
+
18
+ from .add_new_model import AddNewModelCommand
19
+ from .add_new_model_like import AddNewModelLikeCommand
20
+ from .convert import ConvertCommand
21
+ from .download import DownloadCommand
22
+ from .env import EnvironmentCommand
23
+ from .lfs import LfsCommands
24
+ from .pt_to_tf import PTtoTFCommand
25
+ from .run import RunCommand
26
+ from .serving import ServeCommand
27
+ from .user import UserCommands
28
+
29
+
30
+ def main():
31
+ parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
32
+ commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
33
+
34
+ # Register commands
35
+ ConvertCommand.register_subcommand(commands_parser)
36
+ DownloadCommand.register_subcommand(commands_parser)
37
+ EnvironmentCommand.register_subcommand(commands_parser)
38
+ RunCommand.register_subcommand(commands_parser)
39
+ ServeCommand.register_subcommand(commands_parser)
40
+ UserCommands.register_subcommand(commands_parser)
41
+ AddNewModelCommand.register_subcommand(commands_parser)
42
+ AddNewModelLikeCommand.register_subcommand(commands_parser)
43
+ LfsCommands.register_subcommand(commands_parser)
44
+ PTtoTFCommand.register_subcommand(commands_parser)
45
+
46
+ # Let's go
47
+ args = parser.parse_args()
48
+
49
+ if not hasattr(args, "func"):
50
+ parser.print_help()
51
+ exit(1)
52
+
53
+ # Run
54
+ service = args.func(args)
55
+ service.run()
56
+
57
+
58
+ if __name__ == "__main__":
59
+ main()
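The main() entry point above simply registers each command on a sub-parser and dispatches via args.func. A minimal sketch of the same pattern for a hypothetical extra subcommand (the class and command name are illustrative, not part of the library):

    from argparse import ArgumentParser
    from transformers.commands import BaseTransformersCLICommand

    class HelloCommand(BaseTransformersCLICommand):
        @staticmethod
        def register_subcommand(parser):
            # Same shape as the register_subcommand methods above.
            sub = parser.add_parser("hello", help="Toy command using the same registration pattern.")
            sub.set_defaults(func=lambda args: HelloCommand())

        def run(self):
            print("hello from a custom transformers-cli subcommand")

    parser = ArgumentParser("demo", usage="demo <command> [<args>]")
    HelloCommand.register_subcommand(parser.add_subparsers())
    args = parser.parse_args(["hello"])
    args.func(args).run()   # builds the command object and runs it, as main() does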
llmeval-env/lib/python3.10/site-packages/transformers/commands/user.py ADDED
@@ -0,0 +1,197 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import subprocess
16
+ from argparse import ArgumentParser
17
+ from typing import List, Union
18
+
19
+ from huggingface_hub.hf_api import HfFolder, create_repo, whoami
20
+ from requests.exceptions import HTTPError
21
+
22
+ from . import BaseTransformersCLICommand
23
+
24
+
25
+ class UserCommands(BaseTransformersCLICommand):
26
+ @staticmethod
27
+ def register_subcommand(parser: ArgumentParser):
28
+ login_parser = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
29
+ login_parser.set_defaults(func=lambda args: LoginCommand(args))
30
+ whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
31
+ whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
32
+ logout_parser = parser.add_parser("logout", help="Log out")
33
+ logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
34
+
35
+ # new system: git-based repo system
36
+ repo_parser = parser.add_parser(
37
+ "repo",
38
+ help="Deprecated: use `huggingface-cli` instead. Commands to interact with your huggingface.co repos.",
39
+ )
40
+ repo_subparsers = repo_parser.add_subparsers(
41
+ help="Deprecated: use `huggingface-cli` instead. huggingface.co repos related commands"
42
+ )
43
+ repo_create_parser = repo_subparsers.add_parser(
44
+ "create", help="Deprecated: use `huggingface-cli` instead. Create a new repo on huggingface.co"
45
+ )
46
+ repo_create_parser.add_argument(
47
+ "name",
48
+ type=str,
49
+ help="Name for your model's repo. Will be namespaced under your username to build the model id.",
50
+ )
51
+ repo_create_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
52
+ repo_create_parser.add_argument("-y", "--yes", action="store_true", help="Optional: answer Yes to the prompt")
53
+ repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
54
+
55
+
56
+ class ANSI:
57
+ """
58
+ Helper for en.wikipedia.org/wiki/ANSI_escape_code
59
+ """
60
+
61
+ _bold = "\u001b[1m"
62
+ _red = "\u001b[31m"
63
+ _gray = "\u001b[90m"
64
+ _reset = "\u001b[0m"
65
+
66
+ @classmethod
67
+ def bold(cls, s):
68
+ return f"{cls._bold}{s}{cls._reset}"
69
+
70
+ @classmethod
71
+ def red(cls, s):
72
+ return f"{cls._bold}{cls._red}{s}{cls._reset}"
73
+
74
+ @classmethod
75
+ def gray(cls, s):
76
+ return f"{cls._gray}{s}{cls._reset}"
77
+
78
+
79
+ def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
80
+ """
81
+ Inspired by:
82
+
83
+ - stackoverflow.com/a/8356620/593036
84
+ - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
85
+ """
86
+ col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
87
+ row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
88
+ lines = []
89
+ lines.append(row_format.format(*headers))
90
+ lines.append(row_format.format(*["-" * w for w in col_widths]))
91
+ for row in rows:
92
+ lines.append(row_format.format(*row))
93
+ return "\n".join(lines)
94
+
95
+
96
+ class BaseUserCommand:
97
+ def __init__(self, args):
98
+ self.args = args
99
+
100
+
101
+ class LoginCommand(BaseUserCommand):
102
+ def run(self):
103
+ print(
104
+ ANSI.red(
105
+ "ERROR! `transformers-cli login` uses an outdated login mechanism "
106
+ "that is not compatible with the Hugging Face Hub backend anymore. "
107
+ "Please use `huggingface-cli login` instead."
108
+ )
109
+ )
110
+
111
+
112
+ class WhoamiCommand(BaseUserCommand):
113
+ def run(self):
114
+ print(
115
+ ANSI.red(
116
+ "WARNING! `transformers-cli whoami` is deprecated and will be removed in v5. Please use "
117
+ "`huggingface-cli whoami` instead."
118
+ )
119
+ )
120
+ token = HfFolder.get_token()
121
+ if token is None:
122
+ print("Not logged in")
123
+ exit()
124
+ try:
125
+ user, orgs = whoami(token)
126
+ print(user)
127
+ if orgs:
128
+ print(ANSI.bold("orgs: "), ",".join(orgs))
129
+ except HTTPError as e:
130
+ print(e)
131
+ print(ANSI.red(e.response.text))
132
+ exit(1)
133
+
134
+
135
+ class LogoutCommand(BaseUserCommand):
136
+ def run(self):
137
+ print(
138
+ ANSI.red(
139
+ "ERROR! `transformers-cli logout` uses an outdated logout mechanism "
140
+ "that is not compatible with the Hugging Face Hub backend anymore. "
141
+ "Please use `huggingface-cli logout` instead."
142
+ )
143
+ )
144
+
145
+
146
+ class RepoCreateCommand(BaseUserCommand):
147
+ def run(self):
148
+ print(
149
+ ANSI.red(
150
+ "WARNING! Managing repositories through transformers-cli is deprecated. "
151
+ "Please use `huggingface-cli` instead."
152
+ )
153
+ )
154
+ token = HfFolder.get_token()
155
+ if token is None:
156
+ print("Not logged in")
157
+ exit(1)
158
+ try:
159
+ stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
160
+ print(ANSI.gray(stdout.strip()))
161
+ except FileNotFoundError:
162
+ print("Looks like you do not have git installed, please install.")
163
+
164
+ try:
165
+ stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
166
+ print(ANSI.gray(stdout.strip()))
167
+ except FileNotFoundError:
168
+ print(
169
+ ANSI.red(
170
+ "Looks like you do not have git-lfs installed, please install."
171
+ " You can install from https://git-lfs.github.com/."
172
+ " Then run `git lfs install` (you only have to do this once)."
173
+ )
174
+ )
175
+ print("")
176
+
177
+ user, _ = whoami(token)
178
+ namespace = self.args.organization if self.args.organization is not None else user
179
+ full_name = f"{namespace}/{self.args.name}"
180
+ print(f"You are about to create {ANSI.bold(full_name)}")
181
+
182
+ if not self.args.yes:
183
+ choice = input("Proceed? [Y/n] ").lower()
184
+ if not (choice == "" or choice == "y" or choice == "yes"):
185
+ print("Abort")
186
+ exit()
187
+ try:
188
+ url = create_repo(token, name=self.args.name, organization=self.args.organization)
189
+ except HTTPError as e:
190
+ print(e)
191
+ print(ANSI.red(e.response.text))
192
+ exit(1)
193
+ print("\nYour repo now lives at:")
194
+ print(f" {ANSI.bold(url)}")
195
+ print("\nYou can clone it locally with the command below, and commit/push as usual.")
196
+ print(f"\n git clone {url}")
197
+ print("")
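A small usage example of the tabulate() helper defined above (the row data is made up):

    from transformers.commands.user import tabulate

    rows = [["julien", "org-a"], ["thomas", "org-b"]]
    # Prints a left-aligned table: a header row, a row of dashes, then one line per entry.
    print(tabulate(rows, headers=["user", "orgs"]))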
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.62 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_flax_albert.cpython-310.pyc ADDED
Binary file (28.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/__init__.py ADDED
@@ -0,0 +1,51 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_dbrx": ["DbrxConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_dbrx"] = [
30
+ "DbrxForCausalLM",
31
+ "DbrxModel",
32
+ "DbrxPreTrainedModel",
33
+ ]
34
+
35
+
36
+ if TYPE_CHECKING:
37
+ from .configuration_dbrx import DbrxConfig
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ from .modeling_dbrx import DbrxForCausalLM, DbrxModel, DbrxPreTrainedModel
46
+
47
+
48
+ else:
49
+ import sys
50
+
51
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
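The _LazyModule indirection above means the heavy submodules are only imported when an attribute is first accessed. A hedged usage sketch (the modeling classes additionally require torch to be installed):

    from transformers.models.dbrx import DbrxConfig   # resolved lazily through _import_structure

    config = DbrxConfig()                              # a default configuration object
    print(type(config).__name__)                       # DbrxConfig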
llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (800 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/configuration_dbrx.cpython-310.pyc ADDED
Binary file (9.38 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/modeling_dbrx.cpython-310.pyc ADDED
Binary file (44.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/configuration_dbrx.py ADDED
@@ -0,0 +1,257 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Databricks Mosaic Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DBRX model configuration """
16
+
17
+ from typing import Any, Optional
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class DbrxAttentionConfig(PretrainedConfig):
27
+ """Configuration class for Dbrx Attention.
28
+
29
+ This is the configuration class for the [`DbrxAttention`] class. It is used to instantiate attention layers
30
+ according to the specified arguments, defining the layers architecture.
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+ Args:
36
+ attn_pdrop (`float`, *optional*, defaults to 0.0):
37
+ The dropout probability for the attention layers.
38
+ clip_qkv (`float`, *optional*):
39
+ If set, clip the queries, keys, and values in the attention layer to this value.
40
+ kv_n_heads (`Optional[int]`, defaults to 1): For grouped_query_attention only, allow user to specify number of kv heads.
41
+ rope_theta (`float`, defaults to 10000.0): The base frequency for rope.
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ attn_pdrop: float = 0.0,
47
+ clip_qkv: Optional[float] = None,
48
+ kv_n_heads: int = 1,
49
+ rope_theta: float = 10000.0,
50
+ **kwargs: Any,
51
+ ):
52
+ super().__init__(**kwargs)
53
+ self.attn_pdrop = attn_pdrop
54
+ self.clip_qkv = clip_qkv
55
+ self.kv_n_heads = kv_n_heads
56
+ self.rope_theta = rope_theta
57
+
58
+ for k in ["model_type"]:
59
+ if k in kwargs:
60
+ kwargs.pop(k)
61
+ if len(kwargs) != 0:
62
+ raise ValueError(f"Found unknown {kwargs=}")
63
+
64
+ @classmethod
65
+ def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> "PretrainedConfig":
66
+ cls._set_token_in_kwargs(kwargs)
67
+
68
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
69
+
70
+ if config_dict.get("model_type") == "dbrx":
71
+ config_dict = config_dict["attn_config"]
72
+
73
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
74
+ logger.warning(
75
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
76
+ + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
77
+ )
78
+
79
+ return cls.from_dict(config_dict, **kwargs)
80
+
81
+
82
+ class DbrxFFNConfig(PretrainedConfig):
83
+ """Configuration class for Dbrx FFN.
84
+
85
+ This is the configuration class to store the configuration of a [`DbrxFFN`] class. It is used to instantiate feedforward layers according to
86
+ the specified arguments, defining the layers' architecture.
87
+
88
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
89
+ documentation from [`PretrainedConfig`] for more information.
90
+
91
+ Args:
92
+ ffn_act_fn (`dict`, *optional*, defaults to `None`): A dict specifying the activation function for the FFN.
93
+ The dict should have a key 'name' with the value being the name of the activation function along with
94
+ any additional keyword arguments. If `None`, then set to `{"name": "silu"}`.
95
+ ffn_hidden_size (`int`, defaults to 3584): The hidden size of the feedforward network.
96
+ moe_num_experts (`int`, defaults to 4): The number of experts in the mixture of experts layer.
97
+ moe_top_k (`int`, defaults to 1): The number of experts each token is routed to in the mixture of experts layer.
98
+ moe_jitter_eps (`float`, *optional*, defaults to `None`): If not `None`, the jitter epsilon for the mixture of experts layer.
99
+ moe_loss_weight (`float`, defaults to 0.01): The loss weight for the mixture of experts layer.
100
+ moe_normalize_expert_weights (`float`, *optional*, defaults to 1.0): The normalization factor for the expert weights.
101
+ """
102
+
103
+ def __init__(
104
+ self,
105
+ ffn_act_fn: Optional[dict] = None,
106
+ ffn_hidden_size: int = 3584,
107
+ moe_num_experts: int = 4,
108
+ moe_top_k: int = 1,
109
+ moe_jitter_eps: Optional[float] = None,
110
+ moe_loss_weight: float = 0.01,
111
+ moe_normalize_expert_weights: Optional[float] = 1.0,
112
+ **kwargs: Any,
113
+ ):
114
+ super().__init__()
115
+ if ffn_act_fn is None:
116
+ ffn_act_fn = {"name": "silu"}
117
+ self.ffn_act_fn = ffn_act_fn
118
+ self.ffn_hidden_size = ffn_hidden_size
119
+ self.moe_num_experts = moe_num_experts
120
+ self.moe_top_k = moe_top_k
121
+ self.moe_jitter_eps = moe_jitter_eps
122
+ self.moe_loss_weight = moe_loss_weight
123
+ self.moe_normalize_expert_weights = moe_normalize_expert_weights
124
+
125
+ for k in ["model_type"]:
126
+ if k in kwargs:
127
+ kwargs.pop(k)
128
+ if len(kwargs) != 0:
129
+ raise ValueError(f"Found unknown {kwargs=}")
130
+
131
+ @classmethod
132
+ def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> "PretrainedConfig":
133
+ cls._set_token_in_kwargs(kwargs)
134
+
135
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
136
+
137
+ if config_dict.get("model_type") == "dbrx":
138
+ config_dict = config_dict["ffn_config"]
139
+
140
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
141
+ logger.warning(
142
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
143
+ + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
144
+ )
145
+
146
+ return cls.from_dict(config_dict, **kwargs)
147
+
148
+
149
+ class DbrxConfig(PretrainedConfig):
150
+ r"""
151
+
152
+ This is the configuration class to store the configuration of a [`DbrxModel`]. It is used to instantiate a Dbrx model according to the
153
+ specified arguments, defining the model architecture. Instantiating a configuration with the
154
+ defaults will yield a different configuration from that of the [databricks/dbrx-instruct](https://huggingface.co/databricks/dbrx-instruct) architecture.
155
+
156
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
157
+ documentation from [`PretrainedConfig`] for more information.
158
+
159
+
160
+ Args:
161
+ d_model (`int`, *optional*, defaults to 2048):
162
+ Dimensionality of the embeddings and hidden states.
163
+ n_heads (`int`, *optional*, defaults to 16):
164
+ Number of attention heads for each attention layer in the Transformer encoder.
165
+ n_layers (`int`, *optional*, defaults to 24):
166
+ Number of hidden layers in the Transformer encoder.
167
+ max_seq_len (`int`, *optional*, defaults to 2048):
168
+ The maximum sequence length of the model.
169
+ vocab_size (`int`, *optional*, defaults to 32000):
170
+ Vocabulary size of the Dbrx model. Defines the maximum number of different tokens that can be represented by
171
+ the `input_ids` passed when calling [`DbrxModel`].
172
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
173
+ The dropout probability applied to the attention output before combining with residual.
174
+ emb_pdrop (`float`, *optional*, defaults to 0.0):
175
+ The dropout probability for the embedding layer.
176
+ attn_config (`dict`, *optional*):
177
+ A dictionary used to configure the model's attention module.
178
+ ffn_config (`dict`, *optional*):
179
+ A dictionary used to configure the model's FFN module.
180
+ use_cache (`bool`, *optional*, defaults to `True`):
181
+ Whether or not the model should return the last key/values attentions (not used by all models).
182
+ initializer_range (`float`, *optional*, defaults to 0.02):
183
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
184
+ output_router_logits (`bool`, *optional*, defaults to `False`):
185
+ Whether or not the router logits should be returned by the model. Enabling this will also
186
+ allow the model to output the auxiliary load balancing loss.
187
+
188
+
189
+ Example:
190
+ ```python
191
+ >>> from transformers import DbrxConfig, DbrxModel
192
+
193
+ >>> # Initializing a Dbrx configuration
194
+ >>> configuration = DbrxConfig(n_layers=2, d_model=256, n_heads=8, vocab_size=128)
195
+
196
+ >>> # Initializing a model (with random weights) from the configuration
197
+ >>> model = DbrxModel(configuration)
198
+
199
+ >>> # Accessing the model configuration
200
+ >>> configuration = model.config
201
+ ```
202
+ """
203
+
204
+ model_type = "dbrx"
205
+ attribute_map = {
206
+ "num_attention_heads": "n_heads",
207
+ "hidden_size": "d_model",
208
+ "num_hidden_layers": "n_layers",
209
+ "max_position_embeddings": "max_seq_len",
210
+ }
211
+
212
+ def __init__(
213
+ self,
214
+ d_model: int = 2048,
215
+ n_heads: int = 16,
216
+ n_layers: int = 24,
217
+ max_seq_len: int = 2048,
218
+ vocab_size: int = 32000,
219
+ resid_pdrop: float = 0.0,
220
+ emb_pdrop: float = 0.0,
221
+ attn_config: Optional[DbrxAttentionConfig] = None,
222
+ ffn_config: Optional[DbrxFFNConfig] = None,
223
+ use_cache: bool = True,
224
+ initializer_range: float = 0.02,
225
+ output_router_logits: bool = False,
226
+ **kwargs: Any,
227
+ ):
228
+ if attn_config is None:
229
+ self.attn_config = DbrxAttentionConfig()
230
+ elif isinstance(attn_config, dict):
231
+ self.attn_config = DbrxAttentionConfig(**attn_config)
232
+ else:
233
+ self.attn_config = attn_config
234
+
235
+ if ffn_config is None:
236
+ self.ffn_config = DbrxFFNConfig()
237
+ elif isinstance(ffn_config, dict):
238
+ self.ffn_config = DbrxFFNConfig(**ffn_config)
239
+ else:
240
+ self.ffn_config = ffn_config
241
+
242
+ self.d_model = d_model
243
+ self.n_heads = n_heads
244
+ self.n_layers = n_layers
245
+ self.max_seq_len = max_seq_len
246
+ self.vocab_size = vocab_size
247
+ self.resid_pdrop = resid_pdrop
248
+ self.emb_pdrop = emb_pdrop
249
+ self.use_cache = use_cache
250
+ self.initializer_range = initializer_range
251
+ self.output_router_logits = output_router_logits
252
+
253
+ tie_word_embeddings = kwargs.pop("tie_word_embeddings", False)
254
+ if tie_word_embeddings:
255
+ raise ValueError("tie_word_embeddings is not supported for DBRX models.")
256
+
257
+ super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
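A minimal usage sketch (values are deliberately tiny and purely illustrative, not a real DBRX configuration) of how the nested sub-configs are typically passed as plain dicts, which `DbrxConfig.__init__` converts into `DbrxAttentionConfig` / `DbrxFFNConfig` instances, and of the aliases exposed through `attribute_map`:

```python
from transformers import DbrxConfig

# Hypothetical, deliberately tiny values for illustration only.
config = DbrxConfig(
    d_model=256,
    n_heads=8,
    n_layers=2,
    max_seq_len=512,
    vocab_size=1000,
    attn_config={"kv_n_heads": 2, "clip_qkv": 8.0},                             # becomes a DbrxAttentionConfig
    ffn_config={"ffn_hidden_size": 512, "moe_num_experts": 4, "moe_top_k": 2},  # becomes a DbrxFFNConfig
)

# attribute_map exposes the usual Transformers attribute names as aliases.
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.n_heads
assert config.attn_config.kv_n_heads == 2
```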
llmeval-env/lib/python3.10/site-packages/transformers/models/dbrx/modeling_dbrx.py ADDED
@@ -0,0 +1,1523 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Databricks Mosaic Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DBRX model. """
16
+
17
+ import math
18
+ from typing import Any, Dict, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.nn.functional as F
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...activations import ACT2FN
26
+ from ...cache_utils import Cache, DynamicCache, StaticCache
27
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
28
+ from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import (
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ is_flash_attn_2_available,
34
+ is_flash_attn_greater_or_equal_2_10,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_dbrx import DbrxConfig
39
+
40
+
41
+ if is_flash_attn_2_available():
42
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
43
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = "DbrxConfig"
48
+
49
+
50
+ # Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with Gemma->Dbrx
51
+ class DbrxRotaryEmbedding(nn.Module):
52
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
53
+ super().__init__()
54
+
55
+ self.dim = dim
56
+ self.max_position_embeddings = max_position_embeddings
57
+ self.base = base
58
+ self.register_buffer("inv_freq", None, persistent=False)
59
+
60
+ @torch.no_grad()
61
+ def forward(self, x, position_ids, seq_len=None):
62
+ # x: [bs, num_attention_heads, seq_len, head_size]
63
+ if self.inv_freq is None:
64
+ self.inv_freq = 1.0 / (
65
+ self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
66
+ )
67
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
68
+ position_ids_expanded = position_ids[:, None, :].float()
69
+ # Force float32 since bfloat16 loses precision on long contexts
70
+ # See https://github.com/huggingface/transformers/pull/29285
71
+ device_type = x.device.type
72
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
73
+ with torch.autocast(device_type=device_type, enabled=False):
74
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
75
+ emb = torch.cat((freqs, freqs), dim=-1)
76
+ cos = emb.cos()
77
+ sin = emb.sin()
78
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
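A short shape sketch (hypothetical sizes, calling the module-level class directly) of what `DbrxRotaryEmbedding` returns:

```python
import torch
from transformers.models.dbrx.modeling_dbrx import DbrxRotaryEmbedding

rope = DbrxRotaryEmbedding(dim=16, max_position_embeddings=2048, base=10000)
x = torch.randn(1, 8, 5, 16)                  # [batch, heads, seq_len, head_dim]
position_ids = torch.arange(5).unsqueeze(0)   # [batch, seq_len]
cos, sin = rope(x, position_ids)
print(cos.shape, sin.shape)                   # torch.Size([1, 5, 16]) torch.Size([1, 5, 16])
```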
79
+
80
+
81
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
82
+ def rotate_half(x):
83
+ """Rotates half the hidden dims of the input."""
84
+ x1 = x[..., : x.shape[-1] // 2]
85
+ x2 = x[..., x.shape[-1] // 2 :]
86
+ return torch.cat((-x2, x1), dim=-1)
87
+
88
+
89
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
90
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
91
+ """Applies Rotary Position Embedding to the query and key tensors.
92
+
93
+ Args:
94
+ q (`torch.Tensor`): The query tensor.
95
+ k (`torch.Tensor`): The key tensor.
96
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
97
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
98
+ position_ids (`torch.Tensor`, *optional*):
99
+ Deprecated and unused.
100
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
101
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
102
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
103
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
104
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
105
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
106
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
107
+ Returns:
108
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
109
+ """
110
+ cos = cos.unsqueeze(unsqueeze_dim)
111
+ sin = sin.unsqueeze(unsqueeze_dim)
112
+ q_embed = (q * cos) + (rotate_half(q) * sin)
113
+ k_embed = (k * cos) + (rotate_half(k) * sin)
114
+ return q_embed, k_embed
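A brief broadcasting sketch (hypothetical sizes; the helper is imported from the module shown here) of how `cos`/`sin` are applied with the default `unsqueeze_dim=1`:

```python
import torch
from transformers.models.dbrx.modeling_dbrx import apply_rotary_pos_emb

batch, heads, seq_len, head_dim = 1, 8, 5, 16
q = torch.randn(batch, heads, seq_len, head_dim)
k = torch.randn(batch, heads, seq_len, head_dim)
# cos/sin as returned by DbrxRotaryEmbedding: [batch, seq_len, head_dim]
cos = torch.randn(batch, seq_len, head_dim)
sin = torch.randn(batch, seq_len, head_dim)
# unsqueeze_dim=1 turns cos/sin into [batch, 1, seq_len, head_dim] so they broadcast over the head axis.
q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)
assert q_embed.shape == q.shape and k_embed.shape == k.shape
```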
115
+
116
+
117
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
118
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
119
+ """
120
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
121
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
122
+ """
123
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
124
+ if n_rep == 1:
125
+ return hidden_states
126
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
127
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
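As a quick sanity check (illustrative sizes only), `repeat_kv` matches the `torch.repeat_interleave` call mentioned in its docstring:

```python
import torch
from transformers.models.dbrx.modeling_dbrx import repeat_kv

# Illustrative sizes: batch=1, 2 KV heads, 3 tokens, head_dim=4, repeated 3x to match 6 query heads.
hidden = torch.randn(1, 2, 3, 4)
assert torch.equal(repeat_kv(hidden, 3), torch.repeat_interleave(hidden, repeats=3, dim=1))
```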
128
+
129
+
130
+ def load_balancing_loss_func(
131
+ gate_logits: torch.Tensor,
132
+ num_experts: int,
133
+ top_k: int,
134
+ attention_mask: Optional[torch.Tensor],
135
+ ) -> torch.Tensor:
136
+ r"""Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
137
+
138
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
139
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
140
+ experts is too unbalanced.
141
+
142
+ Args:
143
+ gate_logits (Union[`torch.Tensor`, Tuple[`torch.Tensor`]]):
144
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
145
+ shape [batch_size X sequence_length, num_experts].
146
+ num_experts (`int`):
147
+ Number of experts.
148
+ top_k (`int`):
149
+ The number of experts each token is routed to.
150
+ attention_mask (`torch.Tensor`, *optional*):
151
+ The attention mask used in the forward function,
152
+ with shape [batch_size, sequence_length] if not None.
153
+
154
+ Returns:
155
+ The auxiliary loss.
156
+ """
157
+ if gate_logits is None or not isinstance(gate_logits, tuple):
158
+ return torch.tensor(0.0)
159
+
160
+ if isinstance(gate_logits, tuple):
161
+ compute_device = gate_logits[0].device
162
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
163
+
164
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
165
+
166
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
167
+
168
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
169
+
170
+ if attention_mask is None:
171
+ # Compute the percentage of tokens routed to each expert
172
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
173
+
174
+ # Compute the average probability of routing to these experts
175
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
176
+ else:
177
+ batch_size, sequence_length = attention_mask.shape
178
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
179
+
180
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
181
+ expert_attention_mask = (
182
+ attention_mask[None, :, :, None, None]
183
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
184
+ .reshape(-1, top_k, num_experts)
185
+ .to(compute_device)
186
+ )
187
+
188
+ # Compute the percentage of tokens routed to each expert
189
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
190
+ expert_attention_mask, dim=0
191
+ )
192
+
193
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
194
+ router_per_expert_attention_mask = (
195
+ attention_mask[None, :, :, None]
196
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
197
+ .reshape(-1, num_experts)
198
+ .to(compute_device)
199
+ )
200
+
201
+ # Compute the average probability of routing to these experts
202
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
203
+ router_per_expert_attention_mask, dim=0
204
+ )
205
+
206
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
207
+ return overall_loss * num_experts
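A minimal sketch (random logits, hypothetical sizes) of calling the auxiliary-loss helper directly:

```python
import torch
from transformers.models.dbrx.modeling_dbrx import load_balancing_loss_func

# Router logits for two layers, each of shape [batch_size * sequence_length, num_experts] = [6, 4].
gate_logits = (torch.randn(6, 4), torch.randn(6, 4))
aux_loss = load_balancing_loss_func(gate_logits, num_experts=4, top_k=1, attention_mask=None)
print(aux_loss)  # scalar tensor; approaches 1.0 when routing is perfectly balanced
```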
208
+
209
+
210
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
211
+ def _get_unpad_data(attention_mask):
212
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
213
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
214
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
215
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
216
+ return (
217
+ indices,
218
+ cu_seqlens,
219
+ max_seqlen_in_batch,
220
+ )
221
+
222
+
223
+ class DbrxAttention(nn.Module):
224
+ """Multi-head self attention."""
225
+
226
+ def __init__(self, config: DbrxConfig, block_idx: Optional[int] = None):
227
+ super().__init__()
228
+ self.config = config
229
+ self.hidden_size = config.d_model
230
+ self.num_heads = config.n_heads
231
+ self.head_dim = self.hidden_size // self.num_heads
232
+ self.max_position_embeddings = config.max_seq_len
233
+ self.block_idx = block_idx
234
+ if block_idx is None:
235
+ logger.warning_once(
236
+ f"Instantiating {self.__class__.__name__} without passing a `block_idx` is not recommended and will "
237
+ + "lead to errors during the forward call if caching is used. Please make sure to provide a `block_idx` "
238
+ + "when creating this class."
239
+ )
240
+
241
+ attn_config = config.attn_config
242
+ self.attn_pdrop = attn_config.attn_pdrop
243
+ self.clip_qkv = attn_config.clip_qkv
244
+ self.num_key_value_heads = attn_config.kv_n_heads
245
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
246
+ self.rope_theta = attn_config.rope_theta
247
+ self.is_causal = True
248
+
249
+ self.Wqkv = nn.Linear(
250
+ self.hidden_size, self.hidden_size + 2 * self.num_key_value_heads * self.head_dim, bias=False
251
+ )
252
+ self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
253
+ self.rotary_emb = DbrxRotaryEmbedding(
254
+ self.head_dim,
255
+ max_position_embeddings=self.max_position_embeddings,
256
+ base=self.rope_theta,
257
+ )
258
+
259
+ def forward(
260
+ self,
261
+ hidden_states: torch.Tensor,
262
+ position_ids: torch.LongTensor,
263
+ attention_mask: Optional[torch.Tensor] = None,
264
+ past_key_value: Optional[Cache] = None,
265
+ output_attentions: bool = False,
266
+ use_cache: bool = False,
267
+ cache_position: Optional[torch.LongTensor] = None,
268
+ **kwargs: Any,
269
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
270
+ bsz, q_len, _ = hidden_states.size()
271
+
272
+ qkv_states = self.Wqkv(hidden_states)
273
+ min_val = -self.clip_qkv if self.clip_qkv is not None else None
274
+ max_val = self.clip_qkv
275
+ qkv_states = qkv_states.clamp(min=min_val, max=max_val)
276
+
277
+ query_states, key_states, value_states = qkv_states.split(
278
+ [
279
+ self.hidden_size,
280
+ self.num_key_value_heads * self.head_dim,
281
+ self.num_key_value_heads * self.head_dim,
282
+ ],
283
+ dim=2,
284
+ )
285
+
286
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
287
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
288
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
289
+
290
+ past_key_value = getattr(self, "past_key_value", past_key_value)
291
+ cos, sin = self.rotary_emb(value_states, position_ids)
292
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
293
+
294
+ if past_key_value is not None:
295
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
296
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
297
+ key_states, value_states = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
298
+
299
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
300
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
301
+
302
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
303
+
304
+ if attention_mask is not None: # no matter the length, we just slice it
305
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
306
+ attn_weights = attn_weights + causal_mask
307
+
308
+ # upcast attention to fp32
309
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
310
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attn_pdrop, training=self.training)
311
+ attn_output = torch.matmul(attn_weights, value_states)
312
+
313
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
314
+ raise ValueError(
315
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
316
+ + f" {attn_output.size()}"
317
+ )
318
+
319
+ attn_output = attn_output.transpose(1, 2).contiguous()
320
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
321
+ attn_output = self.out_proj(attn_output)
322
+
323
+ if not output_attentions:
324
+ attn_weights = None
325
+
326
+ return attn_output, attn_weights, past_key_value
327
+
328
+
329
+ class DbrxFlashAttention2(DbrxAttention):
330
+ """Dbrx flash attention module.
331
+
332
+ This module inherits from `DbrxAttention` as the weights of the module stay
333
+ untouched. The only required change would be on the forward pass where it
334
+ calls the public API of flash attention.
335
+ """
336
+
337
+ def __init__(self, *args: Any, **kwargs: Any):
338
+ super().__init__(*args, **kwargs)
339
+
340
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
341
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
342
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
343
+ # From: https://github.com/huggingface/transformers/blob/3b8e2932ce743008f63585aae1e1b8b30dc8b3ac/src/transformers/models/gemma/modeling_gemma.py#L318
344
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
345
+
346
+ def forward(
347
+ self,
348
+ hidden_states: torch.Tensor,
349
+ attention_mask: Optional[torch.LongTensor] = None,
350
+ position_ids: Optional[torch.LongTensor] = None,
351
+ past_key_value: Optional[Cache] = None,
352
+ output_attentions: bool = False,
353
+ use_cache: bool = False,
354
+ cache_position: Optional[torch.LongTensor] = None,
355
+ **kwargs: Any,
356
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
357
+ logger.info("Implicitly setting `output_attentions` to False as it is not supported in Flash Attention.")
358
+ output_attentions = False
359
+
360
+ bsz, q_len, _ = hidden_states.size()
361
+
362
+ qkv_states = self.Wqkv(hidden_states)
363
+ if self.clip_qkv is not None:
364
+ qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
365
+
366
+ query_states, key_states, value_states = qkv_states.split(
367
+ [
368
+ self.hidden_size,
369
+ self.num_key_value_heads * self.head_dim,
370
+ self.num_key_value_heads * self.head_dim,
371
+ ],
372
+ dim=2,
373
+ )
374
+
375
+ # Flash attention requires the input to have the shape
376
+ # batch_size x seq_length x num_heads x head_dim
377
+ # therefore we just need to keep the original shape
378
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
379
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
380
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
381
+
382
+ cos, sin = self.rotary_emb(value_states, position_ids)
383
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
384
+
385
+ past_key_value = getattr(self, "past_key_value", past_key_value)
386
+
387
+ if past_key_value is not None:
388
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
389
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
390
+ key_states, value_states = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
391
+
392
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout
393
+ # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
394
+ # to be able to avoid many of these transpose/reshape/view.
395
+ query_states = query_states.transpose(1, 2)
396
+ key_states = key_states.transpose(1, 2)
397
+ value_states = value_states.transpose(1, 2)
398
+
399
+ dropout_rate = self.attn_pdrop if self.training else 0.0
400
+
401
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
402
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
403
+ # cast them back to the correct dtype just to be sure everything works as expected.
404
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
405
+ # in fp32. (LlamaRMSNorm handles it correctly)
406
+ input_dtype = query_states.dtype
407
+ if input_dtype == torch.float32:
408
+ if torch.is_autocast_enabled():
409
+ target_dtype = torch.get_autocast_gpu_dtype()
410
+ # Handle the case where the model is quantized
411
+ elif hasattr(self.config, "_pre_quantization_dtype"):
412
+ target_dtype = self.config._pre_quantization_dtype
413
+ else:
414
+ target_dtype = query_states.dtype
415
+
416
+ logger.warning_once(
417
+ "The input hidden states seems to be silently casted in float32, this might be "
418
+ + "related to the fact you have upcasted embedding or layer norm layers in "
419
+ + f"float32. We will cast back the input in {target_dtype}."
420
+ )
421
+
422
+ query_states = query_states.to(target_dtype)
423
+ key_states = key_states.to(target_dtype)
424
+ value_states = value_states.to(target_dtype)
425
+
426
+ attn_output = self._flash_attention_forward(
427
+ query_states,
428
+ key_states,
429
+ value_states,
430
+ attention_mask,
431
+ q_len,
432
+ dropout=dropout_rate,
433
+ )
434
+
435
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
436
+ attn_output = self.out_proj(attn_output)
437
+
438
+ if not output_attentions:
439
+ attn_weights = None
440
+
441
+ return attn_output, attn_weights, past_key_value
442
+
443
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
444
+ def _flash_attention_forward(
445
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
446
+ ):
447
+ """
448
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
449
+ first unpad the input, then computes the attention scores and pad the final attention scores.
450
+
451
+ Args:
452
+ query_states (`torch.Tensor`):
453
+ Input query states to be passed to Flash Attention API
454
+ key_states (`torch.Tensor`):
455
+ Input key states to be passed to Flash Attention API
456
+ value_states (`torch.Tensor`):
457
+ Input value states to be passed to Flash Attention API
458
+ attention_mask (`torch.Tensor`):
459
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
460
+ position of padding tokens and 1 for the position of non-padding tokens.
461
+ dropout (`float`):
462
+ Attention dropout
463
+ softmax_scale (`float`, *optional*):
464
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
465
+ """
466
+ if not self._flash_attn_uses_top_left_mask:
467
+ causal = self.is_causal
468
+ else:
469
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
470
+ causal = self.is_causal and query_length != 1
471
+
472
+ # Contains at least one padding token in the sequence
473
+ if attention_mask is not None:
474
+ batch_size = query_states.shape[0]
475
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
476
+ query_states, key_states, value_states, attention_mask, query_length
477
+ )
478
+
479
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
480
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
481
+
482
+ attn_output_unpad = flash_attn_varlen_func(
483
+ query_states,
484
+ key_states,
485
+ value_states,
486
+ cu_seqlens_q=cu_seqlens_q,
487
+ cu_seqlens_k=cu_seqlens_k,
488
+ max_seqlen_q=max_seqlen_in_batch_q,
489
+ max_seqlen_k=max_seqlen_in_batch_k,
490
+ dropout_p=dropout,
491
+ softmax_scale=softmax_scale,
492
+ causal=causal,
493
+ )
494
+
495
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
496
+ else:
497
+ attn_output = flash_attn_func(
498
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
499
+ )
500
+
501
+ return attn_output
502
+
503
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
504
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
505
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
506
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
507
+
508
+ key_layer = index_first_axis(
509
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
510
+ )
511
+ value_layer = index_first_axis(
512
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
513
+ )
514
+ if query_length == kv_seq_len:
515
+ query_layer = index_first_axis(
516
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
517
+ )
518
+ cu_seqlens_q = cu_seqlens_k
519
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
520
+ indices_q = indices_k
521
+ elif query_length == 1:
522
+ max_seqlen_in_batch_q = 1
523
+ cu_seqlens_q = torch.arange(
524
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
525
+ ) # There is a memcpy here, that is very bad.
526
+ indices_q = cu_seqlens_q[:-1]
527
+ query_layer = query_layer.squeeze(1)
528
+ else:
529
+ # The -q_len: slice assumes left padding.
530
+ attention_mask = attention_mask[:, -query_length:]
531
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
532
+
533
+ return (
534
+ query_layer,
535
+ key_layer,
536
+ value_layer,
537
+ indices_q,
538
+ (cu_seqlens_q, cu_seqlens_k),
539
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
540
+ )
541
+
542
+
543
+ class DbrxSdpaAttention(DbrxAttention):
544
+ """
545
+ Dbrx attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
546
+ `DbrxAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
547
+ SDPA API.
548
+ """
549
+
550
+ def forward(
551
+ self,
552
+ hidden_states: torch.Tensor,
553
+ attention_mask: Optional[torch.Tensor] = None,
554
+ position_ids: Optional[torch.LongTensor] = None,
555
+ past_key_value: Optional[Cache] = None,
556
+ output_attentions: bool = False,
557
+ use_cache: bool = False,
558
+ cache_position: Optional[torch.LongTensor] = None,
559
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
560
+ if output_attentions:
561
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
562
+ logger.warning_once(
563
+ "DbrxModel is using DbrxSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
564
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
565
+ )
566
+ return super().forward(
567
+ hidden_states=hidden_states,
568
+ attention_mask=attention_mask,
569
+ position_ids=position_ids,
570
+ past_key_value=past_key_value,
571
+ output_attentions=output_attentions,
572
+ use_cache=use_cache,
573
+ cache_position=cache_position,
574
+ )
575
+
576
+ bsz, q_len, _ = hidden_states.size()
577
+
578
+ qkv_states = self.Wqkv(hidden_states)
579
+ if self.clip_qkv is not None:
580
+ qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
581
+
582
+ query_states, key_states, value_states = qkv_states.split(
583
+ [
584
+ self.hidden_size,
585
+ self.num_key_value_heads * self.head_dim,
586
+ self.num_key_value_heads * self.head_dim,
587
+ ],
588
+ dim=2,
589
+ )
590
+
591
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
592
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
593
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
594
+
595
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)
596
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, None)
597
+
598
+ past_key_value = getattr(self, "past_key_value", past_key_value)
599
+
600
+ if past_key_value is not None:
601
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
602
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
603
+ key_states, value_states = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
604
+
605
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
606
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
607
+
608
+ causal_mask = attention_mask
609
+ if attention_mask is not None:
610
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
611
+
612
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
613
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
614
+ if query_states.device.type == "cuda" and causal_mask is not None:
615
+ query_states = query_states.contiguous()
616
+ key_states = key_states.contiguous()
617
+ value_states = value_states.contiguous()
618
+
619
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
620
+ query_states,
621
+ key_states,
622
+ value_states,
623
+ attn_mask=causal_mask,
624
+ dropout_p=self.attn_pdrop if self.training else 0.0,
625
+ )
626
+
627
+ attn_output = attn_output.transpose(1, 2).contiguous()
628
+ attn_output = attn_output.view(bsz, q_len, -1)
629
+
630
+ attn_output = self.out_proj(attn_output)
631
+
632
+ return attn_output, None, past_key_value
633
+
634
+
635
+ DBRX_ATTENTION_CLASSES = {
636
+ "eager": DbrxAttention,
637
+ "flash_attention_2": DbrxFlashAttention2,
638
+ "sdpa": DbrxSdpaAttention,
639
+ }
640
+
641
+
642
+ class DbrxNormAttentionNorm(nn.Module):
643
+ def __init__(self, config: DbrxConfig, block_idx: Optional[int] = None):
644
+ super().__init__()
645
+ self.block_idx = block_idx
646
+ self.resid_pdrop = config.resid_pdrop
647
+ self.norm_1 = nn.LayerNorm(config.d_model, bias=False)
648
+ self.attn = DBRX_ATTENTION_CLASSES[config._attn_implementation](
649
+ config=config,
650
+ block_idx=block_idx,
651
+ )
652
+ self.norm_2 = nn.LayerNorm(config.d_model, bias=False)
653
+
654
+ def forward(
655
+ self,
656
+ hidden_states: torch.Tensor,
657
+ position_ids: torch.LongTensor,
658
+ attention_mask: Optional[torch.Tensor] = None,
659
+ past_key_value: Optional[Cache] = None,
660
+ output_attentions: bool = False,
661
+ use_cache: bool = False,
662
+ cache_position: Optional[torch.LongTensor] = None,
663
+ **kwargs: Any,
664
+ ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
665
+ residual_states = hidden_states
666
+ hidden_states = self.norm_1(hidden_states).to(hidden_states.dtype)
667
+
668
+ hidden_states, attn_weights, past_key_value = self.attn(
669
+ hidden_states=hidden_states,
670
+ attention_mask=attention_mask,
671
+ position_ids=position_ids,
672
+ past_key_value=past_key_value,
673
+ output_attentions=output_attentions,
674
+ use_cache=use_cache,
675
+ cache_position=cache_position,
676
+ **kwargs,
677
+ )
678
+
679
+ hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
680
+ hidden_states = hidden_states + residual_states
681
+
682
+ residual_states = hidden_states
683
+ hidden_states = self.norm_2(hidden_states).to(hidden_states.dtype)
684
+
685
+ return residual_states, hidden_states, attn_weights, past_key_value
686
+
687
+
688
+ class DbrxRouter(nn.Module):
689
+ def __init__(
690
+ self,
691
+ hidden_size: int,
692
+ moe_num_experts: int,
693
+ moe_top_k: int,
694
+ moe_jitter_eps: Optional[float],
695
+ moe_normalize_expert_weights: Optional[float],
696
+ ):
697
+ super().__init__()
698
+ self.hidden_size = hidden_size
699
+ self.moe_num_experts = moe_num_experts
700
+ self.moe_top_k = moe_top_k
701
+ self.moe_jitter_eps = moe_jitter_eps
702
+ self.moe_normalize_expert_weights = moe_normalize_expert_weights
703
+
704
+ self.layer = nn.Linear(self.hidden_size, self.moe_num_experts, bias=False)
705
+
706
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.LongTensor]:
707
+ if self.training and self.moe_jitter_eps is not None:
708
+ hidden_states *= torch.empty_like(hidden_states).uniform_(
709
+ 1.0 - self.moe_jitter_eps, 1.0 + self.moe_jitter_eps
710
+ )
711
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
712
+ weights = self.layer(hidden_states).softmax(dim=-1, dtype=torch.float32)
713
+ top_weights, top_experts = torch.topk(weights, self.moe_top_k, dim=-1)
714
+
715
+ top_weights_scale = (
716
+ torch.norm(top_weights, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True)
717
+ if self.moe_normalize_expert_weights is not None
718
+ else 1.0
719
+ )
720
+ top_weights = top_weights / top_weights_scale
721
+
722
+ weights = weights.to(hidden_states.dtype)
723
+ top_weights = top_weights.to(hidden_states.dtype)
724
+ return weights, top_weights, top_experts
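A small shape sketch (hypothetical sizes) of the router's top-k output:

```python
import torch
from transformers.models.dbrx.modeling_dbrx import DbrxRouter

router = DbrxRouter(hidden_size=64, moe_num_experts=4, moe_top_k=2,
                    moe_jitter_eps=None, moe_normalize_expert_weights=1.0)
hidden = torch.randn(2, 5, 64)                     # [batch, seq_len, d_model]
weights, top_weights, top_experts = router(hidden)
print(weights.shape, top_weights.shape, top_experts.shape)
# torch.Size([10, 4]) torch.Size([10, 2]) torch.Size([10, 2])
```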
725
+
726
+
727
+ class DbrxExpertGLU(nn.Module):
728
+ def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
729
+ super().__init__()
730
+ self.hidden_size = hidden_size
731
+ self.ffn_hidden_size = ffn_hidden_size
732
+ self.moe_num_experts = moe_num_experts
733
+
734
+ self.w1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
735
+ self.v1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
736
+ self.w2 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
737
+
738
+ act_fn_name = ffn_act_fn.get("name", "silu")
739
+ self.activation_fn = ACT2FN[act_fn_name]
740
+
741
+ def forward(
742
+ self, x: torch.Tensor, expert_w1: torch.Tensor, expert_v1: torch.Tensor, expert_w2: torch.Tensor
743
+ ) -> torch.Tensor:
744
+ gate_proj = x.matmul(expert_w1.t())
745
+ up_proj = x.matmul(expert_v1.t())
746
+ gate_proj = self.activation_fn(gate_proj)
747
+ intermediate_states = gate_proj * up_proj
748
+ down_proj = intermediate_states.matmul(expert_w2)
749
+ return down_proj
750
+
751
+
752
+ class DbrxExperts(nn.Module):
753
+ def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
754
+ super().__init__()
755
+ self.moe_num_experts = moe_num_experts
756
+ self.mlp = DbrxExpertGLU(
757
+ hidden_size=hidden_size,
758
+ ffn_hidden_size=ffn_hidden_size,
759
+ moe_num_experts=moe_num_experts,
760
+ ffn_act_fn=ffn_act_fn,
761
+ )
762
+
763
+ def forward(
764
+ self, x: torch.Tensor, weights: torch.Tensor, top_weights: torch.Tensor, top_experts: torch.LongTensor
765
+ ) -> torch.Tensor:
766
+ bsz, q_len, hidden_size = x.shape
767
+ x = x.view(-1, hidden_size)
768
+ out = torch.zeros_like(x)
769
+
770
+ expert_mask = nn.functional.one_hot(top_experts, num_classes=self.moe_num_experts).permute(2, 1, 0)
771
+ # Chunk the expert weights at once to avoid storing the full parameters multiple times in autograd
772
+ w1_chunked = self.mlp.w1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(
773
+ self.moe_num_experts, dim=0
774
+ )
775
+ v1_chunked = self.mlp.v1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(
776
+ self.moe_num_experts, dim=0
777
+ )
778
+ w2_chunked = self.mlp.w2.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(
779
+ self.moe_num_experts, dim=0
780
+ )
781
+ w1_chunked = [w1.squeeze(dim=0) for w1 in w1_chunked]
782
+ v1_chunked = [v1.squeeze(dim=0) for v1 in v1_chunked]
783
+ w2_chunked = [w2.squeeze(dim=0) for w2 in w2_chunked]
784
+ for expert_idx in range(0, self.moe_num_experts):
785
+ topk_idx, token_idx = torch.where(expert_mask[expert_idx])
786
+ if token_idx.shape[0] == 0:
787
+ continue
788
+
789
+ token_list = token_idx
790
+ topk_list = topk_idx
791
+
792
+ expert_tokens = x[None, token_list].reshape(-1, hidden_size)
793
+ expert_out = (
794
+ self.mlp(expert_tokens, w1_chunked[expert_idx], v1_chunked[expert_idx], w2_chunked[expert_idx])
795
+ * top_weights[token_list, topk_list, None]
796
+ )
797
+
798
+ out.index_add_(0, token_idx, expert_out)
799
+
800
+ out = out.reshape(bsz, q_len, hidden_size)
801
+ return out
802
+
803
+
804
+ class DbrxFFN(nn.Module):
805
+ def __init__(self, config: DbrxConfig):
806
+ super().__init__()
807
+
808
+ ffn_config = config.ffn_config
809
+ self.router = DbrxRouter(
810
+ hidden_size=config.d_model,
811
+ moe_num_experts=ffn_config.moe_num_experts,
812
+ moe_top_k=ffn_config.moe_top_k,
813
+ moe_jitter_eps=ffn_config.moe_jitter_eps,
814
+ moe_normalize_expert_weights=ffn_config.moe_normalize_expert_weights,
815
+ )
816
+
817
+ self.experts = DbrxExperts(
818
+ hidden_size=config.d_model,
819
+ ffn_hidden_size=ffn_config.ffn_hidden_size,
820
+ moe_num_experts=ffn_config.moe_num_experts,
821
+ ffn_act_fn=ffn_config.ffn_act_fn,
822
+ )
823
+
824
+ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
825
+ weights, top_weights, top_experts = self.router(x)
826
+ out = self.experts(x, weights, top_weights, top_experts)
827
+ return out, weights
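A minimal end-to-end sketch (tiny, illustrative config; the expert weights are created with `torch.empty` and are uninitialized here, so only shapes are meaningful) of the router-plus-experts path wrapped by `DbrxFFN`:

```python
import torch
from transformers import DbrxConfig
from transformers.models.dbrx.modeling_dbrx import DbrxFFN

config = DbrxConfig(d_model=64, ffn_config={"ffn_hidden_size": 128, "moe_num_experts": 4, "moe_top_k": 2})
ffn = DbrxFFN(config)
hidden = torch.randn(2, 5, 64)               # [batch, seq_len, d_model]
out, router_weights = ffn(hidden)
print(out.shape, router_weights.shape)       # torch.Size([2, 5, 64]) torch.Size([10, 4])
```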
828
+
829
+
830
+ class DbrxBlock(nn.Module):
831
+ def __init__(self, config: DbrxConfig, block_idx: int):
832
+ super().__init__()
833
+ self.hidden_size = config.d_model
834
+ self.resid_pdrop = config.resid_pdrop
835
+ self.block_idx = block_idx
836
+ self.norm_attn_norm = DbrxNormAttentionNorm(
837
+ config=config,
838
+ block_idx=block_idx,
839
+ )
840
+ self.ffn = DbrxFFN(config=config)
841
+
842
+ def forward(
843
+ self,
844
+ hidden_states: torch.Tensor,
845
+ attention_mask: Optional[torch.Tensor] = None,
846
+ position_ids: torch.LongTensor = None,
847
+ past_key_value: Optional[Cache] = None,
848
+ output_attentions: Optional[bool] = False,
849
+ output_router_logits: Optional[bool] = False,
850
+ use_cache: Optional[bool] = False,
851
+ cache_position: Optional[torch.LongTensor] = None,
852
+ **kwargs: Any,
853
+ ) -> Union[
854
+ Tuple[torch.Tensor],
855
+ Tuple[torch.Tensor, Optional[torch.Tensor]],
856
+ Tuple[torch.Tensor, Optional[Cache]],
857
+ Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]],
858
+ Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]],
859
+ Tuple[torch.Tensor, Optional[Cache], Optional[torch.Tensor]],
860
+ Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache], Optional[torch.Tensor]],
861
+ ]:
862
+ """Forward function for DbrxBlock.
863
+
864
+ Args:
865
+ hidden_states (`torch.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
866
+ position_ids (`torch.LongTensor`): position ids of shape `(batch, seq_len)`
867
+ attention_mask (`torch.Tensor`, optional): attention mask of size (batch_size, sequence_length)
868
+ if flash attention is used or (batch_size, 1, query_sequence_length, key_sequence_length)
869
+ if default attention is used.
870
+ past_key_value (`Tuple(torch.Tensor)`, optional): cached past key and value projection states
871
+ output_attentions (`bool`, optional): Whether or not to return the attentions tensors of all
872
+ attention layers. See `attentions` under returned tensors for more detail.
873
+ output_router_logits (`bool`, optional): Whether or not to return the router logits.
874
+ use_cache (`bool`, optional): If set to `True`, `past_key_values` key value states are
875
+ returned and can be used to speed up decoding (see `past_key_values`).
876
+ cache_position (`torch.LongTensor`, optional): position ids of the cache
877
+ """
878
+
879
+ # Norm + Attention + Norm
880
+ resid_states, hidden_states, self_attn_weights, present_key_value = self.norm_attn_norm(
881
+ hidden_states=hidden_states,
882
+ attention_mask=attention_mask,
883
+ position_ids=position_ids,
884
+ past_key_value=past_key_value,
885
+ output_attentions=output_attentions,
886
+ use_cache=use_cache,
887
+ cache_position=cache_position,
888
+ **kwargs,
889
+ )
890
+
891
+ # Fully Connected
892
+ hidden_states, router_logits = self.ffn(hidden_states)
893
+ hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
894
+ hidden_states = resid_states + hidden_states
895
+
896
+ outputs = (hidden_states,)
897
+
898
+ if output_attentions:
899
+ outputs += (self_attn_weights,)
900
+
901
+ if use_cache:
902
+ outputs += (present_key_value,)
903
+
904
+ if output_router_logits:
905
+ outputs += (router_logits,)
906
+
907
+ return outputs
908
+
909
+
910
+ DBRX_START_DOCSTRING = r"""
911
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
912
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
913
+ etc.)
914
+
915
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
916
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
917
+ and behavior.
918
+
919
+ Parameters:
920
+ config ([`DbrxConfig`]):
921
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
922
+ load the weights associated with the model, only the configuration. Check out the
923
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
924
+ """
925
+
926
+
927
+ @add_start_docstrings(
928
+ "The bare DBRX Model outputting raw hidden-states without any specific head on top.",
929
+ DBRX_START_DOCSTRING,
930
+ )
931
+ class DbrxPreTrainedModel(PreTrainedModel):
932
+ config_class = DbrxConfig
933
+ base_model_prefix = "transformer"
934
+ supports_gradient_checkpointing = True
935
+ _no_split_modules = ["DbrxBlock"]
936
+ _skip_keys_device_placement = ["past_key_values"]
937
+ _supports_flash_attn_2 = True
938
+ _supports_sdpa = True
939
+ _supports_cache_class = True
940
+
941
+ def _init_weights(self, module: nn.Module):
942
+ std = self.config.initializer_range
943
+ if isinstance(module, nn.Linear):
944
+ module.weight.data.normal_(mean=0.0, std=std)
945
+ if module.bias is not None:
946
+ module.bias.data.zero_()
947
+ elif isinstance(module, nn.Embedding):
948
+ module.weight.data.normal_(mean=0.0, std=std)
949
+ if module.padding_idx is not None:
950
+ module.weight.data[module.padding_idx].zero_()
951
+ elif isinstance(module, nn.LayerNorm):
952
+ module.weight.data.normal_(mean=0.0, std=std)
953
+ if module.bias is not None:
954
+ module.bias.data.zero_()
955
+ elif isinstance(module, DbrxExpertGLU):
956
+ module.w1.data.normal_(mean=0.0, std=std)
957
+ module.v1.data.normal_(mean=0.0, std=std)
958
+ module.w2.data.normal_(mean=0.0, std=std)
959
+
960
+ def _setup_cache(self, cache_cls: Any, max_batch_size: int, max_cache_len: int):
961
+ if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
962
+ raise ValueError(
963
+ "`static` cache implementation is not compatible with "
964
+ + "`attn_implementation==flash_attention_2`. Make sure to use "
965
+ + "`spda` in the mean time and open an issue at https://github.com/huggingface/transformers."
966
+ )
967
+
968
+ for block in self.transformer.blocks:
969
+ device = block.norm_attn_norm.norm_1.weight.device
970
+ if hasattr(self.config, "_pre_quantization_dtype"):
971
+ dtype = self.config._pre_quantization_dtype
972
+ else:
973
+ dtype = block.norm_attn_norm.attn.out_proj.weight.dtype
974
+ block.norm_attn_norm.attn.past_key_value = cache_cls(
975
+ self.config, max_batch_size, max_cache_len, device=device, dtype=dtype
976
+ )
977
+
978
+ def _reset_cache(self):
979
+ for block in self.transformer.blocks:
980
+ block.norm_attn_norm.attn.past_key_value = None
981
+
982
+
983
+ DBRX_INPUTS_DOCSTRING = r"""
984
+ Args:
985
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
986
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
987
+ it.
988
+
989
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
990
+ [`PreTrainedTokenizer.__call__`] for details.
991
+
992
+ [What are input IDs?](../glossary#input-ids)
993
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
994
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
995
+
996
+ - 1 for tokens that are **not masked**,
997
+ - 0 for tokens that are **masked**.
998
+
999
+ [What are attention masks?](../glossary#attention-mask)
1000
+
1001
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1002
+ [`PreTrainedTokenizer.__call__`] for details.
1003
+
1004
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1005
+ `past_key_values`).
1006
+
1007
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1008
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1009
+ information on the default strategy.
1010
+
1011
+ - 1 indicates the head is **not masked**,
1012
+ - 0 indicates the head is **masked**.
1013
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1014
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1015
+ config.n_positions - 1]`.
1016
+
1017
+ [What are position IDs?](../glossary#position-ids)
1018
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1019
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1020
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
1021
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1022
+
1023
+ Two formats are allowed:
1024
+ - a [`~cache_utils.Cache`] instance;
1025
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1026
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1027
+ cache format.
1028
+
1029
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1030
+ legacy cache format will be returned.
1031
+
1032
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1033
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1034
+ of shape `(batch_size, sequence_length)`.
1035
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1036
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1037
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1038
+ model's internal embedding lookup matrix.
1039
+ use_cache (`bool`, *optional*):
1040
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1041
+ `past_key_values`).
1042
+ output_attentions (`bool`, *optional*):
1043
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1044
+ tensors for more detail.
1045
+ output_hidden_states (`bool`, *optional*):
1046
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1047
+ more detail.
1048
+ output_router_logits (`bool`, *optional*):
1049
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1050
+ should not be returned during inference.
1051
+ return_dict (`bool`, *optional*):
1052
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1053
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1054
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
1055
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1056
+ the complete sequence length.
1057
+ """
1058
+
1059
+
1060
+ @add_start_docstrings(
1061
+ "The bare DBRX Model outputting raw hidden-states without any specific head on top.",
1062
+ DBRX_START_DOCSTRING,
1063
+ )
1064
+ class DbrxModel(DbrxPreTrainedModel):
1065
+ """Transformer decoder consisting of *config.num_hidden_layers*. Each layer is a [`DbrxBlock`] layer.
1066
+
1067
+ Args:
1068
+ config ([`DbrxConfig`]): Model configuration class with all parameters of the model.
1069
+ Initializing with a config file does not load the weights associated with the model, only the
1070
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1071
+ """
1072
+
1073
+ def __init__(self, config: DbrxConfig):
1074
+ super().__init__(config)
1075
+ self.padding_idx = config.pad_token_id
1076
+ self.vocab_size = config.vocab_size
1077
+ self.emb_pdrop = config.emb_pdrop
1078
+
1079
+ self.wte = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
1080
+ self.blocks = nn.ModuleList([DbrxBlock(config, block_idx) for block_idx in range(config.n_layers)])
1081
+ self.norm_f = nn.LayerNorm(config.d_model, bias=False)
1082
+ self.gradient_checkpointing = False
1083
+
1084
+ # Initialize weights and apply final processing
1085
+ self.post_init()
1086
+
1087
+ def get_input_embeddings(self) -> nn.Embedding:
1088
+ return self.wte
1089
+
1090
+ def set_input_embeddings(self, value: nn.Embedding):
1091
+ self.wte = value
1092
+
1093
+ @add_start_docstrings_to_model_forward(DBRX_INPUTS_DOCSTRING)
1094
+ def forward(
1095
+ self,
1096
+ input_ids: Optional[torch.LongTensor] = None,
1097
+ attention_mask: Optional[torch.Tensor] = None,
1098
+ position_ids: Optional[torch.LongTensor] = None,
1099
+ past_key_values: Optional[Cache] = None,
1100
+ inputs_embeds: Optional[torch.Tensor] = None,
1101
+ use_cache: Optional[bool] = None,
1102
+ output_attentions: Optional[bool] = None,
1103
+ output_hidden_states: Optional[bool] = None,
1104
+ output_router_logits: Optional[bool] = None,
1105
+ return_dict: Optional[bool] = None,
1106
+ cache_position: Optional[torch.LongTensor] = None,
1107
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
1108
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1109
+ output_hidden_states = (
1110
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1111
+ )
1112
+ output_router_logits = (
1113
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1114
+ )
1115
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1116
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1117
+
1118
+ if (input_ids is None) ^ (inputs_embeds is not None):
1119
+ raise ValueError(
1120
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
1121
+ )
1122
+
1123
+ if self.gradient_checkpointing and self.training and use_cache:
1124
+ logger.warning_once(
1125
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
1126
+ )
1127
+ use_cache = False
1128
+
1129
+ if inputs_embeds is None:
1130
+ inputs_embeds = self.wte(input_ids)
1131
+
1132
+ inputs_embeds = nn.functional.dropout(inputs_embeds, p=self.emb_pdrop, training=self.training)
1133
+
1134
+ past_seen_tokens = 0
1135
+ if use_cache: # kept for BC (cache positions)
1136
+ if not isinstance(past_key_values, StaticCache):
1137
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1138
+ past_seen_tokens = past_key_values.get_seq_length()
1139
+
1140
+ if cache_position is None:
1141
+ if isinstance(past_key_values, StaticCache):
1142
+ raise ValueError("cache_position is a required argument when using StaticCache.")
1143
+ cache_position = torch.arange(
1144
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
1145
+ )
1146
+
1147
+ if position_ids is None:
1148
+ position_ids = cache_position.unsqueeze(0)
1149
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
1150
+
1151
+ # embed positions
1152
+ hidden_states = inputs_embeds
1153
+
1154
+ # decoder layers
1155
+ all_hidden_states = () if output_hidden_states else None
1156
+ all_self_attns = () if output_attentions else None
1157
+ all_router_logits = () if output_router_logits else None
1158
+ next_decoder_cache = None
1159
+
1160
+ for block in self.blocks:
1161
+ if output_hidden_states:
1162
+ all_hidden_states += (hidden_states,)
1163
+
1164
+ if self.gradient_checkpointing and self.training:
1165
+ block_outputs = self._gradient_checkpointing_func(
1166
+ block.__call__,
1167
+ hidden_states,
1168
+ causal_mask,
1169
+ position_ids,
1170
+ past_key_values,
1171
+ output_attentions,
1172
+ output_router_logits,
1173
+ use_cache,
1174
+ cache_position,
1175
+ )
1176
+ else:
1177
+ block_outputs = block(
1178
+ hidden_states,
1179
+ attention_mask=causal_mask,
1180
+ position_ids=position_ids,
1181
+ past_key_value=past_key_values,
1182
+ output_attentions=output_attentions,
1183
+ output_router_logits=output_router_logits,
1184
+ use_cache=use_cache,
1185
+ cache_position=cache_position,
1186
+ )
1187
+
1188
+ hidden_states = block_outputs[0]
1189
+
1190
+ if use_cache:
1191
+ next_decoder_cache = block_outputs[2 if output_attentions else 1]
1192
+
1193
+ if output_attentions:
1194
+ all_self_attns += (block_outputs[1],)
1195
+
1196
+ if output_router_logits:
1197
+ all_router_logits += (block_outputs[-1],)
1198
+
1199
+ hidden_states = self.norm_f(hidden_states)
1200
+
1201
+ # add hidden states from the last decoder layer
1202
+ if output_hidden_states:
1203
+ all_hidden_states += (hidden_states,)
1204
+
1205
+ next_cache = None
1206
+ if use_cache:
1207
+ next_cache = (
1208
+ next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
1209
+ )
1210
+ if not return_dict:
1211
+ return tuple(
1212
+ v
1213
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
1214
+ if v is not None
1215
+ )
1216
+ return MoeModelOutputWithPast(
1217
+ last_hidden_state=hidden_states,
1218
+ past_key_values=next_cache,
1219
+ hidden_states=all_hidden_states,
1220
+ attentions=all_self_attns,
1221
+ router_logits=all_router_logits,
1222
+ )
1223
+
1224
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
1225
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
1226
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
1227
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
1228
+ def _update_causal_mask(
1229
+ self, attention_mask: Optional[torch.Tensor], input_tensor: torch.Tensor, cache_position: torch.Tensor
1230
+ ) -> Optional[torch.Tensor]:
1231
+ if self.config._attn_implementation == "flash_attention_2":
1232
+ if attention_mask is not None and 0.0 in attention_mask:
1233
+ return attention_mask
1234
+ return None
1235
+
1236
+ dtype, device = input_tensor.dtype, input_tensor.device
1237
+ min_dtype = torch.finfo(dtype).min
1238
+ sequence_length = input_tensor.shape[1]
1239
+ if hasattr(self.blocks[0].norm_attn_norm.attn, "past_key_value"): # static cache
1240
+ target_length = self.config.max_position_embeddings
1241
+ else: # dynamic cache
1242
+ target_length = (
1243
+ attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else cache_position[-1] + 1
1244
+ )
1245
+ target_length = int(target_length)
1246
+
1247
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
1248
+ if sequence_length != 1:
1249
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1250
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1251
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1252
+ if attention_mask is not None:
1253
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1254
+ if attention_mask.dim() == 2:
1255
+ mask_length = attention_mask.shape[-1]
1256
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
1257
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
1258
+ elif attention_mask.dim() == 4:
1259
+ # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with
1260
+ # cache. In that case, the 4D attention mask attends to the newest tokens only.
1261
+ if attention_mask.shape[-2] < cache_position[0] + sequence_length:
1262
+ offset = cache_position[0]
1263
+ else:
1264
+ offset = 0
1265
+ mask_shape = attention_mask.shape
1266
+ mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
1267
+ causal_mask[
1268
+ : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
1269
+ ] = mask_slice
1270
+
1271
+ if (
1272
+ self.config._attn_implementation == "sdpa"
1273
+ and attention_mask is not None
1274
+ and attention_mask.device.type == "cuda"
1275
+ ):
1276
+ # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).
1277
+ is_tracing = (
1278
+ torch.jit.is_tracing()
1279
+ or isinstance(input_tensor, torch.fx.Proxy)
1280
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
1281
+ )
1282
+ if not is_tracing and torch.any(attention_mask != 1):
1283
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1284
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1285
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1286
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1287
+
1288
+ return causal_mask
1289
+
1290
+
1291
+ @add_start_docstrings("The DBRX Model transformer for causal language modeling.", DBRX_START_DOCSTRING)
1292
+ class DbrxForCausalLM(DbrxPreTrainedModel):
1293
+ def __init__(self, config: DbrxConfig):
1294
+ super().__init__(config)
1295
+ self.transformer = DbrxModel(config)
1296
+ self.vocab_size = config.vocab_size
1297
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1298
+ self.moe_loss_weight = config.ffn_config.moe_loss_weight
1299
+ self.num_experts = config.ffn_config.moe_num_experts
1300
+ self.num_experts_per_tok = config.ffn_config.moe_top_k
1301
+
1302
+ # Initialize weights and apply final processing
1303
+ self.post_init()
1304
+
1305
+ def get_input_embeddings(self) -> nn.Embedding:
1306
+ return self.transformer.get_input_embeddings()
1307
+
1308
+ def set_input_embeddings(self, value: nn.Embedding):
1309
+ self.transformer.set_input_embeddings(value)
1310
+
1311
+ def get_output_embeddings(self) -> nn.Linear:
1312
+ return self.lm_head
1313
+
1314
+ def set_output_embeddings(self, new_embeddings: nn.Linear):
1315
+ self.lm_head = new_embeddings
1316
+
1317
+ def set_decoder(self, decoder: DbrxModel):
1318
+ self.transformer = decoder
1319
+
1320
+ def get_decoder(self) -> DbrxModel:
1321
+ return self.transformer
1322
+
1323
+ @add_start_docstrings_to_model_forward(DBRX_INPUTS_DOCSTRING)
1324
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1325
+ def forward(
1326
+ self,
1327
+ input_ids: Optional[torch.LongTensor] = None,
1328
+ attention_mask: Optional[torch.Tensor] = None,
1329
+ position_ids: Optional[torch.LongTensor] = None,
1330
+ past_key_values: Optional[Cache] = None,
1331
+ inputs_embeds: Optional[torch.Tensor] = None,
1332
+ labels: Optional[torch.LongTensor] = None,
1333
+ use_cache: Optional[bool] = None,
1334
+ output_attentions: Optional[bool] = None,
1335
+ output_hidden_states: Optional[bool] = None,
1336
+ output_router_logits: Optional[bool] = None,
1337
+ return_dict: Optional[bool] = None,
1338
+ cache_position: Optional[torch.LongTensor] = None,
1339
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
1340
+ r"""Forward function for causal language modeling.
1341
+
1342
+ Args:
1343
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1344
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1345
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1346
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1347
+
1348
+ Returns:
1349
+
1350
+ Example:
1351
+
1352
+ ```python
1353
+ >>> from transformers import AutoTokenizer, DbrxForCausalLM
1355
+
1356
+ >>> model = DbrxForCausalLM.from_pretrained("databricks/dbrx-instruct")
1357
+ >>> tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct")
1358
+
1359
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1360
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1361
+
1362
+ >>> # Generate
1363
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1364
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1364
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1365
+ ```
1366
+ """
1367
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1368
+ output_hidden_states = (
1369
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1370
+ )
1371
+ output_router_logits = (
1372
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1373
+ )
1374
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1375
+
1376
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1377
+ outputs = self.transformer(
1378
+ input_ids=input_ids,
1379
+ attention_mask=attention_mask,
1380
+ position_ids=position_ids,
1381
+ past_key_values=past_key_values,
1382
+ inputs_embeds=inputs_embeds,
1383
+ use_cache=use_cache,
1384
+ output_attentions=output_attentions,
1385
+ output_hidden_states=output_hidden_states,
1386
+ output_router_logits=output_router_logits,
1387
+ return_dict=return_dict,
1388
+ cache_position=cache_position,
1389
+ )
1390
+
1391
+ hidden_states = outputs[0]
1392
+ logits = self.lm_head(hidden_states)
1393
+
1394
+ loss = None
1395
+ if labels is not None:
1396
+ # Shift so that tokens < n predict n
1397
+ shift_logits = logits[..., :-1, :].contiguous()
1398
+ shift_labels = labels[..., 1:].contiguous()
1399
+ # Flatten the tokens
1400
+ loss_fct = nn.CrossEntropyLoss()
1401
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1402
+ shift_labels = shift_labels.view(-1)
1403
+ # Enable model parallelism
1404
+ shift_labels = shift_labels.to(shift_logits.device)
1405
+ loss = loss_fct(shift_logits, shift_labels)
1406
+
1407
+ aux_loss = None
1408
+ if output_router_logits:
1409
+ aux_loss = load_balancing_loss_func(
1410
+ outputs.router_logits if return_dict else outputs[-1],
1411
+ self.num_experts,
1412
+ self.num_experts_per_tok,
1413
+ attention_mask,
1414
+ )
1415
+ if labels is not None and loss is not None:
1416
+ loss += self.moe_loss_weight * aux_loss.to(loss.device) # make sure to reside in the same device
1417
+
1418
+ if not return_dict:
1419
+ output = (logits,) + outputs[1:]
1420
+ if output_router_logits:
1421
+ output = (aux_loss,) + output
1422
+ return (loss,) + output if loss is not None else output
1423
+
1424
+ return MoeCausalLMOutputWithPast(
1425
+ loss=loss,
1426
+ aux_loss=aux_loss,
1427
+ logits=logits,
1428
+ past_key_values=outputs.past_key_values,
1429
+ hidden_states=outputs.hidden_states,
1430
+ attentions=outputs.attentions,
1431
+ router_logits=outputs.router_logits,
1432
+ )
1433
+
1434
+ def prepare_inputs_for_generation(
1435
+ self,
1436
+ input_ids: torch.Tensor,
1437
+ past_key_values: Optional[Cache] = None,
1438
+ attention_mask: Optional[torch.Tensor] = None,
1439
+ inputs_embeds: Optional[torch.Tensor] = None,
1440
+ **kwargs: Any,
1441
+ ) -> Dict[str, Any]:
1442
+ past_length = 0
1443
+ if past_key_values is not None:
1444
+ if isinstance(past_key_values, Cache):
1445
+ cache_length = past_key_values.get_seq_length()
1446
+ past_length = past_key_values.seen_tokens
1447
+ max_cache_length = past_key_values.get_max_length()
1448
+ else:
1449
+ cache_length = past_length = past_key_values[0][0].shape[2]
1450
+ max_cache_length = None
1451
+
1452
+ # Keep only the unprocessed tokens:
1453
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1454
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1455
+ # input)
1456
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1457
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1458
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1459
+ # input_ids based on the past_length.
1460
+ elif past_length < input_ids.shape[1]:
1461
+ input_ids = input_ids[:, past_length:]
1462
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1463
+
1464
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1465
+ if (
1466
+ max_cache_length is not None
1467
+ and attention_mask is not None
1468
+ and cache_length + input_ids.shape[1] > max_cache_length
1469
+ ):
1470
+ attention_mask = attention_mask[:, -max_cache_length:]
1471
+
1472
+ position_ids = kwargs.get("position_ids", None)
1473
+ if attention_mask is not None and position_ids is None:
1474
+ # create position_ids on the fly for batch generation
1475
+ position_ids = attention_mask.long().cumsum(-1) - 1
1476
+ position_ids.masked_fill_(attention_mask == 0, 1)
1477
+ if past_key_values:
1478
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1479
+
1480
+ if self.generation_config.cache_implementation == "static":
1481
+ # generation with static cache
1482
+ cache_position = kwargs.get("cache_position", None)
1483
+ if cache_position is None:
1484
+ past_length = 0
1485
+ else:
1486
+ past_length = cache_position[-1] + 1
1487
+ input_ids = input_ids[:, past_length:]
1488
+ position_ids = position_ids[:, past_length:] if position_ids is not None else None
1489
+
1490
+ # TODO @gante we should only keep a `cache_position` in generate, and do +=1.
1491
+ # same goes for position ids. Could also help with continued generation.
1492
+ input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
1493
+ cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
1494
+ position_ids = position_ids.contiguous() if position_ids is not None else None
1495
+
1496
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1497
+ if inputs_embeds is not None and past_key_values is None:
1498
+ model_inputs = {"inputs_embeds": inputs_embeds}
1499
+ else:
1500
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
1501
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
1502
+ # TODO: use `next_tokens` directly instead.
1503
+ model_inputs = {"input_ids": input_ids.contiguous()}
1504
+
1505
+ model_inputs.update(
1506
+ {
1507
+ "position_ids": position_ids,
1508
+ "cache_position": cache_position,
1509
+ "past_key_values": past_key_values,
1510
+ "use_cache": kwargs.get("use_cache"),
1511
+ "attention_mask": attention_mask,
1512
+ }
1513
+ )
1514
+ return model_inputs
1515
+
1516
+ @staticmethod
1517
+ def _reorder_cache(past_key_values: Cache, beam_idx: torch.LongTensor):
1518
+ reordered_past = ()
1519
+ for layer_past in past_key_values:
1520
+ reordered_past += (
1521
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1522
+ )
1523
+ return reordered_past
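The additive causal mask built in `_update_causal_mask` above is the least obvious part of this file when read as a diff. The sketch below reproduces the core idea on toy shapes (all sizes are hypothetical and unrelated to real DBRX dimensions, and this is a standalone illustration, not the library function): fill a mask with the dtype minimum, zero out the positions each token may attend to based on `cache_position`, then fold a user-supplied 2D padding mask back in.

```python
# Minimal sketch of the additive causal-mask construction, on assumed toy sizes.
import torch

batch_size, seq_len, target_len = 2, 4, 6          # hypothetical shapes
dtype = torch.float32
min_dtype = torch.finfo(dtype).min

# Positions of the current tokens in the full sequence (no past tokens here).
cache_position = torch.arange(seq_len)

# Start fully masked, then keep min_dtype only for positions each query may NOT attend to.
mask = torch.full((seq_len, target_len), fill_value=min_dtype, dtype=dtype)
mask = torch.triu(mask, diagonal=1)
mask *= torch.arange(target_len) > cache_position.reshape(-1, 1)
mask = mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()

# Fold in a 2D padding mask (1 = keep, 0 = padded), as the model does for
# a user-supplied `attention_mask`.
attention_mask = torch.tensor([[1, 1, 1, 1], [0, 0, 1, 1]])
mask_length = attention_mask.shape[-1]
padding = mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0)
mask[..., :mask_length] = mask[..., :mask_length].masked_fill(padding, min_dtype)

print(mask[1, 0])  # padded columns of the left-padded example stay at min_dtype
```

Rows belonging to left-padded sequences keep `min_dtype` in their padded columns, which is what the attention softmax needs in order to ignore those positions.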
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
18
+
19
+
20
+ _import_structure = {"configuration_detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig", "DetrOnnxConfig"]}
21
+
22
+ try:
23
+ if not is_vision_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["feature_extraction_detr"] = ["DetrFeatureExtractor"]
29
+ _import_structure["image_processing_detr"] = ["DetrImageProcessor"]
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_detr"] = [
38
+ "DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "DetrForObjectDetection",
40
+ "DetrForSegmentation",
41
+ "DetrModel",
42
+ "DetrPreTrainedModel",
43
+ ]
44
+
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig, DetrOnnxConfig
48
+
49
+ try:
50
+ if not is_vision_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .feature_extraction_detr import DetrFeatureExtractor
56
+ from .image_processing_detr import DetrImageProcessor
57
+
58
+ try:
59
+ if not is_torch_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .modeling_detr import (
65
+ DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
66
+ DetrForObjectDetection,
67
+ DetrForSegmentation,
68
+ DetrModel,
69
+ DetrPreTrainedModel,
70
+ )
71
+
72
+ else:
73
+ import sys
74
+
75
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
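The `_LazyModule` assignment at the end of this `__init__.py` means the heavy submodules (`modeling_detr`, `image_processing_detr`) are only imported when one of their attributes is first accessed, so top-level imports stay cheap and optional dependencies are only checked on use, while the `TYPE_CHECKING` branch keeps static analyzers happy with eager imports that never run at runtime. A rough sketch of the same idea using plain PEP 562 module-level `__getattr__` (illustrative only; the attribute names mirror this file, but the mechanism is not the transformers implementation):

```python
# Sketch of lazy attribute resolution in a package __init__.py via PEP 562.
# Illustrative only; transformers uses its own _LazyModule class instead.
import importlib

_LAZY_ATTRS = {
    "DetrConfig": ".configuration_detr",
    "DetrImageProcessor": ".image_processing_detr",
    "DetrModel": ".modeling_detr",
}


def __getattr__(name):
    # Import the submodule only when the attribute is first requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```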
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.25 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/configuration_detr.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/convert_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (7.81 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/convert_detr_to_pytorch.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/feature_extraction_detr.cpython-310.pyc ADDED
Binary file (1.33 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/image_processing_detr.cpython-310.pyc ADDED
Binary file (67.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/__pycache__/modeling_detr.cpython-310.pyc ADDED
Binary file (86.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/configuration_detr.py ADDED
@@ -0,0 +1,284 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Facebook AI Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DETR model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+ from ..auto import CONFIG_MAPPING
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
32
+
33
+
34
+ class DetrConfig(PretrainedConfig):
35
+ r"""
36
+ This is the configuration class to store the configuration of a [`DetrModel`]. It is used to instantiate a DETR
37
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
38
+ defaults will yield a similar configuration to that of the DETR
39
+ [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50) architecture.
40
+
41
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
42
+ documentation from [`PretrainedConfig`] for more information.
43
+
44
+ Args:
45
+ use_timm_backbone (`bool`, *optional*, defaults to `True`):
46
+ Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
47
+ API.
48
+ backbone_config (`PretrainedConfig` or `dict`, *optional*):
49
+ The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
50
+ case it will default to `ResNetConfig()`.
51
+ num_channels (`int`, *optional*, defaults to 3):
52
+ The number of input channels.
53
+ num_queries (`int`, *optional*, defaults to 100):
54
+ Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetrModel`] can
55
+ detect in a single image. For COCO, we recommend 100 queries.
56
+ d_model (`int`, *optional*, defaults to 256):
57
+ Dimension of the layers.
58
+ encoder_layers (`int`, *optional*, defaults to 6):
59
+ Number of encoder layers.
60
+ decoder_layers (`int`, *optional*, defaults to 6):
61
+ Number of decoder layers.
62
+ encoder_attention_heads (`int`, *optional*, defaults to 8):
63
+ Number of attention heads for each attention layer in the Transformer encoder.
64
+ decoder_attention_heads (`int`, *optional*, defaults to 8):
65
+ Number of attention heads for each attention layer in the Transformer decoder.
66
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
67
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
68
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
69
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
70
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
71
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
72
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
73
+ dropout (`float`, *optional*, defaults to 0.1):
74
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
75
+ attention_dropout (`float`, *optional*, defaults to 0.0):
76
+ The dropout ratio for the attention probabilities.
77
+ activation_dropout (`float`, *optional*, defaults to 0.0):
78
+ The dropout ratio for activations inside the fully connected layer.
79
+ init_std (`float`, *optional*, defaults to 0.02):
80
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
81
+ init_xavier_std (`float`, *optional*, defaults to 1):
82
+ The scaling factor used for the Xavier initialization gain in the HM Attention map module.
83
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
84
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
85
+ for more details.
86
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
87
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
88
+ for more details.
89
+ auxiliary_loss (`bool`, *optional*, defaults to `False`):
90
+ Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
91
+ position_embedding_type (`str`, *optional*, defaults to `"sine"`):
92
+ Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
93
+ backbone (`str`, *optional*, defaults to `"resnet50"`):
94
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
95
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
96
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
97
+ use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
98
+ Whether to use pretrained weights for the backbone.
99
+ backbone_kwargs (`dict`, *optional*):
100
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
101
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
102
+ dilation (`bool`, *optional*, defaults to `False`):
103
+ Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
104
+ `use_timm_backbone` = `True`.
105
+ class_cost (`float`, *optional*, defaults to 1):
106
+ Relative weight of the classification error in the Hungarian matching cost.
107
+ bbox_cost (`float`, *optional*, defaults to 5):
108
+ Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
109
+ giou_cost (`float`, *optional*, defaults to 2):
110
+ Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
111
+ mask_loss_coefficient (`float`, *optional*, defaults to 1):
112
+ Relative weight of the Focal loss in the panoptic segmentation loss.
113
+ dice_loss_coefficient (`float`, *optional*, defaults to 1):
114
+ Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
115
+ bbox_loss_coefficient (`float`, *optional*, defaults to 5):
116
+ Relative weight of the L1 bounding box loss in the object detection loss.
117
+ giou_loss_coefficient (`float`, *optional*, defaults to 2):
118
+ Relative weight of the generalized IoU loss in the object detection loss.
119
+ eos_coefficient (`float`, *optional*, defaults to 0.1):
120
+ Relative classification weight of the 'no-object' class in the object detection loss.
121
+
122
+ Examples:
123
+
124
+ ```python
125
+ >>> from transformers import DetrConfig, DetrModel
126
+
127
+ >>> # Initializing a DETR facebook/detr-resnet-50 style configuration
128
+ >>> configuration = DetrConfig()
129
+
130
+ >>> # Initializing a model (with random weights) from the facebook/detr-resnet-50 style configuration
131
+ >>> model = DetrModel(configuration)
132
+
133
+ >>> # Accessing the model configuration
134
+ >>> configuration = model.config
135
+ ```"""
136
+
137
+ model_type = "detr"
138
+ keys_to_ignore_at_inference = ["past_key_values"]
139
+ attribute_map = {
140
+ "hidden_size": "d_model",
141
+ "num_attention_heads": "encoder_attention_heads",
142
+ }
143
+
144
+ def __init__(
145
+ self,
146
+ use_timm_backbone=True,
147
+ backbone_config=None,
148
+ num_channels=3,
149
+ num_queries=100,
150
+ encoder_layers=6,
151
+ encoder_ffn_dim=2048,
152
+ encoder_attention_heads=8,
153
+ decoder_layers=6,
154
+ decoder_ffn_dim=2048,
155
+ decoder_attention_heads=8,
156
+ encoder_layerdrop=0.0,
157
+ decoder_layerdrop=0.0,
158
+ is_encoder_decoder=True,
159
+ activation_function="relu",
160
+ d_model=256,
161
+ dropout=0.1,
162
+ attention_dropout=0.0,
163
+ activation_dropout=0.0,
164
+ init_std=0.02,
165
+ init_xavier_std=1.0,
166
+ auxiliary_loss=False,
167
+ position_embedding_type="sine",
168
+ backbone="resnet50",
169
+ use_pretrained_backbone=True,
170
+ backbone_kwargs=None,
171
+ dilation=False,
172
+ class_cost=1,
173
+ bbox_cost=5,
174
+ giou_cost=2,
175
+ mask_loss_coefficient=1,
176
+ dice_loss_coefficient=1,
177
+ bbox_loss_coefficient=5,
178
+ giou_loss_coefficient=2,
179
+ eos_coefficient=0.1,
180
+ **kwargs,
181
+ ):
182
+ if not use_timm_backbone and use_pretrained_backbone:
183
+ raise ValueError(
184
+ "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`"
185
+ )
186
+
187
+ if backbone_config is not None and backbone is not None:
188
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
189
+
190
+ if backbone_config is not None and use_timm_backbone:
191
+ raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
192
+
193
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
194
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
195
+
196
+ if not use_timm_backbone:
197
+ if backbone_config is None:
198
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
199
+ backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
200
+ elif isinstance(backbone_config, dict):
201
+ backbone_model_type = backbone_config.get("model_type")
202
+ config_class = CONFIG_MAPPING[backbone_model_type]
203
+ backbone_config = config_class.from_dict(backbone_config)
204
+ # set timm attributes to None
205
+ dilation, backbone, use_pretrained_backbone = None, None, None
206
+
207
+ self.use_timm_backbone = use_timm_backbone
208
+ self.backbone_config = backbone_config
209
+ self.num_channels = num_channels
210
+ self.num_queries = num_queries
211
+ self.d_model = d_model
212
+ self.encoder_ffn_dim = encoder_ffn_dim
213
+ self.encoder_layers = encoder_layers
214
+ self.encoder_attention_heads = encoder_attention_heads
215
+ self.decoder_ffn_dim = decoder_ffn_dim
216
+ self.decoder_layers = decoder_layers
217
+ self.decoder_attention_heads = decoder_attention_heads
218
+ self.dropout = dropout
219
+ self.attention_dropout = attention_dropout
220
+ self.activation_dropout = activation_dropout
221
+ self.activation_function = activation_function
222
+ self.init_std = init_std
223
+ self.init_xavier_std = init_xavier_std
224
+ self.encoder_layerdrop = encoder_layerdrop
225
+ self.decoder_layerdrop = decoder_layerdrop
226
+ self.num_hidden_layers = encoder_layers
227
+ self.auxiliary_loss = auxiliary_loss
228
+ self.position_embedding_type = position_embedding_type
229
+ self.backbone = backbone
230
+ self.use_pretrained_backbone = use_pretrained_backbone
231
+ self.backbone_kwargs = backbone_kwargs
232
+ self.dilation = dilation
233
+ # Hungarian matcher
234
+ self.class_cost = class_cost
235
+ self.bbox_cost = bbox_cost
236
+ self.giou_cost = giou_cost
237
+ # Loss coefficients
238
+ self.mask_loss_coefficient = mask_loss_coefficient
239
+ self.dice_loss_coefficient = dice_loss_coefficient
240
+ self.bbox_loss_coefficient = bbox_loss_coefficient
241
+ self.giou_loss_coefficient = giou_loss_coefficient
242
+ self.eos_coefficient = eos_coefficient
243
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
244
+
245
+ @property
246
+ def num_attention_heads(self) -> int:
247
+ return self.encoder_attention_heads
248
+
249
+ @property
250
+ def hidden_size(self) -> int:
251
+ return self.d_model
252
+
253
+ @classmethod
254
+ def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
255
+ """Instantiate a [`DetrConfig`] (or a derived class) from a pre-trained backbone model configuration.
256
+
257
+ Args:
258
+ backbone_config ([`PretrainedConfig`]):
259
+ The backbone configuration.
260
+ Returns:
261
+ [`DetrConfig`]: An instance of a configuration object
262
+ """
263
+ return cls(backbone_config=backbone_config, **kwargs)
264
+
265
+
266
+ class DetrOnnxConfig(OnnxConfig):
267
+ torch_onnx_minimum_version = version.parse("1.11")
268
+
269
+ @property
270
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
271
+ return OrderedDict(
272
+ [
273
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
274
+ ("pixel_mask", {0: "batch"}),
275
+ ]
276
+ )
277
+
278
+ @property
279
+ def atol_for_validation(self) -> float:
280
+ return 1e-5
281
+
282
+ @property
283
+ def default_onnx_opset(self) -> int:
284
+ return 12
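The checks at the top of `DetrConfig.__init__` encode which argument combinations are valid: a native (transformers) backbone requires `use_timm_backbone=False`, `backbone=None`, and `use_pretrained_backbone=False`. A hedged example of that combination is below; the `num_queries` override is purely illustrative, not a recommended value.

```python
# Sketch: configure DETR with a native transformers ResNet backbone instead of timm,
# consistent with the validation logic in DetrConfig.__init__ above.
from transformers import DetrConfig, ResNetConfig

backbone_config = ResNetConfig(out_features=["stage4"])
config = DetrConfig(
    use_timm_backbone=False,        # take the AutoBackbone path
    backbone_config=backbone_config,
    backbone=None,                  # must not be set together with backbone_config
    use_pretrained_backbone=False,  # pretrained weights unsupported in this mode
    num_queries=50,                 # hypothetical override; default is 100
)
print(config.hidden_size)           # 256, aliased to d_model via attribute_map
```

With `use_timm_backbone=True` (the default) you would instead leave `backbone_config=None` and select the backbone by name, e.g. `backbone="resnet50"`.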
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,278 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DETR checkpoints with timm backbone."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from collections import OrderedDict
21
+ from pathlib import Path
22
+
23
+ import requests
24
+ import torch
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor
29
+ from transformers.utils import logging
30
+
31
+
32
+ logging.set_verbosity_info()
33
+ logger = logging.get_logger(__name__)
34
+
35
+ # here we list all keys to be renamed (original name on the left, our name on the right)
36
+ rename_keys = []
37
+ for i in range(6):
38
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
39
+ rename_keys.append(
40
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
41
+ )
42
+ rename_keys.append(
43
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
44
+ )
45
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
46
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
47
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
48
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
49
+ rename_keys.append(
50
+ (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
51
+ )
52
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
53
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
54
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
55
+ # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
56
+ rename_keys.append(
57
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
58
+ )
59
+ rename_keys.append(
60
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
61
+ )
62
+ rename_keys.append(
63
+ (
64
+ f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
65
+ f"decoder.layers.{i}.encoder_attn.out_proj.weight",
66
+ )
67
+ )
68
+ rename_keys.append(
69
+ (
70
+ f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
71
+ f"decoder.layers.{i}.encoder_attn.out_proj.bias",
72
+ )
73
+ )
74
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
75
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
76
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
77
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
78
+ rename_keys.append(
79
+ (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
80
+ )
81
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
82
+ rename_keys.append(
83
+ (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
84
+ )
85
+ rename_keys.append(
86
+ (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
87
+ )
88
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
89
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
90
+
91
+ # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
92
+ rename_keys.extend(
93
+ [
94
+ ("input_proj.weight", "input_projection.weight"),
95
+ ("input_proj.bias", "input_projection.bias"),
96
+ ("query_embed.weight", "query_position_embeddings.weight"),
97
+ ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
98
+ ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
99
+ ("class_embed.weight", "class_labels_classifier.weight"),
100
+ ("class_embed.bias", "class_labels_classifier.bias"),
101
+ ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
102
+ ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
103
+ ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
104
+ ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
105
+ ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
106
+ ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
107
+ ]
108
+ )
109
+
110
+
111
+ def rename_key(state_dict, old, new):
112
+ val = state_dict.pop(old)
113
+ state_dict[new] = val
114
+
115
+
116
+ def rename_backbone_keys(state_dict):
117
+ new_state_dict = OrderedDict()
118
+ for key, value in state_dict.items():
119
+ if "backbone.0.body" in key:
120
+ new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
121
+ new_state_dict[new_key] = value
122
+ else:
123
+ new_state_dict[key] = value
124
+
125
+ return new_state_dict
126
+
127
+
128
+ def read_in_q_k_v(state_dict, is_panoptic=False):
129
+ prefix = ""
130
+ if is_panoptic:
131
+ prefix = "detr."
132
+
133
+ # first: transformer encoder
134
+ for i in range(6):
135
+ # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
136
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
137
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
138
+ # next, add query, keys and values (in that order) to the state dict
139
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
140
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
141
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
142
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
143
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
144
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
145
+ # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
146
+ for i in range(6):
147
+ # read in weights + bias of input projection layer of self-attention
148
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
149
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
150
+ # next, add query, keys and values (in that order) to the state dict
151
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
152
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
153
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
154
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
155
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
156
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
157
+ # read in weights + bias of input projection layer of cross-attention
158
+ in_proj_weight_cross_attn = state_dict.pop(
159
+ f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
160
+ )
161
+ in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
162
+ # next, add query, keys and values (in that order) of cross-attention to the state dict
163
+ state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
164
+ state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
165
+ state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
166
+ state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
167
+ state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
168
+ state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
169
+
170
+
171
+ # We will verify our results on an image of cute cats
172
+ def prepare_img():
173
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
174
+ im = Image.open(requests.get(url, stream=True).raw)
175
+
176
+ return im
177
+
178
+
179
+ @torch.no_grad()
180
+ def convert_detr_checkpoint(model_name, pytorch_dump_folder_path):
181
+ """
182
+ Copy/paste/tweak model's weights to our DETR structure.
183
+ """
184
+
185
+ # load default config
186
+ config = DetrConfig()
187
+ # set backbone and dilation attributes
188
+ if "resnet101" in model_name:
189
+ config.backbone = "resnet101"
190
+ if "dc5" in model_name:
191
+ config.dilation = True
192
+ is_panoptic = "panoptic" in model_name
193
+ if is_panoptic:
194
+ config.num_labels = 250
195
+ else:
196
+ config.num_labels = 91
197
+ repo_id = "huggingface/label-files"
198
+ filename = "coco-detection-id2label.json"
199
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
200
+ id2label = {int(k): v for k, v in id2label.items()}
201
+ config.id2label = id2label
202
+ config.label2id = {v: k for k, v in id2label.items()}
203
+
204
+ # load image processor
205
+ format = "coco_panoptic" if is_panoptic else "coco_detection"
206
+ image_processor = DetrImageProcessor(format=format)
207
+
208
+ # prepare image
209
+ img = prepare_img()
210
+ encoding = image_processor(images=img, return_tensors="pt")
211
+ pixel_values = encoding["pixel_values"]
212
+
213
+ logger.info(f"Converting model {model_name}...")
214
+
215
+ # load original model from torch hub
216
+ detr = torch.hub.load("facebookresearch/detr", model_name, pretrained=True).eval()
217
+ state_dict = detr.state_dict()
218
+ # rename keys
219
+ for src, dest in rename_keys:
220
+ if is_panoptic:
221
+ src = "detr." + src
222
+ rename_key(state_dict, src, dest)
223
+ state_dict = rename_backbone_keys(state_dict)
224
+ # query, key and value matrices need special treatment
225
+ read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
226
+ # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
227
+ prefix = "detr.model." if is_panoptic else "model."
228
+ for key in state_dict.copy().keys():
229
+ if is_panoptic:
230
+ if (
231
+ key.startswith("detr")
232
+ and not key.startswith("class_labels_classifier")
233
+ and not key.startswith("bbox_predictor")
234
+ ):
235
+ val = state_dict.pop(key)
236
+ state_dict["detr.model" + key[4:]] = val
237
+ elif "class_labels_classifier" in key or "bbox_predictor" in key:
238
+ val = state_dict.pop(key)
239
+ state_dict["detr." + key] = val
240
+ elif key.startswith("bbox_attention") or key.startswith("mask_head"):
241
+ continue
242
+ else:
243
+ val = state_dict.pop(key)
244
+ state_dict[prefix + key] = val
245
+ else:
246
+ if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
247
+ val = state_dict.pop(key)
248
+ state_dict[prefix + key] = val
249
+ # finally, create HuggingFace model and load state dict
250
+ model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
251
+ model.load_state_dict(state_dict)
252
+ model.eval()
253
+ # verify our conversion
254
+ original_outputs = detr(pixel_values)
255
+ outputs = model(pixel_values)
256
+ assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
257
+ assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
258
+ if is_panoptic:
259
+ assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
260
+
261
+ # Save model and image processor
262
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
263
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
264
+ model.save_pretrained(pytorch_dump_folder_path)
265
+ image_processor.save_pretrained(pytorch_dump_folder_path)
266
+
267
+
268
+ if __name__ == "__main__":
269
+ parser = argparse.ArgumentParser()
270
+
271
+ parser.add_argument(
272
+ "--model_name", default="detr_resnet50", type=str, help="Name of the DETR model you'd like to convert."
273
+ )
274
+ parser.add_argument(
275
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
276
+ )
277
+ args = parser.parse_args()
278
+ convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
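The trickiest renaming step in this script is `read_in_q_k_v`: PyTorch's `nn.MultiheadAttention` keeps the query/key/value projections stacked in a single `in_proj_weight` of shape `(3 * d_model, d_model)`, and the converter slices that into the separate `q_proj`/`k_proj`/`v_proj` weights the HF model expects. A toy illustration with a hypothetical `d_model` (DETR itself uses 256, hence the hard-coded `:256` slices above):

```python
# Toy illustration of the in_proj splitting performed by read_in_q_k_v.
import torch

d_model = 8                                  # hypothetical; DETR uses 256
in_proj_weight = torch.randn(3 * d_model, d_model)
in_proj_bias = torch.randn(3 * d_model)

# q, k, v are stacked in that order along the first dimension.
q_w, k_w, v_w = (
    in_proj_weight[:d_model],
    in_proj_weight[d_model : 2 * d_model],
    in_proj_weight[2 * d_model :],
)
q_b, k_b, v_b = (
    in_proj_bias[:d_model],
    in_proj_bias[d_model : 2 * d_model],
    in_proj_bias[2 * d_model :],
)

# Sanity check: concatenating the slices reproduces the packed matrix.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
```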
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/convert_detr_to_pytorch.py ADDED
@@ -0,0 +1,386 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DETR checkpoints with native (Transformers) backbone."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+
27
+ from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
28
+ from transformers.utils import logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ def get_detr_config(model_name):
36
+ # initialize config
37
+ if "resnet-50" in model_name:
38
+ backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
39
+ elif "resnet-101" in model_name:
40
+ backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
41
+ else:
42
+ raise ValueError("Model name should include either resnet50 or resnet101")
43
+
44
+ config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
45
+
46
+ # set label attributes
47
+ is_panoptic = "panoptic" in model_name
48
+ if is_panoptic:
49
+ config.num_labels = 250
50
+ else:
51
+ config.num_labels = 91
52
+ repo_id = "huggingface/label-files"
53
+ filename = "coco-detection-id2label.json"
54
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
55
+ id2label = {int(k): v for k, v in id2label.items()}
56
+ config.id2label = id2label
57
+ config.label2id = {v: k for k, v in id2label.items()}
58
+
59
+ return config, is_panoptic
60
+
61
+
62
+ def create_rename_keys(config):
63
+ # here we list all keys to be renamed (original name on the left, our name on the right)
64
+ rename_keys = []
65
+
66
+ # stem
67
+ # fmt: off
68
+ rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
69
+ rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
70
+ rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
71
+ rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
72
+ rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
73
+ # stages
74
+ for stage_idx in range(len(config.backbone_config.depths)):
75
+ for layer_idx in range(config.backbone_config.depths[stage_idx]):
76
+ # shortcut
77
+ if layer_idx == 0:
78
+ rename_keys.append(
79
+ (
80
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
81
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
82
+ )
83
+ )
84
+ rename_keys.append(
85
+ (
86
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
87
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
88
+ )
89
+ )
90
+ rename_keys.append(
91
+ (
92
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
93
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
94
+ )
95
+ )
96
+ rename_keys.append(
97
+ (
98
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
99
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
100
+ )
101
+ )
102
+ rename_keys.append(
103
+ (
104
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
105
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
106
+ )
107
+ )
108
+ # 3 convs
109
+ for i in range(3):
110
+ rename_keys.append(
111
+ (
112
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
113
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
114
+ )
115
+ )
116
+ rename_keys.append(
117
+ (
118
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
119
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
120
+ )
121
+ )
122
+ rename_keys.append(
123
+ (
124
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
125
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
126
+ )
127
+ )
128
+ rename_keys.append(
129
+ (
130
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
131
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
132
+ )
133
+ )
134
+ rename_keys.append(
135
+ (
136
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
137
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
138
+ )
139
+ )
140
+ # fmt: on
141
+
142
+ for i in range(config.encoder_layers):
143
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
144
+ rename_keys.append(
145
+ (
146
+ f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
147
+ f"encoder.layers.{i}.self_attn.out_proj.weight",
148
+ )
149
+ )
150
+ rename_keys.append(
151
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
152
+ )
153
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
154
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
155
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
156
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
157
+ rename_keys.append(
158
+ (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
159
+ )
160
+ rename_keys.append(
161
+ (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")
162
+ )
163
+ rename_keys.append(
164
+ (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")
165
+ )
166
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
167
+ # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
168
+ rename_keys.append(
169
+ (
170
+ f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
171
+ f"decoder.layers.{i}.self_attn.out_proj.weight",
172
+ )
173
+ )
174
+ rename_keys.append(
175
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
176
+ )
177
+ rename_keys.append(
178
+ (
179
+ f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
180
+ f"decoder.layers.{i}.encoder_attn.out_proj.weight",
181
+ )
182
+ )
183
+ rename_keys.append(
184
+ (
185
+ f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
186
+ f"decoder.layers.{i}.encoder_attn.out_proj.bias",
187
+ )
188
+ )
189
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
190
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
191
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
192
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
193
+ rename_keys.append(
194
+ (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
195
+ )
196
+ rename_keys.append(
197
+ (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")
198
+ )
199
+ rename_keys.append(
200
+ (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
201
+ )
202
+ rename_keys.append(
203
+ (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
204
+ )
205
+ rename_keys.append(
206
+ (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")
207
+ )
208
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
209
+
210
+ # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
211
+ rename_keys.extend(
212
+ [
213
+ ("input_proj.weight", "input_projection.weight"),
214
+ ("input_proj.bias", "input_projection.bias"),
215
+ ("query_embed.weight", "query_position_embeddings.weight"),
216
+ ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
217
+ ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
218
+ ("class_embed.weight", "class_labels_classifier.weight"),
219
+ ("class_embed.bias", "class_labels_classifier.bias"),
220
+ ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
221
+ ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
222
+ ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
223
+ ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
224
+ ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
225
+ ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
226
+ ]
227
+ )
228
+
229
+ return rename_keys
230
+
231
+
232
+ def rename_key(state_dict, old, new):
233
+ val = state_dict.pop(old)
234
+ state_dict[new] = val
235
+
236
+
237
+ def read_in_q_k_v(state_dict, is_panoptic=False):
238
+ prefix = ""
239
+ if is_panoptic:
240
+ prefix = "detr."
241
+
242
+ # first: transformer encoder
243
+ for i in range(6):
244
+ # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
245
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
246
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
247
+ # next, add query, keys and values (in that order) to the state dict
248
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
249
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
250
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
251
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
252
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
253
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
254
+ # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
255
+ for i in range(6):
256
+ # read in weights + bias of input projection layer of self-attention
257
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
258
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
259
+ # next, add query, keys and values (in that order) to the state dict
260
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
261
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
262
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
263
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
264
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
265
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
266
+ # read in weights + bias of input projection layer of cross-attention
267
+ in_proj_weight_cross_attn = state_dict.pop(
268
+ f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
269
+ )
270
+ in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
271
+ # next, add query, keys and values (in that order) of cross-attention to the state dict
272
+ state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
273
+ state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
274
+ state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
275
+ state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
276
+ state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
277
+ state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
278
+
279
+
280
+ # We will verify our results on an image of cute cats
281
+ def prepare_img():
282
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
283
+ im = Image.open(requests.get(url, stream=True).raw)
284
+
285
+ return im
286
+
287
+
288
+ @torch.no_grad()
289
+ def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
290
+ """
291
+ Copy/paste/tweak model's weights to our DETR structure.
292
+ """
293
+
294
+ # load default config
295
+ config, is_panoptic = get_detr_config(model_name)
296
+
297
+ # load original model from torch hub
298
+ model_name_to_original_name = {
299
+ "detr-resnet-50": "detr_resnet50",
300
+ "detr-resnet-101": "detr_resnet101",
301
+ }
302
+ logger.info(f"Converting model {model_name}...")
303
+ detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
304
+ state_dict = detr.state_dict()
305
+ # rename keys
306
+ for src, dest in create_rename_keys(config):
307
+ if is_panoptic:
308
+ src = "detr." + src
309
+ rename_key(state_dict, src, dest)
310
+ # query, key and value matrices need special treatment
311
+ read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
312
+ # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
313
+ prefix = "detr.model." if is_panoptic else "model."
314
+ for key in state_dict.copy().keys():
315
+ if is_panoptic:
316
+ if (
317
+ key.startswith("detr")
318
+ and not key.startswith("class_labels_classifier")
319
+ and not key.startswith("bbox_predictor")
320
+ ):
321
+ val = state_dict.pop(key)
322
+ state_dict["detr.model" + key[4:]] = val
323
+ elif "class_labels_classifier" in key or "bbox_predictor" in key:
324
+ val = state_dict.pop(key)
325
+ state_dict["detr." + key] = val
326
+ elif key.startswith("bbox_attention") or key.startswith("mask_head"):
327
+ continue
328
+ else:
329
+ val = state_dict.pop(key)
330
+ state_dict[prefix + key] = val
331
+ else:
332
+ if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
333
+ val = state_dict.pop(key)
334
+ state_dict[prefix + key] = val
335
+
336
+ # finally, create HuggingFace model and load state dict
337
+ model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
338
+ model.load_state_dict(state_dict)
339
+ model.eval()
340
+
341
+ # verify our conversion on an image
342
+ format = "coco_panoptic" if is_panoptic else "coco_detection"
343
+ processor = DetrImageProcessor(format=format)
344
+
345
+ encoding = processor(images=prepare_img(), return_tensors="pt")
346
+ pixel_values = encoding["pixel_values"]
347
+
348
+ original_outputs = detr(pixel_values)
349
+ outputs = model(pixel_values)
350
+
351
+ assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
352
+ assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
353
+ if is_panoptic:
354
+ assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
355
+ print("Looks ok!")
356
+
357
+ if pytorch_dump_folder_path is not None:
358
+ # Save model and image processor
359
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
360
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
361
+ model.save_pretrained(pytorch_dump_folder_path)
362
+ processor.save_pretrained(pytorch_dump_folder_path)
363
+
364
+ if push_to_hub:
365
+ # Upload model and image processor to the hub
366
+ logger.info("Uploading PyTorch model and image processor to the hub...")
367
+ model.push_to_hub(f"nielsr/{model_name}")
368
+ processor.push_to_hub(f"nielsr/{model_name}")
369
+
370
+
371
+ if __name__ == "__main__":
372
+ parser = argparse.ArgumentParser()
373
+
374
+ parser.add_argument(
375
+ "--model_name",
376
+ default="detr-resnet-50",
377
+ type=str,
378
+ choices=["detr-resnet-50", "detr-resnet-101"],
379
+ help="Name of the DETR model you'd like to convert.",
380
+ )
381
+ parser.add_argument(
382
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
383
+ )
384
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
385
+ args = parser.parse_args()
386
+ convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
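The script above already verifies the converted weights against the original torch hub model, so a natural follow-up is plain inference with the converted checkpoint. A minimal sketch, where the hub id "facebook/detr-resnet-50" is used only as a stand-in for wherever the converted model was saved or pushed; `post_process_object_detection` is the standard post-processing step of `DetrImageProcessor`:

    import requests
    import torch
    from PIL import Image
    from transformers import DetrForObjectDetection, DetrImageProcessor

    # Same COCO validation image as used by prepare_img() above
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert logits/boxes into (score, label, box) triples in absolute pixel coordinates
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        print(model.config.id2label[label.item()], round(score.item(), 3), [round(c, 1) for c in box.tolist()])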
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/feature_extraction_detr.py ADDED
@@ -0,0 +1,43 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for DETR."""
16
+
17
+ import warnings
18
+
19
+ from ...image_transforms import rgb_to_id as _rgb_to_id
20
+ from ...utils import logging
21
+ from .image_processing_detr import DetrImageProcessor
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ def rgb_to_id(x):
28
+ warnings.warn(
29
+ "rgb_to_id has moved and will not be importable from this module from v5. "
30
+ "Please import from transformers.image_transforms instead.",
31
+ FutureWarning,
32
+ )
33
+ return _rgb_to_id(x)
34
+
35
+
36
+ class DetrFeatureExtractor(DetrImageProcessor):
37
+ def __init__(self, *args, **kwargs) -> None:
38
+ warnings.warn(
39
+ "The class DetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
40
+ " Please use DetrImageProcessor instead.",
41
+ FutureWarning,
42
+ )
43
+ super().__init__(*args, **kwargs)
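A small sketch of the migration path this shim encodes: `DetrFeatureExtractor` keeps working (emitting the `FutureWarning` defined above) because it is simply a subclass of `DetrImageProcessor`, so switching only changes the class name:

    from transformers import DetrFeatureExtractor, DetrImageProcessor

    legacy = DetrFeatureExtractor()      # emits the FutureWarning defined above
    preferred = DetrImageProcessor()     # drop-in replacement going forward

    # Both objects share the same default preprocessing configuration by construction
    assert isinstance(legacy, DetrImageProcessor)
    assert legacy.size == preferred.size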
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/image_processing_detr.py ADDED
@@ -0,0 +1,1965 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for DETR."""
16
+
17
+ import io
18
+ import pathlib
19
+ from collections import defaultdict
20
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
25
+ from ...image_transforms import (
26
+ PaddingMode,
27
+ center_to_corners_format,
28
+ corners_to_center_format,
29
+ id_to_rgb,
30
+ pad,
31
+ rescale,
32
+ resize,
33
+ rgb_to_id,
34
+ to_channel_dimension_format,
35
+ )
36
+ from ...image_utils import (
37
+ IMAGENET_DEFAULT_MEAN,
38
+ IMAGENET_DEFAULT_STD,
39
+ AnnotationFormat,
40
+ AnnotationType,
41
+ ChannelDimension,
42
+ ImageInput,
43
+ PILImageResampling,
44
+ get_image_size,
45
+ infer_channel_dimension_format,
46
+ is_scaled_image,
47
+ make_list_of_images,
48
+ to_numpy_array,
49
+ valid_images,
50
+ validate_annotations,
51
+ validate_kwargs,
52
+ validate_preprocess_arguments,
53
+ )
54
+ from ...utils import (
55
+ TensorType,
56
+ is_flax_available,
57
+ is_jax_tensor,
58
+ is_scipy_available,
59
+ is_tf_available,
60
+ is_tf_tensor,
61
+ is_torch_available,
62
+ is_torch_tensor,
63
+ is_vision_available,
64
+ logging,
65
+ )
66
+
67
+
68
+ if is_torch_available():
69
+ import torch
70
+ from torch import nn
71
+
72
+
73
+ if is_vision_available():
74
+ import PIL
75
+
76
+
77
+ if is_scipy_available():
78
+ import scipy.special
79
+ import scipy.stats
80
+
81
+
82
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
83
+
84
+ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
85
+
86
+
87
+ # From the original repo: https://github.com/facebookresearch/detr/blob/3af9fa878e73b6894ce3596450a8d9b89d918ca9/datasets/transforms.py#L76
88
+ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
89
+ """
90
+ Computes the output image size given the input image size and the desired output size.
91
+
92
+ Args:
93
+ image_size (`Tuple[int, int]`):
94
+ The input image size.
95
+ size (`int`):
96
+ The desired output size.
97
+ max_size (`int`, *optional*):
98
+ The maximum allowed output size.
99
+ """
100
+ height, width = image_size
101
+ if max_size is not None:
102
+ min_original_size = float(min((height, width)))
103
+ max_original_size = float(max((height, width)))
104
+ if max_original_size / min_original_size * size > max_size:
105
+ size = int(round(max_size * min_original_size / max_original_size))
106
+
107
+ if (height <= width and height == size) or (width <= height and width == size):
108
+ return height, width
109
+
110
+ if width < height:
111
+ ow = size
112
+ oh = int(size * height / width)
113
+ else:
114
+ oh = size
115
+ ow = int(size * width / height)
116
+ return (oh, ow)
117
+
118
+
119
+ def get_resize_output_image_size(
120
+ input_image: np.ndarray,
121
+ size: Union[int, Tuple[int, int], List[int]],
122
+ max_size: Optional[int] = None,
123
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
124
+ ) -> Tuple[int, int]:
125
+ """
126
+ Computes the output image size given the input image size and the desired output size. If the desired output size
127
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
128
+ image size is computed by keeping the aspect ratio of the input image size.
129
+
130
+ Args:
131
+ input_image (`np.ndarray`):
132
+ The image to resize.
133
+ size (`int` or `Tuple[int, int]` or `List[int]`):
134
+ The desired output size.
135
+ max_size (`int`, *optional*):
136
+ The maximum allowed output size.
137
+ input_data_format (`ChannelDimension` or `str`, *optional*):
138
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
139
+ """
140
+ image_size = get_image_size(input_image, input_data_format)
141
+ if isinstance(size, (list, tuple)):
142
+ return size
143
+
144
+ return get_size_with_aspect_ratio(image_size, size, max_size)
145
+
146
+
147
+ def get_numpy_to_framework_fn(arr) -> Callable:
148
+ """
149
+ Returns a function that converts a numpy array to the framework of the input array.
150
+
151
+ Args:
152
+ arr (`np.ndarray`): The array to convert.
153
+ """
154
+ if isinstance(arr, np.ndarray):
155
+ return np.array
156
+ if is_tf_available() and is_tf_tensor(arr):
157
+ import tensorflow as tf
158
+
159
+ return tf.convert_to_tensor
160
+ if is_torch_available() and is_torch_tensor(arr):
161
+ import torch
162
+
163
+ return torch.tensor
164
+ if is_flax_available() and is_jax_tensor(arr):
165
+ import jax.numpy as jnp
166
+
167
+ return jnp.array
168
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
169
+
170
+
171
+ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
172
+ """
173
+ Squeezes an array, but only if the axis specified has dim 1.
174
+ """
175
+ if axis is None:
176
+ return arr.squeeze()
177
+
178
+ try:
179
+ return arr.squeeze(axis=axis)
180
+ except ValueError:
181
+ return arr
182
+
183
+
184
+ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
185
+ image_height, image_width = image_size
186
+ norm_annotation = {}
187
+ for key, value in annotation.items():
188
+ if key == "boxes":
189
+ boxes = value
190
+ boxes = corners_to_center_format(boxes)
191
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
192
+ norm_annotation[key] = boxes
193
+ else:
194
+ norm_annotation[key] = value
195
+ return norm_annotation
196
+
197
+
198
+ # Copied from transformers.models.vilt.image_processing_vilt.max_across_indices
199
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
200
+ """
201
+ Return the maximum value across all indices of an iterable of values.
202
+ """
203
+ return [max(values_i) for values_i in zip(*values)]
204
+
205
+
206
+ # Copied from transformers.models.vilt.image_processing_vilt.get_max_height_width
207
+ def get_max_height_width(
208
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
209
+ ) -> List[int]:
210
+ """
211
+ Get the maximum height and width across all images in a batch.
212
+ """
213
+ if input_data_format is None:
214
+ input_data_format = infer_channel_dimension_format(images[0])
215
+
216
+ if input_data_format == ChannelDimension.FIRST:
217
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
218
+ elif input_data_format == ChannelDimension.LAST:
219
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
220
+ else:
221
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
222
+ return (max_height, max_width)
223
+
224
+
225
+ # Copied from transformers.models.vilt.image_processing_vilt.make_pixel_mask
226
+ def make_pixel_mask(
227
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
228
+ ) -> np.ndarray:
229
+ """
230
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
231
+
232
+ Args:
233
+ image (`np.ndarray`):
234
+ Image to make the pixel mask for.
235
+ output_size (`Tuple[int, int]`):
236
+ Output size of the mask.
237
+ """
238
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
239
+ mask = np.zeros(output_size, dtype=np.int64)
240
+ mask[:input_height, :input_width] = 1
241
+ return mask
242
+
243
+
244
+ # inspired by https://github.com/facebookresearch/detr/blob/master/datasets/coco.py#L33
245
+ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
246
+ """
247
+ Convert a COCO polygon annotation to a mask.
248
+
249
+ Args:
250
+ segmentations (`List[List[float]]`):
251
+ List of polygons, each polygon represented by a list of x-y coordinates.
252
+ height (`int`):
253
+ Height of the mask.
254
+ width (`int`):
255
+ Width of the mask.
256
+ """
257
+ try:
258
+ from pycocotools import mask as coco_mask
259
+ except ImportError:
260
+ raise ImportError("Pycocotools is not installed in your environment.")
261
+
262
+ masks = []
263
+ for polygons in segmentations:
264
+ rles = coco_mask.frPyObjects(polygons, height, width)
265
+ mask = coco_mask.decode(rles)
266
+ if len(mask.shape) < 3:
267
+ mask = mask[..., None]
268
+ mask = np.asarray(mask, dtype=np.uint8)
269
+ mask = np.any(mask, axis=2)
270
+ masks.append(mask)
271
+ if masks:
272
+ masks = np.stack(masks, axis=0)
273
+ else:
274
+ masks = np.zeros((0, height, width), dtype=np.uint8)
275
+
276
+ return masks
277
+
278
+
279
+ # inspired by https://github.com/facebookresearch/detr/blob/master/datasets/coco.py#L50
280
+ def prepare_coco_detection_annotation(
281
+ image,
282
+ target,
283
+ return_segmentation_masks: bool = False,
284
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
285
+ ):
286
+ """
287
+ Convert the target in COCO format into the format expected by DETR.
288
+ """
289
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
290
+
291
+ image_id = target["image_id"]
292
+ image_id = np.asarray([image_id], dtype=np.int64)
293
+
294
+ # Get all COCO annotations for the given image.
295
+ annotations = target["annotations"]
296
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
297
+
298
+ classes = [obj["category_id"] for obj in annotations]
299
+ classes = np.asarray(classes, dtype=np.int64)
300
+
301
+ # for conversion to coco api
302
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
303
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
304
+
305
+ boxes = [obj["bbox"] for obj in annotations]
306
+ # guard against no boxes via resizing
307
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
308
+ boxes[:, 2:] += boxes[:, :2]
309
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
310
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
311
+
312
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
313
+
314
+ new_target = {}
315
+ new_target["image_id"] = image_id
316
+ new_target["class_labels"] = classes[keep]
317
+ new_target["boxes"] = boxes[keep]
318
+ new_target["area"] = area[keep]
319
+ new_target["iscrowd"] = iscrowd[keep]
320
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
321
+
322
+ if annotations and "keypoints" in annotations[0]:
323
+ keypoints = [obj["keypoints"] for obj in annotations]
324
+ # Converting the filtered keypoints list to a numpy array
325
+ keypoints = np.asarray(keypoints, dtype=np.float32)
326
+ # Apply the keep mask here to filter the relevant annotations
327
+ keypoints = keypoints[keep]
328
+ num_keypoints = keypoints.shape[0]
329
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
330
+ new_target["keypoints"] = keypoints
331
+
332
+ if return_segmentation_masks:
333
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
334
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
335
+ new_target["masks"] = masks[keep]
336
+
337
+ return new_target
338
+
339
+
340
+ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
341
+ """
342
+ Compute the bounding boxes around the provided panoptic segmentation masks.
343
+
344
+ Args:
345
+ masks: masks in format `[number_masks, height, width]`, where `number_masks` is the number of masks
346
+
347
+ Returns:
348
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
349
+ """
350
+ if masks.size == 0:
351
+ return np.zeros((0, 4))
352
+
353
+ h, w = masks.shape[-2:]
354
+ y = np.arange(0, h, dtype=np.float32)
355
+ x = np.arange(0, w, dtype=np.float32)
356
+ # see https://github.com/pytorch/pytorch/issues/50276
357
+ y, x = np.meshgrid(y, x, indexing="ij")
358
+
359
+ x_mask = masks * np.expand_dims(x, axis=0)
360
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
361
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
362
+ x_min = x.filled(fill_value=1e8)
363
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
364
+
365
+ y_mask = masks * np.expand_dims(y, axis=0)
366
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
367
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
368
+ y_min = y.filled(fill_value=1e8)
369
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
370
+
371
+ return np.stack([x_min, y_min, x_max, y_max], 1)
372
+
373
+
374
+ def prepare_coco_panoptic_annotation(
375
+ image: np.ndarray,
376
+ target: Dict,
377
+ masks_path: Union[str, pathlib.Path],
378
+ return_masks: bool = True,
379
+ input_data_format: Union[ChannelDimension, str] = None,
380
+ ) -> Dict:
381
+ """
382
+ Prepare a coco panoptic annotation for DETR.
383
+ """
384
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
385
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
386
+
387
+ new_target = {}
388
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
389
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
390
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
391
+
392
+ if "segments_info" in target:
393
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
394
+ masks = rgb_to_id(masks)
395
+
396
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
397
+ masks = masks == ids[:, None, None]
398
+ masks = masks.astype(np.uint8)
399
+ if return_masks:
400
+ new_target["masks"] = masks
401
+ new_target["boxes"] = masks_to_boxes(masks)
402
+ new_target["class_labels"] = np.array(
403
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
404
+ )
405
+ new_target["iscrowd"] = np.asarray(
406
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
407
+ )
408
+ new_target["area"] = np.asarray(
409
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
410
+ )
411
+
412
+ return new_target
413
+
414
+
415
+ def get_segmentation_image(
416
+ masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False
417
+ ):
418
+ h, w = input_size
419
+ final_h, final_w = target_size
420
+
421
+ m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
422
+
423
+ if m_id.shape[-1] == 0:
424
+ # We didn't detect any mask :(
425
+ m_id = np.zeros((h, w), dtype=np.int64)
426
+ else:
427
+ m_id = m_id.argmax(-1).reshape(h, w)
428
+
429
+ if deduplicate:
430
+ # Merge the masks corresponding to the same stuff class
431
+ for equiv in stuff_equiv_classes.values():
432
+ for eq_id in equiv:
433
+ m_id[m_id == eq_id] = equiv[0]
434
+
435
+ seg_img = id_to_rgb(m_id)
436
+ seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
437
+ return seg_img
438
+
439
+
440
+ def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
441
+ final_h, final_w = target_size
442
+ np_seg_img = seg_img.astype(np.uint8)
443
+ np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
444
+ m_id = rgb_to_id(np_seg_img)
445
+ area = [(m_id == i).sum() for i in range(n_classes)]
446
+ return area
447
+
448
+
449
+ def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
450
+ probs = scipy.special.softmax(logits, axis=-1)
451
+ labels = probs.argmax(-1, keepdims=True)
452
+ scores = np.take_along_axis(probs, labels, axis=-1)
453
+ scores, labels = scores.squeeze(-1), labels.squeeze(-1)
454
+ return scores, labels
455
+
456
+
457
+ def post_process_panoptic_sample(
458
+ out_logits: np.ndarray,
459
+ masks: np.ndarray,
460
+ boxes: np.ndarray,
461
+ processed_size: Tuple[int, int],
462
+ target_size: Tuple[int, int],
463
+ is_thing_map: Dict,
464
+ threshold=0.85,
465
+ ) -> Dict:
466
+ """
467
+ Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample.
468
+
469
+ Args:
470
+ out_logits (`torch.Tensor`):
471
+ The logits for this sample.
472
+ masks (`torch.Tensor`):
473
+ The predicted segmentation masks for this sample.
474
+ boxes (`torch.Tensor`):
475
+ The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,
476
+ width, height)` and values between `[0, 1]`, relative to the size of the image (disregarding padding).
477
+ processed_size (`Tuple[int, int]`):
478
+ The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
479
+ after data augmentation but before batching.
480
+ target_size (`Tuple[int, int]`):
481
+ The target size of the image, `(height, width)` corresponding to the requested final size of the
482
+ prediction.
483
+ is_thing_map (`Dict`):
484
+ A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
485
+ threshold (`float`, *optional*, defaults to 0.85):
486
+ The threshold used to binarize the segmentation masks.
487
+ """
488
+ # we filter out empty queries and detections below the threshold
489
+ scores, labels = score_labels_from_class_probabilities(out_logits)
490
+ keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
491
+
492
+ cur_scores = scores[keep]
493
+ cur_classes = labels[keep]
494
+ cur_boxes = center_to_corners_format(boxes[keep])
495
+
496
+ if len(cur_boxes) != len(cur_classes):
497
+ raise ValueError("Not as many boxes as there are classes")
498
+
499
+ cur_masks = masks[keep]
500
+ cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
501
+ cur_masks = safe_squeeze(cur_masks, 1)
502
+ b, h, w = cur_masks.shape
503
+
504
+ # It may be that we have several predicted masks for the same stuff class.
505
+ # In the following, we track the list of mask ids for each stuff class (they are merged later on)
506
+ cur_masks = cur_masks.reshape(b, -1)
507
+ stuff_equiv_classes = defaultdict(list)
508
+ for k, label in enumerate(cur_classes):
509
+ if not is_thing_map[label]:
510
+ stuff_equiv_classes[label].append(k)
511
+
512
+ seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
513
+ area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))
514
+
515
+ # We filter out any mask that is too small
516
+ if cur_classes.size > 0:
517
+ # We now filter empty masks as long as we find some
518
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
519
+ while filtered_small.any():
520
+ cur_masks = cur_masks[~filtered_small]
521
+ cur_scores = cur_scores[~filtered_small]
522
+ cur_classes = cur_classes[~filtered_small]
523
+ seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
524
+ area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
525
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
526
+ else:
527
+ cur_classes = np.ones((1, 1), dtype=np.int64)
528
+
529
+ segments_info = [
530
+ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
531
+ for i, (cat, a) in enumerate(zip(cur_classes, area))
532
+ ]
533
+ del cur_classes
534
+
535
+ with io.BytesIO() as out:
536
+ PIL.Image.fromarray(seg_img).save(out, format="PNG")
537
+ predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
538
+
539
+ return predictions
540
+
541
+
542
+ def resize_annotation(
543
+ annotation: Dict[str, Any],
544
+ orig_size: Tuple[int, int],
545
+ target_size: Tuple[int, int],
546
+ threshold: float = 0.5,
547
+ resample: PILImageResampling = PILImageResampling.NEAREST,
548
+ ):
549
+ """
550
+ Resizes an annotation to a target size.
551
+
552
+ Args:
553
+ annotation (`Dict[str, Any]`):
554
+ The annotation dictionary.
555
+ orig_size (`Tuple[int, int]`):
556
+ The original size of the input image.
557
+ target_size (`Tuple[int, int]`):
558
+ The target size of the image, as returned by the preprocessing `resize` step.
559
+ threshold (`float`, *optional*, defaults to 0.5):
560
+ The threshold used to binarize the segmentation masks.
561
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
562
+ The resampling filter to use when resizing the masks.
563
+ """
564
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
565
+ ratio_height, ratio_width = ratios
566
+
567
+ new_annotation = {}
568
+ new_annotation["size"] = target_size
569
+
570
+ for key, value in annotation.items():
571
+ if key == "boxes":
572
+ boxes = value
573
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
574
+ new_annotation["boxes"] = scaled_boxes
575
+ elif key == "area":
576
+ area = value
577
+ scaled_area = area * (ratio_width * ratio_height)
578
+ new_annotation["area"] = scaled_area
579
+ elif key == "masks":
580
+ masks = value[:, None]
581
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
582
+ masks = masks.astype(np.float32)
583
+ masks = masks[:, 0] > threshold
584
+ new_annotation["masks"] = masks
585
+ elif key == "size":
586
+ new_annotation["size"] = target_size
587
+ else:
588
+ new_annotation[key] = value
589
+
590
+ return new_annotation
591
+
592
+
593
+ # TODO - (Amy) make compatible with other frameworks
594
+ def binary_mask_to_rle(mask):
595
+ """
596
+ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.
597
+
598
+ Args:
599
+ mask (`torch.Tensor` or `numpy.array`):
600
+ A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
601
+ segment_id or class_id.
602
+ Returns:
603
+ `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
604
+ format.
605
+ """
606
+ if is_torch_tensor(mask):
607
+ mask = mask.numpy()
608
+
609
+ pixels = mask.flatten()
610
+ pixels = np.concatenate([[0], pixels, [0]])
611
+ runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
612
+ runs[1::2] -= runs[::2]
613
+ return list(runs)
614
+
615
+
616
+ # TODO - (Amy) make compatible with other frameworks
617
+ def convert_segmentation_to_rle(segmentation):
618
+ """
619
+ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.
620
+
621
+ Args:
622
+ segmentation (`torch.Tensor` or `numpy.array`):
623
+ A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
624
+ Returns:
625
+ `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
626
+ """
627
+ segment_ids = torch.unique(segmentation)
628
+
629
+ run_length_encodings = []
630
+ for idx in segment_ids:
631
+ mask = torch.where(segmentation == idx, 1, 0)
632
+ rle = binary_mask_to_rle(mask)
633
+ run_length_encodings.append(rle)
634
+
635
+ return run_length_encodings
636
+
637
+
638
+ def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
639
+ """
640
+ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and
641
+ `labels`.
642
+
643
+ Args:
644
+ masks (`torch.Tensor`):
645
+ A tensor of shape `(num_queries, height, width)`.
646
+ scores (`torch.Tensor`):
647
+ A tensor of shape `(num_queries)`.
648
+ labels (`torch.Tensor`):
649
+ A tensor of shape `(num_queries)`.
650
+ object_mask_threshold (`float`):
651
+ A number between 0 and 1 used to binarize the masks.
652
+ Raises:
653
+ `ValueError`: Raised when the first dimension doesn't match in all input tensors.
654
+ Returns:
655
+ `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]`: The `masks`, `scores` and `labels` with entries whose
656
+ score is below `object_mask_threshold` removed.
657
+ """
658
+ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
659
+ raise ValueError("mask, scores and labels must have the same shape!")
660
+
661
+ to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
662
+
663
+ return masks[to_keep], scores[to_keep], labels[to_keep]
664
+
665
+
666
+ def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
667
+ # Get the mask associated with the k class
668
+ mask_k = mask_labels == k
669
+ mask_k_area = mask_k.sum()
670
+
671
+ # Compute the area of all the stuff in query k
672
+ original_area = (mask_probs[k] >= mask_threshold).sum()
673
+ mask_exists = mask_k_area > 0 and original_area > 0
674
+
675
+ # Eliminate disconnected tiny segments
676
+ if mask_exists:
677
+ area_ratio = mask_k_area / original_area
678
+ if not area_ratio.item() > overlap_mask_area_threshold:
679
+ mask_exists = False
680
+
681
+ return mask_exists, mask_k
682
+
683
+
684
+ def compute_segments(
685
+ mask_probs,
686
+ pred_scores,
687
+ pred_labels,
688
+ mask_threshold: float = 0.5,
689
+ overlap_mask_area_threshold: float = 0.8,
690
+ label_ids_to_fuse: Optional[Set[int]] = None,
691
+ target_size: Tuple[int, int] = None,
692
+ ):
693
+ height = mask_probs.shape[1] if target_size is None else target_size[0]
694
+ width = mask_probs.shape[2] if target_size is None else target_size[1]
695
+
696
+ segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
697
+ segments: List[Dict] = []
698
+
699
+ if target_size is not None:
700
+ mask_probs = nn.functional.interpolate(
701
+ mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
702
+ )[0]
703
+
704
+ current_segment_id = 0
705
+
706
+ # Weigh each mask by its prediction score
707
+ mask_probs *= pred_scores.view(-1, 1, 1)
708
+ mask_labels = mask_probs.argmax(0) # [height, width]
709
+
710
+ # Keep track of instances of each class
711
+ stuff_memory_list: Dict[str, int] = {}
712
+ for k in range(pred_labels.shape[0]):
713
+ pred_class = pred_labels[k].item()
714
+ should_fuse = pred_class in label_ids_to_fuse
715
+
716
+ # Check if the mask exists and is large enough to be a segment
717
+ mask_exists, mask_k = check_segment_validity(
718
+ mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
719
+ )
720
+
721
+ if mask_exists:
722
+ if pred_class in stuff_memory_list:
723
+ current_segment_id = stuff_memory_list[pred_class]
724
+ else:
725
+ current_segment_id += 1
726
+
727
+ # Add current object segment to final segmentation map
728
+ segmentation[mask_k] = current_segment_id
729
+ segment_score = round(pred_scores[k].item(), 6)
730
+ segments.append(
731
+ {
732
+ "id": current_segment_id,
733
+ "label_id": pred_class,
734
+ "was_fused": should_fuse,
735
+ "score": segment_score,
736
+ }
737
+ )
738
+ if should_fuse:
739
+ stuff_memory_list[pred_class] = current_segment_id
740
+
741
+ return segmentation, segments
742
+
743
+
744
+ class DetrImageProcessor(BaseImageProcessor):
745
+ r"""
746
+ Constructs a Detr image processor.
747
+
748
+ Args:
749
+ format (`str`, *optional*, defaults to `"coco_detection"`):
750
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
751
+ do_resize (`bool`, *optional*, defaults to `True`):
752
+ Controls whether to resize the image's `(height, width)` dimensions to the specified `size`. Can be
753
+ overridden by the `do_resize` parameter in the `preprocess` method.
754
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
755
+ Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter
756
+ in the `preprocess` method.
757
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
758
+ Resampling filter to use if resizing the image.
759
+ do_rescale (`bool`, *optional*, defaults to `True`):
760
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
761
+ `do_rescale` parameter in the `preprocess` method.
762
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
763
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
764
+ `preprocess` method.
765
+ do_normalize (`bool`, *optional*, defaults to True):
766
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
767
+ `preprocess` method.
768
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
769
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
770
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
771
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
772
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
773
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
774
+ do_convert_annotations (`bool`, *optional*, defaults to `True`):
775
+ Controls whether to convert the annotations to the format expected by the DETR model. Converts the
776
+ bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
777
+ Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
778
+ do_pad (`bool`, *optional*, defaults to `True`):
779
+ Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
780
+ method. If `True` will pad the images in the batch to the largest height and width in the batch.
781
+ Padding will be applied to the bottom and right of the image with zeros.
782
+ """
783
+
784
+ model_input_names = ["pixel_values", "pixel_mask"]
785
+
786
+ def __init__(
787
+ self,
788
+ format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
789
+ do_resize: bool = True,
790
+ size: Dict[str, int] = None,
791
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
792
+ do_rescale: bool = True,
793
+ rescale_factor: Union[int, float] = 1 / 255,
794
+ do_normalize: bool = True,
795
+ image_mean: Union[float, List[float]] = None,
796
+ image_std: Union[float, List[float]] = None,
797
+ do_convert_annotations: Optional[bool] = None,
798
+ do_pad: bool = True,
799
+ **kwargs,
800
+ ) -> None:
801
+ if "pad_and_return_pixel_mask" in kwargs:
802
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
803
+
804
+ if "max_size" in kwargs:
805
+ logger.warning_once(
806
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
807
+ "Please specify in `size['longest_edge'] instead`.",
808
+ )
809
+ max_size = kwargs.pop("max_size")
810
+ else:
811
+ max_size = None if size is None else 1333
812
+
813
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
814
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
815
+
816
+ # Backwards compatibility
817
+ if do_convert_annotations is None:
818
+ do_convert_annotations = do_normalize
819
+
820
+ super().__init__(**kwargs)
821
+ self.format = format
822
+ self.do_resize = do_resize
823
+ self.size = size
824
+ self.resample = resample
825
+ self.do_rescale = do_rescale
826
+ self.rescale_factor = rescale_factor
827
+ self.do_normalize = do_normalize
828
+ self.do_convert_annotations = do_convert_annotations
829
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
830
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
831
+ self.do_pad = do_pad
832
+ self._valid_processor_keys = [
833
+ "images",
834
+ "annotations",
835
+ "return_segmentation_masks",
836
+ "masks_path",
837
+ "do_resize",
838
+ "size",
839
+ "resample",
840
+ "do_rescale",
841
+ "rescale_factor",
842
+ "do_normalize",
843
+ "do_convert_annotations",
844
+ "image_mean",
845
+ "image_std",
846
+ "do_pad",
847
+ "format",
848
+ "return_tensors",
849
+ "data_format",
850
+ "input_data_format",
851
+ ]
852
+
853
+ @classmethod
854
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
855
+ """
856
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
857
+ created using from_dict and kwargs e.g. `DetrImageProcessor.from_pretrained(checkpoint, size=600,
858
+ max_size=800)`
859
+ """
860
+ image_processor_dict = image_processor_dict.copy()
861
+ if "max_size" in kwargs:
862
+ image_processor_dict["max_size"] = kwargs.pop("max_size")
863
+ if "pad_and_return_pixel_mask" in kwargs:
864
+ image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
865
+ return super().from_dict(image_processor_dict, **kwargs)
866
+
867
+ def prepare_annotation(
868
+ self,
869
+ image: np.ndarray,
870
+ target: Dict,
871
+ format: Optional[AnnotationFormat] = None,
872
+ return_segmentation_masks: bool = None,
873
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
874
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
875
+ ) -> Dict:
876
+ """
877
+ Prepare an annotation for feeding into DETR model.
878
+ """
879
+ format = format if format is not None else self.format
880
+
881
+ if format == AnnotationFormat.COCO_DETECTION:
882
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
883
+ target = prepare_coco_detection_annotation(
884
+ image, target, return_segmentation_masks, input_data_format=input_data_format
885
+ )
886
+ elif format == AnnotationFormat.COCO_PANOPTIC:
887
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
888
+ target = prepare_coco_panoptic_annotation(
889
+ image,
890
+ target,
891
+ masks_path=masks_path,
892
+ return_masks=return_segmentation_masks,
893
+ input_data_format=input_data_format,
894
+ )
895
+ else:
896
+ raise ValueError(f"Format {format} is not supported.")
897
+ return target
898
+
899
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
900
+ logger.warning_once(
901
+ "The `prepare` method is deprecated and will be removed in a v4.33. "
902
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
903
+ "does not return the image anymore.",
904
+ )
905
+ target = self.prepare_annotation(image, target, format=self.format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path)
906
+ return image, target
907
+
908
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
909
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
910
+ return convert_coco_poly_to_mask(*args, **kwargs)
911
+
912
+ def prepare_coco_detection(self, *args, **kwargs):
913
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
914
+ return prepare_coco_detection_annotation(*args, **kwargs)
915
+
916
+ def prepare_coco_panoptic(self, *args, **kwargs):
917
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
918
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
919
+
920
+ def resize(
921
+ self,
922
+ image: np.ndarray,
923
+ size: Dict[str, int],
924
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
925
+ data_format: Optional[ChannelDimension] = None,
926
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
927
+ **kwargs,
928
+ ) -> np.ndarray:
929
+ """
930
+ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
931
+ int, smaller edge of the image will be matched to this number.
932
+
933
+ Args:
934
+ image (`np.ndarray`):
935
+ Image to resize.
936
+ size (`Dict[str, int]`):
937
+ Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or
938
+ `height` and `width`.
939
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
940
+ Resampling filter to use if resizing the image.
941
+ data_format (`str` or `ChannelDimension`, *optional*):
942
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
943
+ image is used.
944
+ input_data_format (`ChannelDimension` or `str`, *optional*):
945
+ The channel dimension format of the input image. If not provided, it will be inferred.
946
+ """
947
+ if "max_size" in kwargs:
948
+ logger.warning_once(
949
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
950
+ "Please specify in `size['longest_edge'] instead`.",
951
+ )
952
+ max_size = kwargs.pop("max_size")
953
+ else:
954
+ max_size = None
955
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
956
+ if "shortest_edge" in size and "longest_edge" in size:
957
+ size = get_resize_output_image_size(
958
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
959
+ )
960
+ elif "height" in size and "width" in size:
961
+ size = (size["height"], size["width"])
962
+ else:
963
+ raise ValueError(
964
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
965
+ f" {size.keys()}."
966
+ )
967
+ image = resize(
968
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
969
+ )
970
+ return image
971
+
972
+ def resize_annotation(
973
+ self,
974
+ annotation,
975
+ orig_size,
976
+ size,
977
+ resample: PILImageResampling = PILImageResampling.NEAREST,
978
+ ) -> Dict:
979
+ """
980
+ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
981
+ to this number.
982
+ """
983
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
984
+
985
+ # TODO (Amy) - update to use `rescale_factor` instead of `scale`
986
+ def rescale(
987
+ self,
988
+ image: np.ndarray,
989
+ rescale_factor: float,
990
+ data_format: Optional[Union[str, ChannelDimension]] = None,
991
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
992
+ ) -> np.ndarray:
993
+ """
994
+ Rescale the image by the given factor. image = image * rescale_factor.
995
+
996
+ Args:
997
+ image (`np.ndarray`):
998
+ Image to rescale.
999
+ rescale_factor (`float`):
1000
+ The value to use for rescaling.
1001
+ data_format (`str` or `ChannelDimension`, *optional*):
1002
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
1003
+ image is used. Can be one of:
1004
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1005
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1006
+ input_data_format (`str` or `ChannelDimension`, *optional*):
1007
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
1008
+ one of:
1009
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1010
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1011
+ """
1012
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
1013
+
1014
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
1015
+ """
1016
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
1017
+ `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
1018
+ """
1019
+ return normalize_annotation(annotation, image_size=image_size)
1020
+
1021
+ def _update_annotation_for_padded_image(
1022
+ self,
1023
+ annotation: Dict,
1024
+ input_image_size: Tuple[int, int],
1025
+ output_image_size: Tuple[int, int],
1026
+ padding,
1027
+ update_bboxes,
1028
+ ) -> Dict:
1029
+ """
1030
+ Update the annotation for a padded image.
1031
+ """
1032
+ new_annotation = {}
1033
+ new_annotation["size"] = output_image_size
1034
+
1035
+ for key, value in annotation.items():
1036
+ if key == "masks":
1037
+ masks = value
1038
+ masks = pad(
1039
+ masks,
1040
+ padding,
1041
+ mode=PaddingMode.CONSTANT,
1042
+ constant_values=0,
1043
+ input_data_format=ChannelDimension.FIRST,
1044
+ )
1045
+ masks = safe_squeeze(masks, 1)
1046
+ new_annotation["masks"] = masks
1047
+ elif key == "boxes" and update_bboxes:
1048
+ boxes = value
1049
+ boxes *= np.asarray(
1050
+ [
1051
+ input_image_size[1] / output_image_size[1],
1052
+ input_image_size[0] / output_image_size[0],
1053
+ input_image_size[1] / output_image_size[1],
1054
+ input_image_size[0] / output_image_size[0],
1055
+ ]
1056
+ )
1057
+ new_annotation["boxes"] = boxes
1058
+ elif key == "size":
1059
+ new_annotation["size"] = output_image_size
1060
+ else:
1061
+ new_annotation[key] = value
1062
+ return new_annotation
1063
+
1064
+ def _pad_image(
1065
+ self,
1066
+ image: np.ndarray,
1067
+ output_size: Tuple[int, int],
1068
+ annotation: Optional[Dict[str, Any]] = None,
1069
+ constant_values: Union[float, Iterable[float]] = 0,
1070
+ data_format: Optional[ChannelDimension] = None,
1071
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1072
+ update_bboxes: bool = True,
1073
+ ) -> np.ndarray:
1074
+ """
1075
+ Pad an image with zeros to the given size.
1076
+ """
1077
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
1078
+ output_height, output_width = output_size
1079
+
1080
+ pad_bottom = output_height - input_height
1081
+ pad_right = output_width - input_width
1082
+ padding = ((0, pad_bottom), (0, pad_right))
1083
+ padded_image = pad(
1084
+ image,
1085
+ padding,
1086
+ mode=PaddingMode.CONSTANT,
1087
+ constant_values=constant_values,
1088
+ data_format=data_format,
1089
+ input_data_format=input_data_format,
1090
+ )
1091
+ if annotation is not None:
1092
+ annotation = self._update_annotation_for_padded_image(
1093
+ annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
1094
+ )
1095
+ return padded_image, annotation
1096
+
1097
+ def pad(
1098
+ self,
1099
+ images: List[np.ndarray],
1100
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1101
+ constant_values: Union[float, Iterable[float]] = 0,
1102
+ return_pixel_mask: bool = True,
1103
+ return_tensors: Optional[Union[str, TensorType]] = None,
1104
+ data_format: Optional[ChannelDimension] = None,
1105
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1106
+ update_bboxes: bool = True,
1107
+ ) -> BatchFeature:
1108
+ """
1109
+ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
1110
+ in the batch and optionally returns their corresponding pixel mask.
1111
+
1112
+ Args:
1113
+ images (List[`np.ndarray`]):
1114
+ Images to pad.
1115
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1116
+ Annotations to transform according to the padding that is applied to the images.
1117
+ constant_values (`float` or `Iterable[float]`, *optional*):
1118
+ The value to use for the padding if `mode` is `"constant"`.
1119
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
1120
+ Whether to return a pixel mask.
1121
+ return_tensors (`str` or `TensorType`, *optional*):
1122
+ The type of tensors to return. Can be one of:
1123
+ - Unset: Return a list of `np.ndarray`.
1124
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
1125
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
1126
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
1127
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
1128
+ data_format (`str` or `ChannelDimension`, *optional*):
1129
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
1130
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1131
+ The channel dimension format of the input image. If not provided, it will be inferred.
1132
+ update_bboxes (`bool`, *optional*, defaults to `True`):
1133
+ Whether to update the bounding boxes in the annotations to match the padded images. If the
1134
+ bounding boxes have not been converted to relative coordinates and `(center_x, center_y, width, height)`
1135
+ format, the bounding boxes will not be updated.
1136
+ """
1137
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
1138
+
1139
+ annotation_list = annotations if annotations is not None else [None] * len(images)
1140
+ padded_images = []
1141
+ padded_annotations = []
1142
+ for image, annotation in zip(images, annotation_list):
1143
+ padded_image, padded_annotation = self._pad_image(
1144
+ image,
1145
+ pad_size,
1146
+ annotation,
1147
+ constant_values=constant_values,
1148
+ data_format=data_format,
1149
+ input_data_format=input_data_format,
1150
+ update_bboxes=update_bboxes,
1151
+ )
1152
+ padded_images.append(padded_image)
1153
+ padded_annotations.append(padded_annotation)
1154
+
1155
+ data = {"pixel_values": padded_images}
1156
+
1157
+ if return_pixel_mask:
1158
+ masks = [
1159
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
1160
+ for image in images
1161
+ ]
1162
+ data["pixel_mask"] = masks
1163
+
1164
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
1165
+
1166
+ if annotations is not None:
1167
+ encoded_inputs["labels"] = [
1168
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
1169
+ ]
1170
+
1171
+ return encoded_inputs
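A small sketch of the padding behavior described above, using two dummy images of different sizes (the shapes in the comments assume the default channels-last layout is preserved):

    import numpy as np
    from transformers import DetrImageProcessor

    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    images = [
        np.zeros((480, 640, 3), dtype=np.uint8),
        np.zeros((400, 500, 3), dtype=np.uint8),
    ]
    batch = processor.pad(images, return_pixel_mask=True, return_tensors="np")
    print(batch["pixel_values"].shape)  # (2, 480, 640, 3): both images padded to the max height/width
    print(batch["pixel_mask"].shape)    # (2, 480, 640): 1 marks real pixels, 0 marks padding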
1172
+
1173
+ def preprocess(
1174
+ self,
1175
+ images: ImageInput,
1176
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1177
+ return_segmentation_masks: bool = None,
1178
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
1179
+ do_resize: Optional[bool] = None,
1180
+ size: Optional[Dict[str, int]] = None,
1181
+ resample=None, # PILImageResampling
1182
+ do_rescale: Optional[bool] = None,
1183
+ rescale_factor: Optional[Union[int, float]] = None,
1184
+ do_normalize: Optional[bool] = None,
1185
+ do_convert_annotations: Optional[bool] = None,
1186
+ image_mean: Optional[Union[float, List[float]]] = None,
1187
+ image_std: Optional[Union[float, List[float]]] = None,
1188
+ do_pad: Optional[bool] = None,
1189
+ format: Optional[Union[str, AnnotationFormat]] = None,
1190
+ return_tensors: Optional[Union[TensorType, str]] = None,
1191
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
1192
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1193
+ **kwargs,
1194
+ ) -> BatchFeature:
1195
+ """
1196
+ Preprocess an image or a batch of images so that it can be used by the model.
1197
+
1198
+ Args:
1199
+ images (`ImageInput`):
1200
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
1201
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
1202
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1203
+ List of annotations associated with the image or batch of images. If annotation is for object
1204
+ detection, the annotations should be a dictionary with the following keys:
1205
+ - "image_id" (`int`): The image id.
1206
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
1207
+ dictionary. An image can have no annotations, in which case the list should be empty.
1208
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys:
1209
+ - "image_id" (`int`): The image id.
1210
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
1211
+ An image can have no segments, in which case the list should be empty.
1212
+ - "file_name" (`str`): The file name of the image.
1213
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
1214
+ Whether to return segmentation masks.
1215
+ masks_path (`str` or `pathlib.Path`, *optional*):
1216
+ Path to the directory containing the segmentation masks.
1217
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
1218
+ Whether to resize the image.
1219
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
1220
+ Size of the image after resizing.
1221
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
1222
+ Resampling filter to use when resizing the image.
1223
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
1224
+ Whether to rescale the image.
1225
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
1226
+ Rescale factor to use when rescaling the image.
1227
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
1228
+ Whether to normalize the image.
1229
+ do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
1230
+ Whether to convert the annotations to the format expected by the model. Converts the bounding
1231
+ boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
1232
+ and in relative coordinates.
1233
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
1234
+ Mean to use when normalizing the image.
1235
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
1236
+ Standard deviation to use when normalizing the image.
1237
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
1238
+ Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch
1239
+ and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
1240
+ format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
1241
+ Format of the annotations.
1242
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
1243
+ Type of tensors to return. If `None`, will return the list of images.
1244
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
1245
+ The channel dimension format for the output image. Can be one of:
1246
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1247
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1248
+ - Unset: Use the channel dimension format of the input image.
1249
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1250
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
1251
+ from the input image. Can be one of:
1252
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1253
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1254
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
1255
+ """
1256
+ if "pad_and_return_pixel_mask" in kwargs:
1257
+ logger.warning_once(
1258
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
1259
+ "use `do_pad` instead."
1260
+ )
1261
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
1262
+
1263
+ max_size = None
1264
+ if "max_size" in kwargs:
1265
+ logger.warning_once(
1266
+ "The `max_size` argument is deprecated and will be removed in a future version, use"
1267
+ " `size['longest_edge']` instead."
1268
+ )
1269
+ size = kwargs.pop("max_size")
1270
+
1271
+ do_resize = self.do_resize if do_resize is None else do_resize
1272
+ size = self.size if size is None else size
1273
+ size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
1274
+ resample = self.resample if resample is None else resample
1275
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
1276
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
1277
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
1278
+ image_mean = self.image_mean if image_mean is None else image_mean
1279
+ image_std = self.image_std if image_std is None else image_std
1280
+ do_convert_annotations = (
1281
+ self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
1282
+ )
1283
+ do_pad = self.do_pad if do_pad is None else do_pad
1284
+ format = self.format if format is None else format
1285
+
1286
+ images = make_list_of_images(images)
1287
+
1288
+ if not valid_images(images):
1289
+ raise ValueError(
1290
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
1291
+ "torch.Tensor, tf.Tensor or jax.ndarray."
1292
+ )
1293
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
1294
+
1295
+ # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
1296
+ validate_preprocess_arguments(
1297
+ do_rescale=do_rescale,
1298
+ rescale_factor=rescale_factor,
1299
+ do_normalize=do_normalize,
1300
+ image_mean=image_mean,
1301
+ image_std=image_std,
1302
+ do_resize=do_resize,
1303
+ size=size,
1304
+ resample=resample,
1305
+ )
1306
+
1307
+ if annotations is not None and isinstance(annotations, dict):
1308
+ annotations = [annotations]
1309
+
1310
+ if annotations is not None and len(images) != len(annotations):
1311
+ raise ValueError(
1312
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
1313
+ )
1314
+
1315
+ format = AnnotationFormat(format)
1316
+ if annotations is not None:
1317
+ validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
1318
+
1319
+ if (
1320
+ masks_path is not None
1321
+ and format == AnnotationFormat.COCO_PANOPTIC
1322
+ and not isinstance(masks_path, (pathlib.Path, str))
1323
+ ):
1324
+ raise ValueError(
1325
+ "The path to the directory containing the mask PNG files should be provided as a"
1326
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
1327
+ )
1328
+
1329
+ # All transformations expect numpy arrays
1330
+ images = [to_numpy_array(image) for image in images]
1331
+
1332
+ if is_scaled_image(images[0]) and do_rescale:
1333
+ logger.warning_once(
1334
+ "It looks like you are trying to rescale already rescaled images. If the input"
1335
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
1336
+ )
1337
+
1338
+ if input_data_format is None:
1339
+ # We assume that all images have the same channel dimension format.
1340
+ input_data_format = infer_channel_dimension_format(images[0])
1341
+
1342
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
1343
+ if annotations is not None:
1344
+ prepared_images = []
1345
+ prepared_annotations = []
1346
+ for image, target in zip(images, annotations):
1347
+ target = self.prepare_annotation(
1348
+ image,
1349
+ target,
1350
+ format,
1351
+ return_segmentation_masks=return_segmentation_masks,
1352
+ masks_path=masks_path,
1353
+ input_data_format=input_data_format,
1354
+ )
1355
+ prepared_images.append(image)
1356
+ prepared_annotations.append(target)
1357
+ images = prepared_images
1358
+ annotations = prepared_annotations
1359
+ del prepared_images, prepared_annotations
1360
+
1361
+ # transformations
1362
+ if do_resize:
1363
+ if annotations is not None:
1364
+ resized_images, resized_annotations = [], []
1365
+ for image, target in zip(images, annotations):
1366
+ orig_size = get_image_size(image, input_data_format)
1367
+ resized_image = self.resize(
1368
+ image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format
1369
+ )
1370
+ resized_annotation = self.resize_annotation(
1371
+ target, orig_size, get_image_size(resized_image, input_data_format)
1372
+ )
1373
+ resized_images.append(resized_image)
1374
+ resized_annotations.append(resized_annotation)
1375
+ images = resized_images
1376
+ annotations = resized_annotations
1377
+ del resized_images, resized_annotations
1378
+ else:
1379
+ images = [
1380
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
1381
+ for image in images
1382
+ ]
1383
+
1384
+ if do_rescale:
1385
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
1386
+
1387
+ if do_normalize:
1388
+ images = [
1389
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
1390
+ ]
1391
+
1392
+ if do_convert_annotations and annotations is not None:
1393
+ annotations = [
1394
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
1395
+ for annotation, image in zip(annotations, images)
1396
+ ]
1397
+
1398
+ if do_pad:
1399
+ # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
1400
+ encoded_inputs = self.pad(
1401
+ images,
1402
+ annotations=annotations,
1403
+ return_pixel_mask=True,
1404
+ data_format=data_format,
1405
+ input_data_format=input_data_format,
1406
+ update_bboxes=do_convert_annotations,
1407
+ return_tensors=return_tensors,
1408
+ )
1409
+ else:
1410
+ images = [
1411
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
1412
+ for image in images
1413
+ ]
1414
+ encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
1415
+ if annotations is not None:
1416
+ encoded_inputs["labels"] = [
1417
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
1418
+ ]
1419
+
1420
+ return encoded_inputs
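A rough usage sketch for `preprocess` with a COCO-detection-style annotation (the random image and the single box are placeholders):

    import numpy as np
    from transformers import DetrImageProcessor

    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # HWC uint8 image
    annotation = {
        "image_id": 0,
        "annotations": [{"bbox": [10.0, 20.0, 100.0, 80.0], "category_id": 1, "area": 8000.0, "iscrowd": 0}],
    }
    encoding = processor.preprocess(image, annotations=annotation, return_tensors="pt")
    # With do_pad=True (the default) the output holds "pixel_values", "pixel_mask" and "labels"
    print(encoding["pixel_values"].shape, encoding["labels"][0].keys())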
1421
+
1422
+ # POSTPROCESSING METHODS - TODO: add support for other frameworks
1423
+ # inspired by https://github.com/facebookresearch/detr/blob/master/models/detr.py#L258
1424
+ def post_process(self, outputs, target_sizes):
1425
+ """
1426
+ Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
1427
+ bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1428
+
1429
+ Args:
1430
+ outputs ([`DetrObjectDetectionOutput`]):
1431
+ Raw outputs of the model.
1432
+ target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
1433
+ Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the
1434
+ original image size (before any data augmentation). For visualization, this should be the image size
1435
+ after data augmentation, but before padding.
1436
+ Returns:
1437
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1438
+ in the batch as predicted by the model.
1439
+ """
1440
+ logger.warning_once(
1441
+ "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
1442
+ " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
1443
+ )
1444
+
1445
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1446
+
1447
+ if len(out_logits) != len(target_sizes):
1448
+ raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
1449
+ if target_sizes.shape[1] != 2:
1450
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
1451
+
1452
+ prob = nn.functional.softmax(out_logits, -1)
1453
+ scores, labels = prob[..., :-1].max(-1)
1454
+
1455
+ # convert to [x0, y0, x1, y1] format
1456
+ boxes = center_to_corners_format(out_bbox)
1457
+ # and from relative [0, 1] to absolute [0, height] coordinates
1458
+ img_h, img_w = target_sizes.unbind(1)
1459
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
1460
+ boxes = boxes * scale_fct[:, None, :]
1461
+
1462
+ results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
1463
+ return results
1464
+
1465
+ def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5):
1466
+ """
1467
+ Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch.
1468
+
1469
+ Args:
1470
+ outputs ([`DetrSegmentationOutput`]):
1471
+ Raw outputs of the model.
1472
+ target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):
1473
+ Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction.
1474
+ threshold (`float`, *optional*, defaults to 0.9):
1475
+ Threshold to use to filter out queries.
1476
+ mask_threshold (`float`, *optional*, defaults to 0.5):
1477
+ Threshold to use when turning the predicted masks into binary values.
1478
+ Returns:
1479
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image
1480
+ in the batch as predicted by the model.
1481
+ """
1482
+ logger.warning_once(
1483
+ "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use"
1484
+ " `post_process_semantic_segmentation`.",
1485
+ )
1486
+ out_logits, raw_masks = outputs.logits, outputs.pred_masks
1487
+ empty_label = out_logits.shape[-1] - 1
1488
+ preds = []
1489
+
1490
+ def to_tuple(tup):
1491
+ if isinstance(tup, tuple):
1492
+ return tup
1493
+ return tuple(tup.cpu().tolist())
1494
+
1495
+ for cur_logits, cur_masks, size in zip(out_logits, raw_masks, target_sizes):
1496
+ # we filter empty queries and detection below threshold
1497
+ cur_scores, cur_labels = cur_logits.softmax(-1).max(-1)
1498
+ keep = cur_labels.ne(empty_label) & (cur_scores > threshold)
1499
+ cur_scores = cur_scores[keep]
1500
+ cur_labels = cur_labels[keep]
1501
+ cur_masks = cur_masks[keep]
1502
+ cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
1503
+ cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1
1504
+
1505
+ predictions = {"scores": cur_scores, "labels": cur_labels, "masks": cur_masks}
1506
+ preds.append(predictions)
1507
+ return preds
1508
+
1509
+ # inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L218
1510
+ def post_process_instance(self, results, outputs, orig_target_sizes, max_target_sizes, threshold=0.5):
1511
+ """
1512
+ Converts the output of [`DetrForSegmentation`] into actual instance segmentation predictions. Only supports
1513
+ PyTorch.
1514
+
1515
+ Args:
1516
+ results (`List[Dict]`):
1517
+ Results list obtained by [`~DetrImageProcessor.post_process`], to which "masks" results will be added.
1518
+ outputs ([`DetrSegmentationOutput`]):
1519
+ Raw outputs of the model.
1520
+ orig_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
1521
+ Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original
1522
+ image size (before any data augmentation).
1523
+ max_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
1524
+ Tensor containing the maximum size (h, w) of each image of the batch. For evaluation, this must be the
1525
+ original image size (before any data augmentation).
1526
+ threshold (`float`, *optional*, defaults to 0.5):
1527
+ Threshold to use when turning the predicted masks into binary values.
1528
+ Returns:
1529
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an
1530
+ image in the batch as predicted by the model.
1531
+ """
1532
+ logger.warning_once(
1533
+ "`post_process_instance` is deprecated and will be removed in v5 of Transformers, please use"
1534
+ " `post_process_instance_segmentation`.",
1535
+ )
1536
+
1537
+ if len(orig_target_sizes) != len(max_target_sizes):
1538
+ raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes")
1539
+ max_h, max_w = max_target_sizes.max(0)[0].tolist()
1540
+ outputs_masks = outputs.pred_masks.squeeze(2)
1541
+ outputs_masks = nn.functional.interpolate(
1542
+ outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False
1543
+ )
1544
+ outputs_masks = (outputs_masks.sigmoid() > threshold).cpu()
1545
+
1546
+ for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
1547
+ img_h, img_w = t[0], t[1]
1548
+ results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
1549
+ results[i]["masks"] = nn.functional.interpolate(
1550
+ results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
1551
+ ).byte()
1552
+
1553
+ return results
1554
+
1555
+ # inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L241
1556
+ def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_thing_map=None, threshold=0.85):
1557
+ """
1558
+ Converts the output of [`DetrForSegmentation`] into actual panoptic predictions. Only supports PyTorch.
1559
+
1560
+ Args:
1561
+ outputs ([`DetrSegmentationOutput`]):
1562
+ Raw outputs of the model.
1563
+ processed_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):
1564
+ Torch Tensor (or list) containing the size (h, w) of each image of the batch, i.e. the size after data
1565
+ augmentation but before batching.
1566
+ target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`, *optional*):
1567
+ Torch Tensor (or list) corresponding to the requested final size `(height, width)` of each prediction.
1568
+ If left to None, it will default to the `processed_sizes`.
1569
+ is_thing_map (`Dict[int, bool]`, *optional*):
1570
+ Dictionary mapping class indices to either True or False, depending on whether or not they are a thing.
1571
+ If not set, defaults to the `is_thing_map` of COCO panoptic.
1572
+ threshold (`float`, *optional*, defaults to 0.85):
1573
+ Threshold to use to filter out queries.
1574
+ Returns:
1575
+ `List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for
1576
+ an image in the batch as predicted by the model.
1577
+ """
1578
+ logger.warning_once(
1579
+ "`post_process_panoptic is deprecated and will be removed in v5 of Transformers, please use"
1580
+ " `post_process_panoptic_segmentation`.",
1581
+ )
1582
+ if target_sizes is None:
1583
+ target_sizes = processed_sizes
1584
+ if len(processed_sizes) != len(target_sizes):
1585
+ raise ValueError("Make sure to pass in as many processed_sizes as target_sizes")
1586
+
1587
+ if is_thing_map is None:
1588
+ # default to is_thing_map of COCO panoptic
1589
+ is_thing_map = {i: i <= 90 for i in range(201)}
1590
+
1591
+ out_logits, raw_masks, raw_boxes = outputs.logits, outputs.pred_masks, outputs.pred_boxes
1592
+ if not len(out_logits) == len(raw_masks) == len(target_sizes):
1593
+ raise ValueError(
1594
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits and masks"
1595
+ )
1596
+ empty_label = out_logits.shape[-1] - 1
1597
+ preds = []
1598
+
1599
+ def to_tuple(tup):
1600
+ if isinstance(tup, tuple):
1601
+ return tup
1602
+ return tuple(tup.cpu().tolist())
1603
+
1604
+ for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
1605
+ out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
1606
+ ):
1607
+ # we filter empty queries and detection below threshold
1608
+ cur_scores, cur_labels = cur_logits.softmax(-1).max(-1)
1609
+ keep = cur_labels.ne(empty_label) & (cur_scores > threshold)
1610
+ cur_scores = cur_scores[keep]
1611
+ cur_labels = cur_labels[keep]
1612
+ cur_masks = cur_masks[keep]
1613
+ cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
1614
+ cur_boxes = center_to_corners_format(cur_boxes[keep])
1615
+
1616
+ h, w = cur_masks.shape[-2:]
1617
+ if len(cur_boxes) != len(cur_labels):
1618
+ raise ValueError("Not as many boxes as there are classes")
1619
+
1620
+ # It may be that we have several predicted masks for the same stuff class.
1621
+ # In the following, we track the list of mask ids for each stuff class (they are merged later on)
1622
+ cur_masks = cur_masks.flatten(1)
1623
+ stuff_equiv_classes = defaultdict(lambda: [])
1624
+ for k, label in enumerate(cur_labels):
1625
+ if not is_thing_map[label.item()]:
1626
+ stuff_equiv_classes[label.item()].append(k)
1627
+
1628
+ def get_ids_area(masks, scores, dedup=False):
1629
+ # This helper function creates the final panoptic segmentation image
1630
+ # It also returns the area of the masks that appears on the image
1631
+
1632
+ m_id = masks.transpose(0, 1).softmax(-1)
1633
+
1634
+ if m_id.shape[-1] == 0:
1635
+ # We didn't detect any mask :(
1636
+ m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
1637
+ else:
1638
+ m_id = m_id.argmax(-1).view(h, w)
1639
+
1640
+ if dedup:
1641
+ # Merge the masks corresponding to the same stuff class
1642
+ for equiv in stuff_equiv_classes.values():
1643
+ if len(equiv) > 1:
1644
+ for eq_id in equiv:
1645
+ m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
1646
+
1647
+ final_h, final_w = to_tuple(target_size)
1648
+
1649
+ seg_img = PIL.Image.fromarray(id_to_rgb(m_id.view(h, w).cpu().numpy()))
1650
+ seg_img = seg_img.resize(size=(final_w, final_h), resample=PILImageResampling.NEAREST)
1651
+
1652
+ np_seg_img = torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes()))
1653
+ np_seg_img = np_seg_img.view(final_h, final_w, 3)
1654
+ np_seg_img = np_seg_img.numpy()
1655
+
1656
+ m_id = torch.from_numpy(rgb_to_id(np_seg_img))
1657
+
1658
+ area = []
1659
+ for i in range(len(scores)):
1660
+ area.append(m_id.eq(i).sum().item())
1661
+ return area, seg_img
1662
+
1663
+ area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
1664
+ if cur_labels.numel() > 0:
1665
+ # We now filter empty masks as long as we find some
1666
+ while True:
1667
+ filtered_small = torch.as_tensor(
1668
+ [area[i] <= 4 for i, c in enumerate(cur_labels)], dtype=torch.bool, device=keep.device
1669
+ )
1670
+ if filtered_small.any().item():
1671
+ cur_scores = cur_scores[~filtered_small]
1672
+ cur_labels = cur_labels[~filtered_small]
1673
+ cur_masks = cur_masks[~filtered_small]
1674
+ area, seg_img = get_ids_area(cur_masks, cur_scores)
1675
+ else:
1676
+ break
1677
+
1678
+ else:
1679
+ cur_labels = torch.ones(1, dtype=torch.long, device=cur_labels.device)
1680
+
1681
+ segments_info = []
1682
+ for i, a in enumerate(area):
1683
+ cat = cur_labels[i].item()
1684
+ segments_info.append({"id": i, "isthing": is_thing_map[cat], "category_id": cat, "area": a})
1685
+ del cur_labels
1686
+
1687
+ with io.BytesIO() as out:
1688
+ seg_img.save(out, format="PNG")
1689
+ predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
1690
+ preds.append(predictions)
1691
+ return preds
1692
+
1693
+ # inspired by https://github.com/facebookresearch/detr/blob/master/models/detr.py#L258
1694
+ def post_process_object_detection(
1695
+ self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None
1696
+ ):
1697
+ """
1698
+ Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
1699
+ bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1700
+
1701
+ Args:
1702
+ outputs ([`DetrObjectDetectionOutput`]):
1703
+ Raw outputs of the model.
1704
+ threshold (`float`, *optional*, defaults to 0.5):
1705
+ Score threshold to keep object detection predictions.
1706
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
1707
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
1708
+ `(height, width)` of each image in the batch. If unset, predictions will not be resized.
1709
+ Returns:
1710
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1711
+ in the batch as predicted by the model.
1712
+ """
1713
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1714
+
1715
+ if target_sizes is not None:
1716
+ if len(out_logits) != len(target_sizes):
1717
+ raise ValueError(
1718
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1719
+ )
1720
+
1721
+ prob = nn.functional.softmax(out_logits, -1)
1722
+ scores, labels = prob[..., :-1].max(-1)
1723
+
1724
+ # Convert to [x0, y0, x1, y1] format
1725
+ boxes = center_to_corners_format(out_bbox)
1726
+
1727
+ # Convert from relative [0, 1] to absolute [0, height] coordinates
1728
+ if target_sizes is not None:
1729
+ if isinstance(target_sizes, List):
1730
+ img_h = torch.Tensor([i[0] for i in target_sizes])
1731
+ img_w = torch.Tensor([i[1] for i in target_sizes])
1732
+ else:
1733
+ img_h, img_w = target_sizes.unbind(1)
1734
+
1735
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
1736
+ boxes = boxes * scale_fct[:, None, :]
1737
+
1738
+ results = []
1739
+ for s, l, b in zip(scores, labels, boxes):
1740
+ score = s[s > threshold]
1741
+ label = l[s > threshold]
1742
+ box = b[s > threshold]
1743
+ results.append({"scores": score, "labels": label, "boxes": box})
1744
+
1745
+ return results
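A short end-to-end sketch of this post-processing step with `DetrForObjectDetection` (the checkpoint, threshold and random placeholder image are illustrative):

    import numpy as np
    import torch
    from transformers import DetrForObjectDetection, DetrImageProcessor

    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # placeholder image
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # target_sizes maps the normalized (cx, cy, w, h) boxes back to absolute (x0, y0, x1, y1) pixels
    target_sizes = torch.tensor([image.shape[:2]])
    results = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)
    for score, label, box in zip(results[0]["scores"], results[0]["labels"], results[0]["boxes"]):
        print(model.config.id2label[label.item()], round(score.item(), 2), [round(c, 1) for c in box.tolist()])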
1746
+
1747
+ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None):
1748
+ """
1749
+ Converts the output of [`DetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch.
1750
+
1751
+ Args:
1752
+ outputs ([`DetrForSegmentation`]):
1753
+ Raw outputs of the model.
1754
+ target_sizes (`List[Tuple[int, int]]`, *optional*):
1755
+ A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the
1756
+ batch. If unset, predictions will not be resized.
1757
+ Returns:
1758
+ `List[torch.Tensor]`:
1759
+ A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
1760
+ corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
1761
+ `torch.Tensor` correspond to a semantic class id.
1762
+ """
1763
+ class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
1764
+ masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
1765
+
1766
+ # Remove the null class `[..., :-1]`
1767
+ masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
1768
+ masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
1769
+
1770
+ # Semantic segmentation logits of shape (batch_size, num_classes, height, width)
1771
+ segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
1772
+ batch_size = class_queries_logits.shape[0]
1773
+
1774
+ # Resize logits and compute semantic segmentation maps
1775
+ if target_sizes is not None:
1776
+ if batch_size != len(target_sizes):
1777
+ raise ValueError(
1778
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1779
+ )
1780
+
1781
+ semantic_segmentation = []
1782
+ for idx in range(batch_size):
1783
+ resized_logits = nn.functional.interpolate(
1784
+ segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
1785
+ )
1786
+ semantic_map = resized_logits[0].argmax(dim=0)
1787
+ semantic_segmentation.append(semantic_map)
1788
+ else:
1789
+ semantic_segmentation = segmentation.argmax(dim=1)
1790
+ semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
1791
+
1792
+ return semantic_segmentation
1793
+
1794
+ # inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L218
1795
+ def post_process_instance_segmentation(
1796
+ self,
1797
+ outputs,
1798
+ threshold: float = 0.5,
1799
+ mask_threshold: float = 0.5,
1800
+ overlap_mask_area_threshold: float = 0.8,
1801
+ target_sizes: Optional[List[Tuple[int, int]]] = None,
1802
+ return_coco_annotation: Optional[bool] = False,
1803
+ ) -> List[Dict]:
1804
+ """
1805
+ Converts the output of [`DetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch.
1806
+
1807
+ Args:
1808
+ outputs ([`DetrForSegmentation`]):
1809
+ Raw outputs of the model.
1810
+ threshold (`float`, *optional*, defaults to 0.5):
1811
+ The probability score threshold to keep predicted instance masks.
1812
+ mask_threshold (`float`, *optional*, defaults to 0.5):
1813
+ Threshold to use when turning the predicted masks into binary values.
1814
+ overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
1815
+ The overlap mask area threshold to merge or discard small disconnected parts within each binary
1816
+ instance mask.
1817
+ target_sizes (`List[Tuple]`, *optional*):
1818
+ List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested
1819
+ final size (height, width) of each prediction. If unset, predictions will not be resized.
1820
+ return_coco_annotation (`bool`, *optional*):
1821
+ Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE)
1822
+ format.
1823
+ Returns:
1824
+ `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
1825
+ - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or
1826
+ `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
1827
+ `True`. Set to `None` if no mask is found above `threshold`.
1828
+ - **segments_info** -- A dictionary that contains additional information on each segment.
1829
+ - **id** -- An integer representing the `segment_id`.
1830
+ - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
1831
+ - **score** -- Prediction score of segment with `segment_id`.
1832
+ """
1833
+ class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
1834
+ masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
1835
+
1836
+ batch_size = class_queries_logits.shape[0]
1837
+ num_labels = class_queries_logits.shape[-1] - 1
1838
+
1839
+ mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
1840
+
1841
+ # Predicted label and score of each query (batch_size, num_queries)
1842
+ pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
1843
+
1844
+ # Loop over items in batch size
1845
+ results: List[Dict[str, TensorType]] = []
1846
+
1847
+ for i in range(batch_size):
1848
+ mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
1849
+ mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
1850
+ )
1851
+
1852
+ # No mask found
1853
+ if mask_probs_item.shape[0] <= 0:
1854
+ height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
1855
+ segmentation = torch.zeros((height, width)) - 1
1856
+ results.append({"segmentation": segmentation, "segments_info": []})
1857
+ continue
1858
+
1859
+ # Get segmentation map and segment information of batch item
1860
+ target_size = target_sizes[i] if target_sizes is not None else None
1861
+ segmentation, segments = compute_segments(
1862
+ mask_probs=mask_probs_item,
1863
+ pred_scores=pred_scores_item,
1864
+ pred_labels=pred_labels_item,
1865
+ mask_threshold=mask_threshold,
1866
+ overlap_mask_area_threshold=overlap_mask_area_threshold,
1867
+ label_ids_to_fuse=[],
1868
+ target_size=target_size,
1869
+ )
1870
+
1871
+ # Return segmentation map in run-length encoding (RLE) format
1872
+ if return_coco_annotation:
1873
+ segmentation = convert_segmentation_to_rle(segmentation)
1874
+
1875
+ results.append({"segmentation": segmentation, "segments_info": segments})
1876
+ return results
1877
+
1878
+ # inspired by https://github.com/facebookresearch/detr/blob/master/models/segmentation.py#L241
1879
+ def post_process_panoptic_segmentation(
1880
+ self,
1881
+ outputs,
1882
+ threshold: float = 0.5,
1883
+ mask_threshold: float = 0.5,
1884
+ overlap_mask_area_threshold: float = 0.8,
1885
+ label_ids_to_fuse: Optional[Set[int]] = None,
1886
+ target_sizes: Optional[List[Tuple[int, int]]] = None,
1887
+ ) -> List[Dict]:
1888
+ """
1889
+ Converts the output of [`DetrForSegmentation`] into image panoptic segmentation predictions. Only supports
1890
+ PyTorch.
1891
+
1892
+ Args:
1893
+ outputs ([`DetrForSegmentation`]):
1894
+ The outputs from [`DetrForSegmentation`].
1895
+ threshold (`float`, *optional*, defaults to 0.5):
1896
+ The probability score threshold to keep predicted instance masks.
1897
+ mask_threshold (`float`, *optional*, defaults to 0.5):
1898
+ Threshold to use when turning the predicted masks into binary values.
1899
+ overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
1900
+ The overlap mask area threshold to merge or discard small disconnected parts within each binary
1901
+ instance mask.
1902
+ label_ids_to_fuse (`Set[int]`, *optional*):
1903
+ The labels in this set will have all their instances fused together. For instance, we could say
1904
+ there can only be one sky in an image, but several persons, so the label ID for sky would be in that
1905
+ set, but not the one for person.
1906
+ target_sizes (`List[Tuple]`, *optional*):
1907
+ List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested
1908
+ final size (height, width) of each prediction in batch. If unset, predictions will not be resized.
1909
+ Returns:
1910
+ `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
1911
+ - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or
1912
+ `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized to
1913
+ the corresponding `target_sizes` entry.
1914
+ - **segments_info** -- A dictionary that contains additional information on each segment.
1915
+ - **id** -- an integer representing the `segment_id`.
1916
+ - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
1917
+ - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
1918
+ Multiple instances of the same class / label were fused and assigned a single `segment_id`.
1919
+ - **score** -- Prediction score of segment with `segment_id`.
1920
+ """
1921
+
1922
+ if label_ids_to_fuse is None:
1923
+ logger.warning_once("`label_ids_to_fuse` unset. No instance will be fused.")
1924
+ label_ids_to_fuse = set()
1925
+
1926
+ class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
1927
+ masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
1928
+
1929
+ batch_size = class_queries_logits.shape[0]
1930
+ num_labels = class_queries_logits.shape[-1] - 1
1931
+
1932
+ mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
1933
+
1934
+ # Predicted label and score of each query (batch_size, num_queries)
1935
+ pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
1936
+
1937
+ # Loop over items in batch size
1938
+ results: List[Dict[str, TensorType]] = []
1939
+
1940
+ for i in range(batch_size):
1941
+ mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
1942
+ mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
1943
+ )
1944
+
1945
+ # No mask found
1946
+ if mask_probs_item.shape[0] <= 0:
1947
+ height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
1948
+ segmentation = torch.zeros((height, width)) - 1
1949
+ results.append({"segmentation": segmentation, "segments_info": []})
1950
+ continue
1951
+
1952
+ # Get segmentation map and segment information of batch item
1953
+ target_size = target_sizes[i] if target_sizes is not None else None
1954
+ segmentation, segments = compute_segments(
1955
+ mask_probs=mask_probs_item,
1956
+ pred_scores=pred_scores_item,
1957
+ pred_labels=pred_labels_item,
1958
+ mask_threshold=mask_threshold,
1959
+ overlap_mask_area_threshold=overlap_mask_area_threshold,
1960
+ label_ids_to_fuse=label_ids_to_fuse,
1961
+ target_size=target_size,
1962
+ )
1963
+
1964
+ results.append({"segmentation": segmentation, "segments_info": segments})
1965
+ return results
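A brief sketch of the panoptic post-processing path above, assuming the `facebook/detr-resnet-50-panoptic` checkpoint and a placeholder image:

    import numpy as np
    import torch
    from transformers import DetrForSegmentation, DetrImageProcessor

    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
    model = DetrForSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic")
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # placeholder image
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    panoptic = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.shape[:2]])
    # panoptic[0]["segmentation"] is an (H, W) tensor of segment ids;
    # panoptic[0]["segments_info"] lists id, label_id, was_fused and score for each segment
    print(panoptic[0]["segmentation"].shape, len(panoptic[0]["segments_info"]))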
llmeval-env/lib/python3.10/site-packages/transformers/models/detr/modeling_detr.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_jukebox": [
22
+ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
23
+ "JukeboxConfig",
24
+ "JukeboxPriorConfig",
25
+ "JukeboxVQVAEConfig",
26
+ ],
27
+ "tokenization_jukebox": ["JukeboxTokenizer"],
28
+ }
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_jukebox"] = [
37
+ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "JukeboxModel",
39
+ "JukeboxPreTrainedModel",
40
+ "JukeboxVQVAE",
41
+ "JukeboxPrior",
42
+ ]
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_jukebox import (
46
+ JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
47
+ JukeboxConfig,
48
+ JukeboxPriorConfig,
49
+ JukeboxVQVAEConfig,
50
+ )
51
+ from .tokenization_jukebox import JukeboxTokenizer
52
+
53
+ try:
54
+ if not is_torch_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ pass
58
+ else:
59
+ from .modeling_jukebox import (
60
+ JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
61
+ JukeboxModel,
62
+ JukeboxPreTrainedModel,
63
+ JukeboxPrior,
64
+ JukeboxVQVAE,
65
+ )
66
+
67
+ else:
68
+ import sys
69
+
70
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
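A small sketch of what the `_LazyModule` wiring above provides: lightweight symbols import immediately, while the torch-dependent modeling classes are only resolved on first attribute access (this mirrors the standard lazy-import pattern used across the library):

    from transformers.models.jukebox import JukeboxConfig, JukeboxTokenizer

    config = JukeboxConfig()      # builds a default configuration without touching modeling code
    print(type(config).__name__)  # JukeboxConfig

    import transformers.models.jukebox as jukebox
    # The first lookup of JukeboxModel triggers the lazy import of modeling_jukebox;
    # the attribute is only registered when torch is available.
    print(hasattr(jukebox, "JukeboxModel"))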
llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/convert_jukebox.cpython-310.pyc ADDED
Binary file (6.93 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc ADDED
Binary file (16.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/configuration_jukebox.py ADDED
@@ -0,0 +1,613 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Jukebox configuration"""
16
+
17
+ import os
18
+ from typing import List, Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ _LARGE_ATTENTION = [
31
+ "block_attn",
32
+ "transpose_block_attn",
33
+ "prev_block_attn",
34
+ "block_attn",
35
+ "transpose_block_attn",
36
+ "prev_block_attn",
37
+ "block_attn",
38
+ "transpose_block_attn",
39
+ "prev_block_attn",
40
+ "block_attn",
41
+ "transpose_block_attn",
42
+ "prev_block_attn",
43
+ "block_attn",
44
+ "transpose_block_attn",
45
+ "prev_block_attn",
46
+ "block_attn",
47
+ "transpose_block_attn",
48
+ "prev_block_attn",
49
+ "cross_attention",
50
+ "block_attn",
51
+ "transpose_block_attn",
52
+ "prev_block_attn",
53
+ "block_attn",
54
+ "transpose_block_attn",
55
+ "prev_block_attn",
56
+ "block_attn",
57
+ "transpose_block_attn",
58
+ "prev_block_attn",
59
+ "cross_attention",
60
+ "block_attn",
61
+ "transpose_block_attn",
62
+ "prev_block_attn",
63
+ "block_attn",
64
+ "transpose_block_attn",
65
+ "prev_block_attn",
66
+ "block_attn",
67
+ "transpose_block_attn",
68
+ "prev_block_attn",
69
+ "cross_attention",
70
+ "block_attn",
71
+ "transpose_block_attn",
72
+ "prev_block_attn",
73
+ "block_attn",
74
+ "transpose_block_attn",
75
+ "prev_block_attn",
76
+ "block_attn",
77
+ "transpose_block_attn",
78
+ "prev_block_attn",
79
+ "cross_attention",
80
+ "block_attn",
81
+ "transpose_block_attn",
82
+ "prev_block_attn",
83
+ "block_attn",
84
+ "transpose_block_attn",
85
+ "prev_block_attn",
86
+ "block_attn",
87
+ "transpose_block_attn",
88
+ "prev_block_attn",
89
+ "cross_attention",
90
+ "block_attn",
91
+ "transpose_block_attn",
92
+ "prev_block_attn",
93
+ "block_attn",
94
+ "transpose_block_attn",
95
+ "prev_block_attn",
96
+ "block_attn",
97
+ "transpose_block_attn",
98
+ "prev_block_attn",
99
+ "cross_attention",
100
+ "block_attn",
101
+ "transpose_block_attn",
102
+ "prev_block_attn",
103
+ "block_attn",
104
+ "transpose_block_attn",
105
+ "prev_block_attn",
106
+ "block_attn",
107
+ "transpose_block_attn",
108
+ "prev_block_attn",
109
+ "cross_attention",
110
+ ]
111
+ _RawColumnPreviousRowAttention = ["block_attn", "transpose_block_attn", "prev_block_attn"]
112
+ _FullDenseAttention = ["dense_attention"]
113
+ _PrimePrimeDenseAttention = ["prime_attn", "prime_attn", "dense_attn"]
114
+
115
+
116
+ def full_dense_attention(layer):
117
+ return _FullDenseAttention[0]
118
+
119
+
120
+ def raw_column_previous_row_attention(layer):
121
+ return _RawColumnPreviousRowAttention[layer % 3]
122
+
123
+
124
+ def large_separated_enc_dec_w_lyrics(layer):
125
+ return _LARGE_ATTENTION[layer % 79]
126
+
127
+
128
+ def enc_dec_with_lyrics(layer):
129
+ if layer % 16 == 15:
130
+ return _PrimePrimeDenseAttention[layer % 3]
131
+ return _RawColumnPreviousRowAttention[layer % 3]
132
+
133
+
134
+ ATTENTION_PATTERNS = {
135
+ "full_dense_attention": full_dense_attention,
136
+ "raw_column_previous_row_attention": raw_column_previous_row_attention, # Alternate row, column and previous row attn
137
+ "large_separated_enc_dec_w_lyrics": large_separated_enc_dec_w_lyrics, # Used by large separated_enc_dec model with lyrics
138
+ "enc_dec_with_lyrics": enc_dec_with_lyrics, # Used by encoder_decoder model with lyrics
139
+ }
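As a hedged illustration (not part of the committed file), the registered pattern functions above can be probed directly to see how a transformer layer index is mapped to an attention type; the sketch below assumes nothing beyond the definitions in this module.

```python
# Illustration only: how the registered pattern functions map a layer index
# to an attention type, using just the definitions above.
pattern = ATTENTION_PATTERNS["enc_dec_with_lyrics"]

# Layers cycle through block / transpose-block / previous-block attention...
assert [pattern(layer) for layer in range(3)] == [
    "block_attn",
    "transpose_block_attn",
    "prev_block_attn",
]

# ...except every 16th layer (index 15, 31, ...), which falls back to the
# prime/dense entries.
assert pattern(15) == "prime_attn"
```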
140
+
141
+
142
+ class JukeboxPriorConfig(PretrainedConfig):
143
+ """
144
+ This is the configuration class to store the configuration of a [`JukeboxPrior`]. It is used to instantiate a
145
+ `JukeboxPrior` according to the specified arguments, defining the model architecture. Instantiating a
146
+ configuration with the defaults will yield a similar configuration to that of the top level prior from the
147
+ [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
149
+
150
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
151
+ documentation from [`PretrainedConfig`] for more information.
152
+
153
+
154
+
155
+ Args:
156
+ act_fn (`str`, *optional*, defaults to `"quick_gelu"`):
157
+ Activation function.
158
+ alignment_head (`int`, *optional*, defaults to 2):
159
+ Head that is responsible for the alignment between lyrics and music. Only used to compute the lyric-to-audio
160
+ alignment.
161
+ alignment_layer (`int`, *optional*, defaults to 68):
162
+ Index of the layer that is responsible for the alignment between lyrics and music. Only used to compute the
163
+ lyric-to-audio alignment.
164
+ attention_multiplier (`float`, *optional*, defaults to 0.25):
165
+ Multiplier coefficient used to define the hidden dimension of the attention layers. 0.25 means that
166
+ 0.25*width of the model will be used.
167
+ attention_pattern (`str`, *optional*, defaults to `"enc_dec_with_lyrics"`):
168
+ Which attention pattern to use for the decoder.
169
+ attn_dropout (`int`, *optional*, defaults to 0):
170
+ Dropout probability for the post-attention layer dropout in the decoder.
171
+ attn_res_scale (`bool`, *optional*, defaults to `False`):
172
+ Whether or not to scale the residuals in the attention conditioner block.
173
+ blocks (`int`, *optional*, defaults to 64):
174
+ Number of blocks used in the `block_attn`. A sequence of length seq_len is factored as `[blocks, seq_len //
175
+ blocks]` in the `JukeboxAttention` layer.
176
+ conv_res_scale (`int`, *optional*):
177
+ Whether or not to scale the residuals in the conditioner block. Since the top level prior does not have a
178
+ conditioner, the default value is `None` and should not be modified.
179
+ num_layers (`int`, *optional*, defaults to 72):
180
+ Number of layers of the transformer architecture.
181
+ emb_dropout (`int`, *optional*, defaults to 0):
182
+ Embedding dropout used in the lyric decoder.
183
+ encoder_config (`JukeboxPriorConfig`, *optional*):
184
+ Configuration of the encoder which models the prior on the lyrics.
185
+ encoder_loss_fraction (`float`, *optional*, defaults to 0.4):
186
+ Multiplication factor used in front of the lyric encoder loss.
187
+ hidden_size (`int`, *optional*, defaults to 2048):
188
+ Hidden dimension of the attention layers.
189
+ init_scale (`float`, *optional*, defaults to 0.2):
190
+ Initialization scales for the prior modules.
191
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
192
+ Whether or not the prior is an encoder-decoder model. In case it is not, and `nb_relevant_lyric_tokens` is
193
+ greater than 0, the `encoder` args should be specified for the lyric encoding.
194
+ mask (`bool`, *optional*, defaults to `False`):
195
+ Whether or not to mask the previous positions in the attention.
196
+ max_duration (`int`, *optional*, defaults to 600):
197
+ Maximum supported duration of the generated song in seconds.
198
+ max_nb_genres (`int`, *optional*, defaults to 1):
199
+ Maximum number of genres that can be used to condition the model.
200
+ merged_decoder (`bool`, *optional*, defaults to `True`):
201
+ Whether or not the decoder and the encoder inputs are merged. This is used for the separated
202
+ encoder-decoder architecture
203
+ metadata_conditioning (`bool`, *optional*, defaults to `True`):
204
+ Whether or not to condition on the artist and genre metadata.
205
+ metadata_dims (`List[int]`, *optional*, defaults to `[604, 7898]`):
206
+ Number of genres and the number of artists that were used to train the embedding layers of the prior
207
+ models.
208
+ min_duration (`int`, *optional*, defaults to 0):
209
+ Minimum duration of the generated audio on which the model was trained.
210
+ mlp_multiplier (`float`, *optional*, defaults to 1.0):
211
+ Multiplier coefficient used to define the hidden dimension of the MLP layers. 0.25 means that 0.25*width of
212
+ the model will be used.
213
+ music_vocab_size (`int`, *optional*, defaults to 2048):
214
+ Number of different music tokens. Should be similar to the `JukeboxVQVAEConfig.nb_discrete_codes`.
215
+ n_ctx (`int`, *optional*, defaults to 6144):
216
+ Number of context tokens for each prior. The context tokens are the music tokens that are attended to when
217
+ generating music tokens.
218
+ n_heads (`int`, *optional*, defaults to 2):
219
+ Number of attention heads.
220
+ nb_relevant_lyric_tokens (`int`, *optional*, defaults to 384):
221
+ Number of lyric tokens that are used when sampling a single window of length `n_ctx`
222
+ res_conv_depth (`int`, *optional*, defaults to 3):
223
+ Depth of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
224
+ `JukeboxMusicTokenConditioner`.
225
+ res_conv_width (`int`, *optional*, defaults to 128):
226
+ Width of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
227
+ `JukeboxMusicTokenConditioner`.
228
+ res_convolution_multiplier (`int`, *optional*, defaults to 1):
229
+ Multiplier used to scale the `hidden_dim` of the `JukeboxResConv1DBlock`.
230
+ res_dilation_cycle (`int`, *optional*):
231
+ Dilation cycle used to define the `JukeboxMusicTokenConditioner`. Usually similar to the ones used in the
232
+ corresponding level of the VQVAE. The first prior does not use it as it is not conditioned on upper level
233
+ tokens.
234
+ res_dilation_growth_rate (`int`, *optional*, defaults to 1):
235
+ Dilation growth rate used between each convolutional block of the `JukeboxMusicTokenConditioner`.
236
+ res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
237
+ Downsampling rates used in the audio conditioning network
238
+ res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
239
+ Striding used in the audio conditioning network
240
+ resid_dropout (`int`, *optional*, defaults to 0):
241
+ Residual dropout used in the attention pattern.
242
+ sampling_rate (`int`, *optional*, defaults to 44100):
243
+ Sampling rate used for training.
244
+ spread (`int`, *optional*):
245
+ Spread used in the `summary_spread_attention` pattern
246
+ timing_dims (`int`, *optional*, defaults to 64):
247
+ Dimension of the timing embedding.
248
+ zero_out (`bool`, *optional*, defaults to `False`):
249
+ Whether or not to zero out convolution weights when initializing.
250
+ """
251
+
252
+ model_type = "jukebox_prior"
253
+ attribute_map = {
254
+ "max_position_embeddings": "n_positions",
255
+ "num_attention_heads": "n_head",
256
+ }
257
+
258
+ def __init__(
259
+ self,
260
+ act_fn="quick_gelu",
261
+ level=0,
262
+ alignment_head=2,
263
+ alignment_layer=68,
264
+ attention_multiplier=0.25,
265
+ attention_pattern="enc_dec_with_lyrics",
266
+ attn_dropout=0,
267
+ attn_res_scale=False,
268
+ blocks=64,
269
+ conv_res_scale=None,
270
+ num_layers=72,
271
+ emb_dropout=0,
272
+ encoder_config=None,
273
+ encoder_loss_fraction=0.4,
274
+ hidden_size=2048,
275
+ init_scale=0.2,
276
+ is_encoder_decoder=True,
277
+ lyric_vocab_size=80,
278
+ mask=False,
279
+ max_duration=600,
280
+ max_nb_genres=1,
281
+ merged_decoder=True,
282
+ metadata_conditioning=True,
283
+ metadata_dims=[604, 7898],
284
+ min_duration=0,
285
+ mlp_multiplier=1.0,
286
+ music_vocab_size=2048,
287
+ n_ctx=6144,
288
+ n_heads=2,
289
+ nb_relevant_lyric_tokens=384,
290
+ res_conv_depth=3,
291
+ res_conv_width=128,
292
+ res_convolution_multiplier=1,
293
+ res_dilation_cycle=None,
294
+ res_dilation_growth_rate=1,
295
+ res_downs_t=[3, 2, 2],
296
+ res_strides_t=[2, 2, 2],
297
+ resid_dropout=0,
298
+ sampling_rate=44100,
299
+ spread=None,
300
+ timing_dims=64,
301
+ zero_out=False,
302
+ **kwargs,
303
+ ):
304
+ self.act_fn = act_fn
305
+ self.alignment_head = alignment_head
306
+ self.alignment_layer = alignment_layer
307
+ self.attention_multiplier = attention_multiplier
308
+ self.attention_pattern = attention_pattern
309
+ self.attn_dropout = attn_dropout
310
+ self.attn_res_scale = attn_res_scale
311
+ self.blocks = blocks
312
+ self.conv_res_scale = conv_res_scale
313
+ self.num_layers = num_layers
314
+ self.emb_dropout = emb_dropout
315
+ self.music_vocab_size = music_vocab_size
316
+ if encoder_config is not None:
317
+ self.encoder_config = JukeboxPriorConfig(**encoder_config)
318
+ else:
319
+ self.encoder_config = None
320
+ self.encoder_loss_fraction = encoder_loss_fraction
321
+ self.init_scale = init_scale
322
+ self.is_encoder_decoder = is_encoder_decoder
323
+ self.lyric_vocab_size = lyric_vocab_size
324
+ self.level = level
325
+ self.mask = mask
326
+ self.max_duration = max_duration
327
+ self.max_nb_genres = max_nb_genres
328
+ self.merged_decoder = merged_decoder
329
+ self.metadata_conditioning = metadata_conditioning
330
+ self.metadata_dims = metadata_dims
331
+ self.min_duration = min_duration
332
+ self.mlp_multiplier = mlp_multiplier
333
+ self.n_ctx = n_ctx
334
+ self.n_heads = n_heads
335
+ self.nb_relevant_lyric_tokens = nb_relevant_lyric_tokens
336
+ self.res_conv_depth = res_conv_depth
337
+ self.res_conv_width = res_conv_width
338
+ self.res_convolution_multiplier = res_convolution_multiplier
339
+ self.res_dilation_cycle = res_dilation_cycle
340
+ self.res_dilation_growth_rate = res_dilation_growth_rate
341
+ self.res_downs_t = res_downs_t
342
+ self.res_strides_t = res_strides_t
343
+ self.resid_dropout = resid_dropout
344
+ self.sampling_rate = sampling_rate
345
+ self.spread = spread
346
+ self.timing_dims = timing_dims
347
+ self.hidden_size = hidden_size
348
+ self.zero_out = zero_out
349
+
350
+ @classmethod
351
+ def from_pretrained(
352
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], level=0, **kwargs
353
+ ) -> "PretrainedConfig":
354
+ cls._set_token_in_kwargs(kwargs)
355
+
356
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
357
+
358
+ # get the prior config dict if we are loading from JukeboxConfig
359
+ if config_dict.get("model_type") == "jukebox":
360
+ config_dict = config_dict[f"prior_{level}"]
361
+
362
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
363
+ logger.warning(
364
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
365
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
366
+ )
367
+
368
+ return cls.from_dict(config_dict, **kwargs)
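As a hedged sketch (not part of the committed file), a `JukeboxPriorConfig` can be built from its defaults, which mirror the top-level prior described above, or extracted from a full Jukebox checkpoint via `from_pretrained`, where `level` selects the `prior_{level}` sub-dict; the checkpoint call is commented out because it requires network access.

```python
from transformers import JukeboxPriorConfig

# Default construction mirrors the top-level prior documented above.
prior_config = JukeboxPriorConfig()
print(prior_config.num_layers, prior_config.n_heads, prior_config.n_ctx)  # 72 2 6144

# Loading from a full Jukebox checkpoint extracts the `prior_{level}` sub-config
# (downloads from the Hub, hence left commented out).
# upsampler_config = JukeboxPriorConfig.from_pretrained("openai/jukebox-1b-lyrics", level=2)
```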
369
+
370
+
371
+ class JukeboxVQVAEConfig(PretrainedConfig):
372
+ """
373
+ This is the configuration class to store the configuration of a [`JukeboxVQVAE`]. It is used to instantiate a
374
+ `JukeboxVQVAE` according to the specified arguments, defining the model architecture. Instantiating a configuration
375
+ with the defaults will yield a similar configuration to that of the VQVAE from
376
+ [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
377
+
378
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
379
+ documentation from [`PretrainedConfig`] for more information.
380
+
381
+ Args:
382
+ act_fn (`str`, *optional*, defaults to `"relu"`):
383
+ Activation function of the model.
384
+ nb_discrete_codes (`int`, *optional*, defaults to 2048):
385
+ Number of codes of the VQVAE.
386
+ commit (`float`, *optional*, defaults to 0.02):
387
+ Commit loss multiplier.
388
+ conv_input_shape (`int`, *optional*, defaults to 1):
389
+ Number of audio channels.
390
+ conv_res_scale (`bool`, *optional*, defaults to `False`):
391
+ Whether or not to scale the residuals of the `JukeboxResConv1DBlock`.
392
+ embed_dim (`int`, *optional*, defaults to 64):
393
+ Embedding dimension of the codebook vectors.
394
+ hop_fraction (`List[int]`, *optional*, defaults to `[0.125, 0.5, 0.5]`):
395
+ Fraction of non-intersecting window used when continuing the sampling process.
396
+ levels (`int`, *optional*, defaults to 3):
397
+ Number of hierarchical levels used in the VQVAE.
398
+ lmu (`float`, *optional*, defaults to 0.99):
399
+ Used in the codebook update, exponential moving average coefficient. For more detail refer to Appendix A.1
400
+ of the original [VQVAE paper](https://arxiv.org/pdf/1711.00937v2.pdf)
401
+ multipliers (`List[int]`, *optional*, defaults to `[2, 1, 1]`):
402
+ Depth and width multipliers used for each level. Used on the `res_conv_width` and `res_conv_depth`
403
+ res_conv_depth (`int`, *optional*, defaults to 4):
404
+ Depth of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
405
+ res_conv_width (`int`, *optional*, defaults to 32):
406
+ Width of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
407
+ res_convolution_multiplier (`int`, *optional*, defaults to 1):
408
+ Scaling factor of the hidden dimension used in the `JukeboxResConv1DBlock`.
409
+ res_dilation_cycle (`int`, *optional*):
410
+ Dilation cycle value used in the `JukeboxResnet`. If an int is used, each new Conv1 block will have a depth
411
+ reduced by a power of `res_dilation_cycle`.
412
+ res_dilation_growth_rate (`int`, *optional*, defaults to 3):
413
+ Resnet dilation growth rate used in the VQVAE (dilation_growth_rate ** depth)
414
+ res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
415
+ Downsampling rate for each level of the hierarchical VQ-VAE.
416
+ res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
417
+ Stride used for each level of the hierarchical VQ-VAE.
418
+ sample_length (`int`, *optional*, defaults to 1058304):
419
+ Provides the max input shape of the VQVAE. Is used to compute the input shape of each level.
420
+ init_scale (`float`, *optional*, defaults to 0.2):
421
+ Initialization scale.
422
+ zero_out (`bool`, *optional*, defaults to `False`):
423
+ Whether or not to zero out convolution weights when initializing.
424
+ """
425
+
426
+ model_type = "jukebox_vqvae"
427
+
428
+ def __init__(
429
+ self,
430
+ act_fn="relu",
431
+ nb_discrete_codes=2048,
432
+ commit=0.02,
433
+ conv_input_shape=1,
434
+ conv_res_scale=False,
435
+ embed_dim=64,
436
+ hop_fraction=[0.125, 0.5, 0.5],
437
+ levels=3,
438
+ lmu=0.99,
439
+ multipliers=[2, 1, 1],
440
+ res_conv_depth=4,
441
+ res_conv_width=32,
442
+ res_convolution_multiplier=1,
443
+ res_dilation_cycle=None,
444
+ res_dilation_growth_rate=3,
445
+ res_downs_t=[3, 2, 2],
446
+ res_strides_t=[2, 2, 2],
447
+ sample_length=1058304,
448
+ init_scale=0.2,
449
+ zero_out=False,
450
+ **kwargs,
451
+ ):
452
+ self.hop_fraction = hop_fraction
453
+ self.conv_input_shape = conv_input_shape
454
+ self.sample_length = sample_length
455
+
456
+ # VQVAE parameters (all used)
457
+ self.levels = levels
458
+ self.embed_dim = embed_dim
459
+ self.nb_discrete_codes = nb_discrete_codes
460
+ self.res_conv_width = res_conv_width
461
+ self.res_conv_depth = res_conv_depth
462
+ self.res_convolution_multiplier = res_convolution_multiplier
463
+ self.res_dilation_growth_rate = res_dilation_growth_rate
464
+ self.res_dilation_cycle = res_dilation_cycle
465
+ self.multipliers = multipliers
466
+ self.res_downs_t = res_downs_t
467
+ self.res_strides_t = res_strides_t
468
+ self.lmu = lmu
469
+ self.commit = commit
470
+ self.conv_res_scale = conv_res_scale
471
+ self.act_fn = act_fn
472
+ self.init_scale = init_scale
473
+ self.zero_out = zero_out
474
+
475
+ @classmethod
476
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
477
+ cls._set_token_in_kwargs(kwargs)
478
+
479
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
480
+
481
+ # get the vqvae config dict if we are loading from JukeboxConfig
482
+ if config_dict.get("model_type") == "jukebox":
483
+ config_dict = config_dict["vqvae_config"]
484
+
485
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
486
+ logger.warning(
487
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
488
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
489
+ )
490
+
491
+ return cls.from_dict(config_dict, **kwargs)
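As a hedged sketch (not part of the committed file), the default `res_downs_t` / `res_strides_t` values imply per-level compression factors of `stride ** downsampling`, accumulated from the bottom level up; the cumulative-product formula below is inferred from the docstring rather than called from the modeling code.

```python
from transformers import JukeboxVQVAEConfig

cfg = JukeboxVQVAEConfig()

# Each level compresses the audio by stride ** downsampling; levels stack multiplicatively.
hop_lengths, factor = [], 1
for down, stride in zip(cfg.res_downs_t, cfg.res_strides_t):
    factor *= stride**down
    hop_lengths.append(factor)

print(hop_lengths)                          # [8, 32, 128]
print(cfg.sample_length % hop_lengths[-1])  # 0 -> the default sample_length divides evenly
```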
492
+
493
+
494
+ class JukeboxConfig(PretrainedConfig):
495
+ """
496
+ This is the configuration class to store the configuration of a [`JukeboxModel`].
497
+
498
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
499
+ documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will
500
+ yield a similar configuration to that of
501
+ [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
502
+
503
+
504
+ The downsampling and stride are used to determine downsampling of the input sequence. For example, downsampling =
505
+ (5, 3) and strides = (2, 2) will downsample the audio by 2**5 = 32 to get the first level of codes, and by 2**8 = 256
506
+ to get the second level of codes. This is mostly true for training the top level prior and the upsamplers.
507
+
508
+ Args:
509
+ vqvae_config (`JukeboxVQVAEConfig`, *optional*):
510
+ Configuration for the `JukeboxVQVAE` model.
511
+ prior_config_list (`List[JukeboxPriorConfig]`, *optional*):
512
+ List of the configs for each of the `JukeboxPrior` of the model. The original architecture uses 3 priors.
513
+ nb_priors (`int`, *optional*, defaults to 3):
514
+ Number of prior models that will sequentially sample tokens. Each prior is a conditional autoregressive
515
+ (decoder) model, apart from the top prior, which can include a lyric encoder. The available models were
516
+ trained using a top prior and 2 upsampler priors.
517
+ sampling_rate (`int`, *optional*, defaults to 44100):
518
+ Sampling rate of the raw audio.
519
+ timing_dims (`int`, *optional*, defaults to 64):
520
+ Dimensions of the JukeboxRangeEmbedding layer which is equivalent to traditional positional embedding
521
+ layer. The timing embedding layer converts the absolute and relative position in the currently sampled
522
+ audio to a tensor of length `timing_dims` that will be added to the music tokens.
523
+ min_duration (`int`, *optional*, defaults to 0):
524
+ Minimum duration of the audio to generate.
525
+ max_duration (`float`, *optional*, defaults to 600.0):
526
+ Maximum duration of the audio to generate.
527
+ max_nb_genres (`int`, *optional*, defaults to 5):
528
+ Maximum number of genres that can be used to condition a single sample.
529
+ metadata_conditioning (`bool`, *optional*, defaults to `True`):
530
+ Whether or not to use metadata conditioning, corresponding to the artist, the genre and the minimum/maximum
531
+ duration.
532
+
533
+ Example:
534
+
535
+ ```python
536
+ >>> from transformers import JukeboxModel, JukeboxConfig
537
+
538
+ >>> # Initializing a Jukebox configuration
539
+ >>> configuration = JukeboxConfig()
540
+
541
+ >>> # Initializing a model from the configuration
542
+ >>> model = JukeboxModel(configuration)
543
+
544
+ >>> # Accessing the model configuration
545
+ >>> configuration = model.config
546
+ ```
547
+ """
548
+
549
+ model_type = "jukebox"
550
+
551
+ def __init__(
552
+ self,
553
+ vqvae_config=None,
554
+ prior_config_list=None,
555
+ nb_priors=3,
556
+ sampling_rate=44100,
557
+ timing_dims=64,
558
+ min_duration=0,
559
+ max_duration=600.0,
560
+ max_nb_genres=5,
561
+ metadata_conditioning=True,
562
+ **kwargs,
563
+ ):
564
+ if vqvae_config is None:
565
+ vqvae_config = {}
566
+ logger.info("vqvae_config is None. initializing the JukeboxVQVAE with default values.")
567
+
568
+ self.vqvae_config = JukeboxVQVAEConfig(**vqvae_config)
569
+ if prior_config_list is not None:
570
+ self.prior_configs = [JukeboxPriorConfig(**prior_config) for prior_config in prior_config_list]
571
+ else:
572
+ self.prior_configs = []
573
+ for prior_idx in range(nb_priors):
574
+ prior_config = kwargs.pop(f"prior_{prior_idx}", None)
575
+ if prior_config is None:
576
+ prior_config = {}
577
+ logger.info(
578
+ f"prior_{prior_idx}'s config is None. Initializing the JukeboxPriorConfig list with default"
579
+ " values."
580
+ )
581
+ self.prior_configs.append(JukeboxPriorConfig(**prior_config))
582
+
583
+ self.hop_fraction = self.vqvae_config.hop_fraction
584
+
585
+ self.nb_priors = nb_priors
586
+
587
+ # Metadata conditioning
588
+ self.max_nb_genres = max_nb_genres
589
+ self.sampling_rate = sampling_rate
590
+ self.timing_dims = timing_dims
591
+ self.min_duration = min_duration
592
+ self.max_duration = max_duration
593
+ self.metadata_conditioning = metadata_conditioning
594
+
595
+ super().__init__(**kwargs)
596
+
597
+ @classmethod
598
+ def from_configs(cls, prior_configs: List[JukeboxPriorConfig], vqvae_config: JukeboxVQVAEConfig, **kwargs):
599
+ r"""
600
+ Instantiate a [`JukeboxConfig`] (or a derived class) from a list of prior model configurations and a VQVAE model
601
+ configuration.
602
+
603
+ Returns:
604
+ [`JukeboxConfig`]: An instance of a configuration object
605
+ """
606
+ prior_config_list = [config.to_dict() for config in prior_configs]
607
+ return cls(prior_config_list=prior_config_list, vqvae_config=vqvae_config.to_dict(), **kwargs)
608
+
609
+ def to_dict(self):
610
+ # Override the default to_dict to apply to_dict to the list of prior configs.
611
+ result = super().to_dict()
612
+ result["prior_config_list"] = [config.to_dict() for config in result.pop("prior_configs")]
613
+ return result
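To close the loop, here is a hedged sketch (not part of the committed file) of composing a full `JukeboxConfig` from its sub-configs with `from_configs` and flattening it back with `to_dict`; it uses only the classes defined above with their default values, not a released checkpoint.

```python
from transformers import JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig

# One prior per level, defaults otherwise; the original architecture uses 3 priors.
priors = [JukeboxPriorConfig(level=level) for level in range(3)]
config = JukeboxConfig.from_configs(priors, JukeboxVQVAEConfig())

print(config.nb_priors, len(config.prior_configs))  # 3 3

# `to_dict` re-flattens the prior configs under "prior_config_list" so the result
# can be written straight to a config.json.
serialized = config.to_dict()
print(len(serialized["prior_config_list"]))         # 3
```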