applied-ai-018 committed
Commit 54e8b1e · verified · 1 Parent(s): 87a620d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/transformers/commands/__init__.py +27 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model.py +259 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py +1763 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/commands/convert.py +165 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/commands/download.py +56 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/commands/env.py +143 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/commands/lfs.py +226 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/commands/pt_to_tf.py +425 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/commands/run.py +110 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/commands/serving.py +228 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/commands/train.py +158 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/commands/transformers_cli.py +59 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/commands/user.py +197 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/data/__init__.py +44 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/data/data_collator.py +1568 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__init__.py +23 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/glue.py +161 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/language_modeling.py +530 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/squad.py +229 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/data/metrics/__init__.py +98 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py +780 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/pipelines/audio_classification.py +215 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/pipelines/document_question_answering.py +502 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_classification.py +201 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_segmentation.py +211 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_to_image.py +134 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/pipelines/mask_generation.py +285 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/pipelines/object_detection.py +187 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_to_audio.py +207 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/pipelines/visual_question_answering.py +151 -0
env-llmeval/lib/python3.10/site-packages/transformers/commands/__init__.py ADDED
@@ -0,0 +1,27 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from abc import ABC, abstractmethod
+ from argparse import ArgumentParser
+
+
+ class BaseTransformersCLICommand(ABC):
+     @staticmethod
+     @abstractmethod
+     def register_subcommand(parser: ArgumentParser):
+         raise NotImplementedError()
+
+     @abstractmethod
+     def run(self):
+         raise NotImplementedError()
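
For orientation (not part of the commit): every concrete CLI command in this package subclasses BaseTransformersCLICommand, registering its own argparse subparser in register_subcommand and doing its work in run. A minimal sketch of that pattern, assuming a hypothetical HelloCommand and "hello" subcommand (the real commands added in this commit, such as AddNewModelCommand below, follow the same shape):

from argparse import ArgumentParser

from transformers.commands import BaseTransformersCLICommand


class HelloCommand(BaseTransformersCLICommand):
    """Hypothetical example command, for illustration only."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the subparsers action created by the CLI entry point.
        hello_parser = parser.add_parser("hello")
        hello_parser.add_argument("--name", type=str, default="world", help="Who to greet.")
        # The factory stored in `func` builds the command instance from the parsed arguments.
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")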
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (825 Bytes).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model.cpython-310.pyc ADDED
Binary file (7.11 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc ADDED
Binary file (49 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/convert.cpython-310.pyc ADDED
Binary file (4.64 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc ADDED
Binary file (2.03 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc ADDED
Binary file (4.12 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc ADDED
Binary file (7.28 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc ADDED
Binary file (12.8 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc ADDED
Binary file (3.44 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc ADDED
Binary file (6.83 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/train.cpython-310.pyc ADDED
Binary file (4.66 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/transformers_cli.cpython-310.pyc ADDED
Binary file (1.37 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/__pycache__/user.cpython-310.pyc ADDED
Binary file (7.31 kB).
env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model.py ADDED
@@ -0,0 +1,259 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ import os
+ import shutil
+ import warnings
+ from argparse import ArgumentParser, Namespace
+ from pathlib import Path
+ from typing import List
+
+ from ..utils import logging
+ from . import BaseTransformersCLICommand
+
+
+ try:
+     from cookiecutter.main import cookiecutter
+
+     _has_cookiecutter = True
+ except ImportError:
+     _has_cookiecutter = False
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ def add_new_model_command_factory(args: Namespace):
+     return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
+
+
+ class AddNewModelCommand(BaseTransformersCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         add_new_model_parser = parser.add_parser("add-new-model")
+         add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
+         add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
+         add_new_model_parser.add_argument(
+             "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
+         )
+         add_new_model_parser.set_defaults(func=add_new_model_command_factory)
+
+     def __init__(self, testing: bool, testing_file: str, path=None, *args):
+         self._testing = testing
+         self._testing_file = testing_file
+         self._path = path
+
+     def run(self):
+         warnings.warn(
+             "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
+             "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
+             "checks, you should use `transformers-cli add-new-model-like` instead."
+         )
+         if not _has_cookiecutter:
+             raise ImportError(
+                 "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
+                 "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
+             )
+         # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
+         directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
+         if len(directories) > 0:
+             raise ValueError(
+                 "Several directories starting with `cookiecutter-template-` in current working directory. "
+                 "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
+                 "change your working directory."
+             )
+
+         path_to_transformer_root = (
+             Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
+         )
+         path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
+
+         # Execute cookiecutter
+         if not self._testing:
+             cookiecutter(str(path_to_cookiecutter))
+         else:
+             with open(self._testing_file, "r") as configuration_file:
+                 testing_configuration = json.load(configuration_file)
+
+             cookiecutter(
+                 str(path_to_cookiecutter if self._path is None else self._path),
+                 no_input=True,
+                 extra_context=testing_configuration,
+             )
+
+         directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
+
+         # Retrieve configuration
+         with open(directory + "/configuration.json", "r") as configuration_file:
+             configuration = json.load(configuration_file)
+
+         lowercase_model_name = configuration["lowercase_modelname"]
+         generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
+         os.remove(f"{directory}/configuration.json")
+
+         output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
+         output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
+         output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
+
+         model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
+         os.makedirs(model_dir, exist_ok=True)
+         os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
+
+         # Tests require submodules as they have parent imports
+         with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
+             pass
+
+         shutil.move(
+             f"{directory}/__init__.py",
+             f"{model_dir}/__init__.py",
+         )
+         shutil.move(
+             f"{directory}/configuration_{lowercase_model_name}.py",
+             f"{model_dir}/configuration_{lowercase_model_name}.py",
+         )
+
+         def remove_copy_lines(path):
+             with open(path, "r") as f:
+                 lines = f.readlines()
+             with open(path, "w") as f:
+                 for line in lines:
+                     if "# Copied from transformers." not in line:
+                         f.write(line)
+
+         if output_pytorch:
+             if not self._testing:
+                 remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
+
+             shutil.move(
+                 f"{directory}/modeling_{lowercase_model_name}.py",
+                 f"{model_dir}/modeling_{lowercase_model_name}.py",
+             )
+
+             shutil.move(
+                 f"{directory}/test_modeling_{lowercase_model_name}.py",
+                 f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
+             )
+         else:
+             os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
+             os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
+
+         if output_tensorflow:
+             if not self._testing:
+                 remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+
+             shutil.move(
+                 f"{directory}/modeling_tf_{lowercase_model_name}.py",
+                 f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
+             )
+
+             shutil.move(
+                 f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
+                 f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
+             )
+         else:
+             os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+             os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
+
+         if output_flax:
+             if not self._testing:
+                 remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+
+             shutil.move(
+                 f"{directory}/modeling_flax_{lowercase_model_name}.py",
+                 f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
+             )
+
+             shutil.move(
+                 f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
+                 f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
+             )
+         else:
+             os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
+             os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
+
+         shutil.move(
+             f"{directory}/{lowercase_model_name}.md",
+             f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
+         )
+
+         shutil.move(
+             f"{directory}/tokenization_{lowercase_model_name}.py",
+             f"{model_dir}/tokenization_{lowercase_model_name}.py",
+         )
+
+         shutil.move(
+             f"{directory}/tokenization_fast_{lowercase_model_name}.py",
+             f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
+         )
+
+         from os import fdopen, remove
+         from shutil import copymode, move
+         from tempfile import mkstemp
+
+         def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
+             # Create temp file
+             fh, abs_path = mkstemp()
+             line_found = False
+             with fdopen(fh, "w") as new_file:
+                 with open(original_file) as old_file:
+                     for line in old_file:
+                         new_file.write(line)
+                         if line_to_copy_below in line:
+                             line_found = True
+                             for line_to_copy in lines_to_copy:
+                                 new_file.write(line_to_copy)
+
+             if not line_found:
+                 raise ValueError(f"Line {line_to_copy_below} was not found in file.")
+
+             # Copy the file permissions from the old file to the new file
+             copymode(original_file, abs_path)
+             # Remove original file
+             remove(original_file)
+             # Move new file
+             move(abs_path, original_file)
+
+         def skip_units(line):
+             return (
+                 ("generating PyTorch" in line and not output_pytorch)
+                 or ("generating TensorFlow" in line and not output_tensorflow)
+                 or ("generating Flax" in line and not output_flax)
+             )
+
+         def replace_in_files(path_to_datafile):
+             with open(path_to_datafile) as datafile:
+                 lines_to_copy = []
+                 skip_file = False
+                 skip_snippet = False
+                 for line in datafile:
+                     if "# To replace in: " in line and "##" not in line:
+                         file_to_replace_in = line.split('"')[1]
+                         skip_file = skip_units(line)
+                     elif "# Below: " in line and "##" not in line:
+                         line_to_copy_below = line.split('"')[1]
+                         skip_snippet = skip_units(line)
+                     elif "# End." in line and "##" not in line:
+                         if not skip_file and not skip_snippet:
+                             replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
+
+                         lines_to_copy = []
+                     elif "# Replace with" in line and "##" not in line:
+                         lines_to_copy = []
+                     elif "##" not in line:
+                         lines_to_copy.append(line)
+
+             remove(path_to_datafile)
+
+         replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
+         os.rmdir(directory)
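
As a usage note (again not part of the commit): the subcommand registered above is exposed through the `transformers-cli` entry point (transformers_cli.py is among the changed files but its diff is not shown in this truncated view). A hedged sketch of how the registration and factory are exercised; the standalone parser and the file name testing_config.json are illustrative assumptions:

from argparse import ArgumentParser

from transformers.commands.add_new_model import AddNewModelCommand

parser = ArgumentParser("transformers-cli")
subcommands = parser.add_subparsers(help="transformers-cli command helpers")
AddNewModelCommand.register_subcommand(subcommands)

# Parsing a command line selects the subparser and the `func` factory it registered.
args = parser.parse_args(["add-new-model", "--testing", "--testing_file", "testing_config.json"])
command = args.func(args)  # builds AddNewModelCommand(args.testing, args.testing_file, path=args.path)
command.run()  # emits the deprecation warning, then drives cookiecutter from the testing configuration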
env-llmeval/lib/python3.10/site-packages/transformers/commands/add_new_model_like.py ADDED
@@ -0,0 +1,1763 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import difflib
16
+ import json
17
+ import os
18
+ import re
19
+ from argparse import ArgumentParser, Namespace
20
+ from dataclasses import dataclass
21
+ from datetime import date
22
+ from itertools import chain
23
+ from pathlib import Path
24
+ from typing import Any, Callable, Dict, List, Optional, Pattern, Tuple, Union
25
+
26
+ import yaml
27
+
28
+ from ..models import auto as auto_module
29
+ from ..models.auto.configuration_auto import model_type_to_module_name
30
+ from ..utils import is_flax_available, is_tf_available, is_torch_available, logging
31
+ from . import BaseTransformersCLICommand
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ CURRENT_YEAR = date.today().year
38
+ TRANSFORMERS_PATH = Path(__file__).parent.parent
39
+ REPO_PATH = TRANSFORMERS_PATH.parent.parent
40
+
41
+
42
+ @dataclass
43
+ class ModelPatterns:
44
+ """
45
+ Holds the basic information about a new model for the add-new-model-like command.
46
+
47
+ Args:
48
+ model_name (`str`): The model name.
49
+ checkpoint (`str`): The checkpoint to use for doc examples.
50
+ model_type (`str`, *optional*):
51
+ The model type, the identifier used internally in the library like `bert` or `xlm-roberta`. Will default to
52
+ `model_name` lowercased with spaces replaced with minuses (-).
53
+ model_lower_cased (`str`, *optional*):
54
+ The lowercased version of the model name, to use for the module name or function names. Will default to
55
+ `model_name` lowercased with spaces and minuses replaced with underscores.
56
+ model_camel_cased (`str`, *optional*):
57
+ The camel-cased version of the model name, to use for the class names. Will default to `model_name`
58
+ camel-cased (with spaces and minuses both considered as word separators.
59
+ model_upper_cased (`str`, *optional*):
60
+ The uppercased version of the model name, to use for the constant names. Will default to `model_name`
61
+ uppercased with spaces and minuses replaced with underscores.
62
+ config_class (`str`, *optional*):
63
+ The tokenizer class associated with this model. Will default to `"{model_camel_cased}Config"`.
64
+ tokenizer_class (`str`, *optional*):
65
+ The tokenizer class associated with this model (leave to `None` for models that don't use a tokenizer).
66
+ image_processor_class (`str`, *optional*):
67
+ The image processor class associated with this model (leave to `None` for models that don't use an image
68
+ processor).
69
+ feature_extractor_class (`str`, *optional*):
70
+ The feature extractor class associated with this model (leave to `None` for models that don't use a feature
71
+ extractor).
72
+ processor_class (`str`, *optional*):
73
+ The processor class associated with this model (leave to `None` for models that don't use a processor).
74
+ """
75
+
76
+ model_name: str
77
+ checkpoint: str
78
+ model_type: Optional[str] = None
79
+ model_lower_cased: Optional[str] = None
80
+ model_camel_cased: Optional[str] = None
81
+ model_upper_cased: Optional[str] = None
82
+ config_class: Optional[str] = None
83
+ tokenizer_class: Optional[str] = None
84
+ image_processor_class: Optional[str] = None
85
+ feature_extractor_class: Optional[str] = None
86
+ processor_class: Optional[str] = None
87
+
88
+ def __post_init__(self):
89
+ if self.model_type is None:
90
+ self.model_type = self.model_name.lower().replace(" ", "-")
91
+ if self.model_lower_cased is None:
92
+ self.model_lower_cased = self.model_name.lower().replace(" ", "_").replace("-", "_")
93
+ if self.model_camel_cased is None:
94
+ # Split the model name on - and space
95
+ words = self.model_name.split(" ")
96
+ words = list(chain(*[w.split("-") for w in words]))
97
+ # Make sure each word is capitalized
98
+ words = [w[0].upper() + w[1:] for w in words]
99
+ self.model_camel_cased = "".join(words)
100
+ if self.model_upper_cased is None:
101
+ self.model_upper_cased = self.model_name.upper().replace(" ", "_").replace("-", "_")
102
+ if self.config_class is None:
103
+ self.config_class = f"{self.model_camel_cased}Config"
104
+
105
+
106
+ ATTRIBUTE_TO_PLACEHOLDER = {
107
+ "config_class": "[CONFIG_CLASS]",
108
+ "tokenizer_class": "[TOKENIZER_CLASS]",
109
+ "image_processor_class": "[IMAGE_PROCESSOR_CLASS]",
110
+ "feature_extractor_class": "[FEATURE_EXTRACTOR_CLASS]",
111
+ "processor_class": "[PROCESSOR_CLASS]",
112
+ "checkpoint": "[CHECKPOINT]",
113
+ "model_type": "[MODEL_TYPE]",
114
+ "model_upper_cased": "[MODEL_UPPER_CASED]",
115
+ "model_camel_cased": "[MODEL_CAMELCASED]",
116
+ "model_lower_cased": "[MODEL_LOWER_CASED]",
117
+ "model_name": "[MODEL_NAME]",
118
+ }
119
+
120
+
121
+ def is_empty_line(line: str) -> bool:
122
+ """
123
+ Determines whether a line is empty or not.
124
+ """
125
+ return len(line) == 0 or line.isspace()
126
+
127
+
128
+ def find_indent(line: str) -> int:
129
+ """
130
+ Returns the number of spaces that start a line indent.
131
+ """
132
+ search = re.search(r"^(\s*)(?:\S|$)", line)
133
+ if search is None:
134
+ return 0
135
+ return len(search.groups()[0])
136
+
137
+
138
+ def parse_module_content(content: str) -> List[str]:
139
+ """
140
+ Parse the content of a module in the list of objects it defines.
141
+
142
+ Args:
143
+ content (`str`): The content to parse
144
+
145
+ Returns:
146
+ `List[str]`: The list of objects defined in the module.
147
+ """
148
+ objects = []
149
+ current_object = []
150
+ lines = content.split("\n")
151
+ # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
152
+ end_markers = [")", "]", "}", '"""']
153
+
154
+ for line in lines:
155
+ # End of an object
156
+ is_valid_object = len(current_object) > 0
157
+ if is_valid_object and len(current_object) == 1:
158
+ is_valid_object = not current_object[0].startswith("# Copied from")
159
+ if not is_empty_line(line) and find_indent(line) == 0 and is_valid_object:
160
+ # Closing parts should be included in current object
161
+ if line in end_markers:
162
+ current_object.append(line)
163
+ objects.append("\n".join(current_object))
164
+ current_object = []
165
+ else:
166
+ objects.append("\n".join(current_object))
167
+ current_object = [line]
168
+ else:
169
+ current_object.append(line)
170
+
171
+ # Add last object
172
+ if len(current_object) > 0:
173
+ objects.append("\n".join(current_object))
174
+
175
+ return objects
176
+
177
+
178
+ def extract_block(content: str, indent_level: int = 0) -> str:
179
+ """Return the first block in `content` with the indent level `indent_level`.
180
+
181
+ The first line in `content` should be indented at `indent_level` level, otherwise an error will be thrown.
182
+
183
+ This method will immediately stop the search when a (non-empty) line with indent level less than `indent_level` is
184
+ encountered.
185
+
186
+ Args:
187
+ content (`str`): The content to parse
188
+ indent_level (`int`, *optional*, default to 0): The indent level of the blocks to search for
189
+
190
+ Returns:
191
+ `str`: The first block in `content` with the indent level `indent_level`.
192
+ """
193
+ current_object = []
194
+ lines = content.split("\n")
195
+ # Doc-styler takes everything between two triple quotes in docstrings, so we need a fake """ here to go with this.
196
+ end_markers = [")", "]", "}", '"""']
197
+
198
+ for idx, line in enumerate(lines):
199
+ if idx == 0 and indent_level > 0 and not is_empty_line(line) and find_indent(line) != indent_level:
200
+ raise ValueError(
201
+ f"When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. Got "
202
+ f"{find_indent(line)} instead."
203
+ )
204
+
205
+ if find_indent(line) < indent_level and not is_empty_line(line):
206
+ break
207
+
208
+ # End of an object
209
+ is_valid_object = len(current_object) > 0
210
+ if (
211
+ not is_empty_line(line)
212
+ and not line.endswith(":")
213
+ and find_indent(line) == indent_level
214
+ and is_valid_object
215
+ ):
216
+ # Closing parts should be included in current object
217
+ if line.lstrip() in end_markers:
218
+ current_object.append(line)
219
+ return "\n".join(current_object)
220
+ else:
221
+ current_object.append(line)
222
+
223
+ # Add last object
224
+ if len(current_object) > 0:
225
+ return "\n".join(current_object)
226
+
227
+
228
+ def add_content_to_text(
229
+ text: str,
230
+ content: str,
231
+ add_after: Optional[Union[str, Pattern]] = None,
232
+ add_before: Optional[Union[str, Pattern]] = None,
233
+ exact_match: bool = False,
234
+ ) -> str:
235
+ """
236
+ A utility to add some content inside a given text.
237
+
238
+ Args:
239
+ text (`str`): The text in which we want to insert some content.
240
+ content (`str`): The content to add.
241
+ add_after (`str` or `Pattern`):
242
+ The pattern to test on a line of `text`, the new content is added after the first instance matching it.
243
+ add_before (`str` or `Pattern`):
244
+ The pattern to test on a line of `text`, the new content is added before the first instance matching it.
245
+ exact_match (`bool`, *optional*, defaults to `False`):
246
+ A line is considered a match with `add_after` or `add_before` if it matches exactly when `exact_match=True`,
247
+ otherwise, if `add_after`/`add_before` is present in the line.
248
+
249
+ <Tip warning={true}>
250
+
251
+ The arguments `add_after` and `add_before` are mutually exclusive, and one exactly needs to be provided.
252
+
253
+ </Tip>
254
+
255
+ Returns:
256
+ `str`: The text with the new content added if a match was found.
257
+ """
258
+ if add_after is None and add_before is None:
259
+ raise ValueError("You need to pass either `add_after` or `add_before`")
260
+ if add_after is not None and add_before is not None:
261
+ raise ValueError("You can't pass both `add_after` or `add_before`")
262
+ pattern = add_after if add_before is None else add_before
263
+
264
+ def this_is_the_line(line):
265
+ if isinstance(pattern, Pattern):
266
+ return pattern.search(line) is not None
267
+ elif exact_match:
268
+ return pattern == line
269
+ else:
270
+ return pattern in line
271
+
272
+ new_lines = []
273
+ for line in text.split("\n"):
274
+ if this_is_the_line(line):
275
+ if add_before is not None:
276
+ new_lines.append(content)
277
+ new_lines.append(line)
278
+ if add_after is not None:
279
+ new_lines.append(content)
280
+ else:
281
+ new_lines.append(line)
282
+
283
+ return "\n".join(new_lines)
284
+
285
+
286
+ def add_content_to_file(
287
+ file_name: Union[str, os.PathLike],
288
+ content: str,
289
+ add_after: Optional[Union[str, Pattern]] = None,
290
+ add_before: Optional[Union[str, Pattern]] = None,
291
+ exact_match: bool = False,
292
+ ):
293
+ """
294
+ A utility to add some content inside a given file.
295
+
296
+ Args:
297
+ file_name (`str` or `os.PathLike`): The name of the file in which we want to insert some content.
298
+ content (`str`): The content to add.
299
+ add_after (`str` or `Pattern`):
300
+ The pattern to test on a line of `text`, the new content is added after the first instance matching it.
301
+ add_before (`str` or `Pattern`):
302
+ The pattern to test on a line of `text`, the new content is added before the first instance matching it.
303
+ exact_match (`bool`, *optional*, defaults to `False`):
304
+ A line is considered a match with `add_after` or `add_before` if it matches exactly when `exact_match=True`,
305
+ otherwise, if `add_after`/`add_before` is present in the line.
306
+
307
+ <Tip warning={true}>
308
+
309
+ The arguments `add_after` and `add_before` are mutually exclusive, and one exactly needs to be provided.
310
+
311
+ </Tip>
312
+ """
313
+ with open(file_name, "r", encoding="utf-8") as f:
314
+ old_content = f.read()
315
+
316
+ new_content = add_content_to_text(
317
+ old_content, content, add_after=add_after, add_before=add_before, exact_match=exact_match
318
+ )
319
+
320
+ with open(file_name, "w", encoding="utf-8") as f:
321
+ f.write(new_content)
322
+
323
+
324
+ def replace_model_patterns(
325
+ text: str, old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns
326
+ ) -> Tuple[str, str]:
327
+ """
328
+ Replace all patterns present in a given text.
329
+
330
+ Args:
331
+ text (`str`): The text to treat.
332
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
333
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
334
+
335
+ Returns:
336
+ `Tuple(str, str)`: A tuple of with the treated text and the replacement actually done in it.
337
+ """
338
+ # The order is crucially important as we will check and replace in that order. For instance the config probably
339
+ # contains the camel-cased named, but will be treated before.
340
+ attributes_to_check = ["config_class"]
341
+ # Add relevant preprocessing classes
342
+ for attr in ["tokenizer_class", "image_processor_class", "feature_extractor_class", "processor_class"]:
343
+ if getattr(old_model_patterns, attr) is not None and getattr(new_model_patterns, attr) is not None:
344
+ attributes_to_check.append(attr)
345
+
346
+ # Special cases for checkpoint and model_type
347
+ if old_model_patterns.checkpoint not in [old_model_patterns.model_type, old_model_patterns.model_lower_cased]:
348
+ attributes_to_check.append("checkpoint")
349
+ if old_model_patterns.model_type != old_model_patterns.model_lower_cased:
350
+ attributes_to_check.append("model_type")
351
+ else:
352
+ text = re.sub(
353
+ rf'(\s*)model_type = "{old_model_patterns.model_type}"',
354
+ r'\1model_type = "[MODEL_TYPE]"',
355
+ text,
356
+ )
357
+
358
+ # Special case when the model camel cased and upper cased names are the same for the old model (like for GPT2) but
359
+ # not the new one. We can't just do a replace in all the text and will need a special regex
360
+ if old_model_patterns.model_upper_cased == old_model_patterns.model_camel_cased:
361
+ old_model_value = old_model_patterns.model_upper_cased
362
+ if re.search(rf"{old_model_value}_[A-Z_]*[^A-Z_]", text) is not None:
363
+ text = re.sub(rf"{old_model_value}([A-Z_]*)([^a-zA-Z_])", r"[MODEL_UPPER_CASED]\1\2", text)
364
+ else:
365
+ attributes_to_check.append("model_upper_cased")
366
+
367
+ attributes_to_check.extend(["model_camel_cased", "model_lower_cased", "model_name"])
368
+
369
+ # Now let's replace every other attribute by their placeholder
370
+ for attr in attributes_to_check:
371
+ text = text.replace(getattr(old_model_patterns, attr), ATTRIBUTE_TO_PLACEHOLDER[attr])
372
+
373
+ # Finally we can replace the placeholder byt the new values.
374
+ replacements = []
375
+ for attr, placeholder in ATTRIBUTE_TO_PLACEHOLDER.items():
376
+ if placeholder in text:
377
+ replacements.append((getattr(old_model_patterns, attr), getattr(new_model_patterns, attr)))
378
+ text = text.replace(placeholder, getattr(new_model_patterns, attr))
379
+
380
+ # If we have two inconsistent replacements, we don't return anything (ex: GPT2->GPT_NEW and GPT2->GPTNew)
381
+ old_replacement_values = [old for old, new in replacements]
382
+ if len(set(old_replacement_values)) != len(old_replacement_values):
383
+ return text, ""
384
+
385
+ replacements = simplify_replacements(replacements)
386
+ replacements = [f"{old}->{new}" for old, new in replacements]
387
+ return text, ",".join(replacements)
388
+
389
+
390
+ def simplify_replacements(replacements):
391
+ """
392
+ Simplify a list of replacement patterns to make sure there are no needless ones.
393
+
394
+ For instance in the sequence "Bert->BertNew, BertConfig->BertNewConfig, bert->bert_new", the replacement
395
+ "BertConfig->BertNewConfig" is implied by "Bert->BertNew" so not needed.
396
+
397
+ Args:
398
+ replacements (`List[Tuple[str, str]]`): List of patterns (old, new)
399
+
400
+ Returns:
401
+ `List[Tuple[str, str]]`: The list of patterns simplified.
402
+ """
403
+ if len(replacements) <= 1:
404
+ # Nothing to simplify
405
+ return replacements
406
+
407
+ # Next let's sort replacements by length as a replacement can only "imply" another replacement if it's shorter.
408
+ replacements.sort(key=lambda x: len(x[0]))
409
+
410
+ idx = 0
411
+ while idx < len(replacements):
412
+ old, new = replacements[idx]
413
+ # Loop through all replacements after
414
+ j = idx + 1
415
+ while j < len(replacements):
416
+ old_2, new_2 = replacements[j]
417
+ # If the replacement is implied by the current one, we can drop it.
418
+ if old_2.replace(old, new) == new_2:
419
+ replacements.pop(j)
420
+ else:
421
+ j += 1
422
+ idx += 1
423
+
424
+ return replacements
425
+
426
+
427
+ def get_module_from_file(module_file: Union[str, os.PathLike]) -> str:
428
+ """
429
+ Returns the module name corresponding to a module file.
430
+ """
431
+ full_module_path = Path(module_file).absolute()
432
+ module_parts = full_module_path.with_suffix("").parts
433
+
434
+ # Find the first part named transformers, starting from the end.
435
+ idx = len(module_parts) - 1
436
+ while idx >= 0 and module_parts[idx] != "transformers":
437
+ idx -= 1
438
+ if idx < 0:
439
+ raise ValueError(f"{module_file} is not a transformers module.")
440
+
441
+ return ".".join(module_parts[idx:])
442
+
443
+
444
+ SPECIAL_PATTERNS = {
445
+ "_CHECKPOINT_FOR_DOC =": "checkpoint",
446
+ "_CONFIG_FOR_DOC =": "config_class",
447
+ "_TOKENIZER_FOR_DOC =": "tokenizer_class",
448
+ "_IMAGE_PROCESSOR_FOR_DOC =": "image_processor_class",
449
+ "_FEAT_EXTRACTOR_FOR_DOC =": "feature_extractor_class",
450
+ "_PROCESSOR_FOR_DOC =": "processor_class",
451
+ }
452
+
453
+
454
+ _re_class_func = re.compile(r"^(?:class|def)\s+([^\s:\(]+)\s*(?:\(|\:)", flags=re.MULTILINE)
455
+
456
+
457
+ def remove_attributes(obj, target_attr):
458
+ """Remove `target_attr` in `obj`."""
459
+ lines = obj.split(os.linesep)
460
+
461
+ target_idx = None
462
+ for idx, line in enumerate(lines):
463
+ # search for assignment
464
+ if line.lstrip().startswith(f"{target_attr} = "):
465
+ target_idx = idx
466
+ break
467
+ # search for function/method definition
468
+ elif line.lstrip().startswith(f"def {target_attr}("):
469
+ target_idx = idx
470
+ break
471
+
472
+ # target not found
473
+ if target_idx is None:
474
+ return obj
475
+
476
+ line = lines[target_idx]
477
+ indent_level = find_indent(line)
478
+ # forward pass to find the ending of the block (including empty lines)
479
+ parsed = extract_block("\n".join(lines[target_idx:]), indent_level)
480
+ num_lines = len(parsed.split("\n"))
481
+ for idx in range(num_lines):
482
+ lines[target_idx + idx] = None
483
+
484
+ # backward pass to find comments or decorator
485
+ for idx in range(target_idx - 1, -1, -1):
486
+ line = lines[idx]
487
+ if (line.lstrip().startswith("#") or line.lstrip().startswith("@")) and find_indent(line) == indent_level:
488
+ lines[idx] = None
489
+ else:
490
+ break
491
+
492
+ new_obj = os.linesep.join([x for x in lines if x is not None])
493
+
494
+ return new_obj
495
+
496
+
497
+ def duplicate_module(
498
+ module_file: Union[str, os.PathLike],
499
+ old_model_patterns: ModelPatterns,
500
+ new_model_patterns: ModelPatterns,
501
+ dest_file: Optional[str] = None,
502
+ add_copied_from: bool = True,
503
+ attrs_to_remove: List[str] = None,
504
+ ):
505
+ """
506
+ Create a new module from an existing one and adapting all function and classes names from old patterns to new ones.
507
+
508
+ Args:
509
+ module_file (`str` or `os.PathLike`): Path to the module to duplicate.
510
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
511
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
512
+ dest_file (`str` or `os.PathLike`, *optional*): Path to the new module.
513
+ add_copied_from (`bool`, *optional*, defaults to `True`):
514
+ Whether or not to add `# Copied from` statements in the duplicated module.
515
+ """
516
+ if dest_file is None:
517
+ dest_file = str(module_file).replace(
518
+ old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
519
+ )
520
+
521
+ with open(module_file, "r", encoding="utf-8") as f:
522
+ content = f.read()
523
+
524
+ content = re.sub(r"# Copyright (\d+)\s", f"# Copyright {CURRENT_YEAR} ", content)
525
+ objects = parse_module_content(content)
526
+
527
+ # Loop and treat all objects
528
+ new_objects = []
529
+ for obj in objects:
530
+ # Special cases
531
+ if "PRETRAINED_CONFIG_ARCHIVE_MAP = {" in obj:
532
+ # docstyle-ignore
533
+ obj = (
534
+ f"{new_model_patterns.model_upper_cased}_PRETRAINED_CONFIG_ARCHIVE_MAP = "
535
+ + "{"
536
+ + f"""
537
+ "{new_model_patterns.checkpoint}": "https://huggingface.co/{new_model_patterns.checkpoint}/resolve/main/config.json",
538
+ """
539
+ + "}\n"
540
+ )
541
+ new_objects.append(obj)
542
+ continue
543
+ elif "PRETRAINED_MODEL_ARCHIVE_LIST = [" in obj:
544
+ if obj.startswith("TF_"):
545
+ prefix = "TF_"
546
+ elif obj.startswith("FLAX_"):
547
+ prefix = "FLAX_"
548
+ else:
549
+ prefix = ""
550
+ # docstyle-ignore
551
+ obj = f"""{prefix}{new_model_patterns.model_upper_cased}_PRETRAINED_MODEL_ARCHIVE_LIST = [
552
+ "{new_model_patterns.checkpoint}",
553
+ # See all {new_model_patterns.model_name} models at https://huggingface.co/models?filter={new_model_patterns.model_type}
554
+ ]
555
+ """
556
+ new_objects.append(obj)
557
+ continue
558
+
559
+ special_pattern = False
560
+ for pattern, attr in SPECIAL_PATTERNS.items():
561
+ if pattern in obj:
562
+ obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))
563
+ new_objects.append(obj)
564
+ special_pattern = True
565
+ break
566
+
567
+ if special_pattern:
568
+ continue
569
+
570
+ # Regular classes functions
571
+ old_obj = obj
572
+ obj, replacement = replace_model_patterns(obj, old_model_patterns, new_model_patterns)
573
+ has_copied_from = re.search(r"^#\s+Copied from", obj, flags=re.MULTILINE) is not None
574
+ if add_copied_from and not has_copied_from and _re_class_func.search(obj) is not None and len(replacement) > 0:
575
+ # Copied from statement must be added just before the class/function definition, which may not be the
576
+ # first line because of decorators.
577
+ module_name = get_module_from_file(module_file)
578
+ old_object_name = _re_class_func.search(old_obj).groups()[0]
579
+ obj = add_content_to_text(
580
+ obj, f"# Copied from {module_name}.{old_object_name} with {replacement}", add_before=_re_class_func
581
+ )
582
+ # In all cases, we remove Copied from statement with indent on methods.
583
+ obj = re.sub("\n[ ]+# Copied from [^\n]*\n", "\n", obj)
584
+
585
+ new_objects.append(obj)
586
+
587
+ content = "\n".join(new_objects)
588
+ # Remove some attributes that we don't want to copy to the new file(s)
589
+ if attrs_to_remove is not None:
590
+ for attr in attrs_to_remove:
591
+ content = remove_attributes(content, target_attr=attr)
592
+
593
+ with open(dest_file, "w", encoding="utf-8") as f:
594
+ f.write(content)
595
+
596
+
597
+ def filter_framework_files(
598
+ files: List[Union[str, os.PathLike]], frameworks: Optional[List[str]] = None
599
+ ) -> List[Union[str, os.PathLike]]:
600
+ """
601
+ Filter a list of files to only keep the ones corresponding to a list of frameworks.
602
+
603
+ Args:
604
+ files (`List[Union[str, os.PathLike]]`): The list of files to filter.
605
+ frameworks (`List[str]`, *optional*): The list of allowed frameworks.
606
+
607
+ Returns:
608
+ `List[Union[str, os.PathLike]]`: The list of filtered files.
609
+ """
610
+ if frameworks is None:
611
+ frameworks = get_default_frameworks()
612
+
613
+ framework_to_file = {}
614
+ others = []
615
+ for f in files:
616
+ parts = Path(f).name.split("_")
617
+ if "modeling" not in parts:
618
+ others.append(f)
619
+ continue
620
+ if "tf" in parts:
621
+ framework_to_file["tf"] = f
622
+ elif "flax" in parts:
623
+ framework_to_file["flax"] = f
624
+ else:
625
+ framework_to_file["pt"] = f
626
+
627
+ return [framework_to_file[f] for f in frameworks if f in framework_to_file] + others
628
+
629
+
630
+ def get_model_files(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, Union[Path, List[Path]]]:
631
+ """
632
+ Retrieves all the files associated to a model.
633
+
634
+ Args:
635
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
636
+ frameworks (`List[str]`, *optional*):
637
+ If passed, will only keep the model files corresponding to the passed frameworks.
638
+
639
+ Returns:
640
+ `Dict[str, Union[Path, List[Path]]]`: A dictionary with the following keys:
641
+ - **doc_file** -- The documentation file for the model.
642
+ - **model_files** -- All the files in the model module.
643
+ - **test_files** -- The test files for the model.
644
+ """
645
+ module_name = model_type_to_module_name(model_type)
646
+
647
+ model_module = TRANSFORMERS_PATH / "models" / module_name
648
+ model_files = list(model_module.glob("*.py"))
649
+ model_files = filter_framework_files(model_files, frameworks=frameworks)
650
+
651
+ doc_file = REPO_PATH / "docs" / "source" / "en" / "model_doc" / f"{model_type}.md"
652
+
653
+ # Basic pattern for test files
654
+ test_files = [
655
+ f"test_modeling_{module_name}.py",
656
+ f"test_modeling_tf_{module_name}.py",
657
+ f"test_modeling_flax_{module_name}.py",
658
+ f"test_tokenization_{module_name}.py",
659
+ f"test_image_processing_{module_name}.py",
660
+ f"test_feature_extraction_{module_name}.py",
661
+ f"test_processor_{module_name}.py",
662
+ ]
663
+ test_files = filter_framework_files(test_files, frameworks=frameworks)
664
+ # Add the test directory
665
+ test_files = [REPO_PATH / "tests" / "models" / module_name / f for f in test_files]
666
+ # Filter by existing files
667
+ test_files = [f for f in test_files if f.exists()]
668
+
669
+ return {"doc_file": doc_file, "model_files": model_files, "module_name": module_name, "test_files": test_files}
670
+
671
+
672
+ _re_checkpoint_for_doc = re.compile(r"^_CHECKPOINT_FOR_DOC\s+=\s+(\S*)\s*$", flags=re.MULTILINE)
673
+
674
+
675
+ def find_base_model_checkpoint(
676
+ model_type: str, model_files: Optional[Dict[str, Union[Path, List[Path]]]] = None
677
+ ) -> str:
678
+ """
679
+ Finds the model checkpoint used in the docstrings for a given model.
680
+
681
+ Args:
682
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
683
+ model_files (`Dict[str, Union[Path, List[Path]]`, *optional*):
684
+ The files associated to `model_type`. Can be passed to speed up the function, otherwise will be computed.
685
+
686
+ Returns:
687
+ `str`: The checkpoint used.
688
+ """
689
+ if model_files is None:
690
+ model_files = get_model_files(model_type)
691
+ module_files = model_files["model_files"]
692
+ for fname in module_files:
693
+ if "modeling" not in str(fname):
694
+ continue
695
+
696
+ with open(fname, "r", encoding="utf-8") as f:
697
+ content = f.read()
698
+ if _re_checkpoint_for_doc.search(content) is not None:
699
+ checkpoint = _re_checkpoint_for_doc.search(content).groups()[0]
700
+ # Remove quotes
701
+ checkpoint = checkpoint.replace('"', "")
702
+ checkpoint = checkpoint.replace("'", "")
703
+ return checkpoint
704
+
705
+ # TODO: Find some kind of fallback if there is no _CHECKPOINT_FOR_DOC in any of the modeling file.
706
+ return ""
707
+
708
+
709
+ def get_default_frameworks():
710
+ """
711
+ Returns the list of frameworks (PyTorch, TensorFlow, Flax) that are installed in the environment.
712
+ """
713
+ frameworks = []
714
+ if is_torch_available():
715
+ frameworks.append("pt")
716
+ if is_tf_available():
717
+ frameworks.append("tf")
718
+ if is_flax_available():
719
+ frameworks.append("flax")
720
+ return frameworks
721
+
722
+
723
+ _re_model_mapping = re.compile("MODEL_([A-Z_]*)MAPPING_NAMES")
724
+
725
+
726
+ def retrieve_model_classes(model_type: str, frameworks: Optional[List[str]] = None) -> Dict[str, List[str]]:
727
+ """
728
+ Retrieve the model classes associated to a given model.
729
+
730
+ Args:
731
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
732
+ frameworks (`List[str]`, *optional*):
733
+ The frameworks to look for. Will default to `["pt", "tf", "flax"]`, passing a smaller list will restrict
734
+ the classes returned.
735
+
736
+ Returns:
737
+ `Dict[str, List[str]]`: A dictionary with one key per framework and the list of model classes associated to
738
+ that framework as values.
739
+ """
740
+ if frameworks is None:
741
+ frameworks = get_default_frameworks()
742
+
743
+ modules = {
744
+ "pt": auto_module.modeling_auto if is_torch_available() else None,
745
+ "tf": auto_module.modeling_tf_auto if is_tf_available() else None,
746
+ "flax": auto_module.modeling_flax_auto if is_flax_available() else None,
747
+ }
748
+
749
+ model_classes = {}
750
+ for framework in frameworks:
751
+ new_model_classes = []
752
+ if modules[framework] is None:
753
+ raise ValueError(f"You selected {framework} in the frameworks, but it is not installed.")
754
+ model_mappings = [attr for attr in dir(modules[framework]) if _re_model_mapping.search(attr) is not None]
755
+ for model_mapping_name in model_mappings:
756
+ model_mapping = getattr(modules[framework], model_mapping_name)
757
+ if model_type in model_mapping:
758
+ new_model_classes.append(model_mapping[model_type])
759
+
760
+ if len(new_model_classes) > 0:
761
+ # Remove duplicates
762
+ model_classes[framework] = list(set(new_model_classes))
763
+
764
+ return model_classes
765
+
766
+
767
+ def retrieve_info_for_model(model_type, frameworks: Optional[List[str]] = None):
768
+ """
769
+ Retrieves all the information from a given model_type.
770
+
771
+ Args:
772
+ model_type (`str`): A valid model type (like "bert" or "gpt2")
773
+ frameworks (`List[str]`, *optional*):
774
+ If passed, will only keep the info corresponding to the passed frameworks.
775
+
776
+ Returns:
777
+ `Dict`: A dictionary with the following keys:
778
+ - **frameworks** (`List[str]`): The list of frameworks that back this model type.
779
+ - **model_classes** (`Dict[str, List[str]]`): The model classes implemented for that model type.
780
+ - **model_files** (`Dict[str, Union[Path, List[Path]]]`): The files associated with that model type.
781
+ - **model_patterns** (`ModelPatterns`): The various patterns for the model.
782
+ """
783
+ if model_type not in auto_module.MODEL_NAMES_MAPPING:
784
+ raise ValueError(f"{model_type} is not a valid model type.")
785
+
786
+ model_name = auto_module.MODEL_NAMES_MAPPING[model_type]
787
+ config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type]
788
+ archive_map = auto_module.configuration_auto.CONFIG_ARCHIVE_MAP_MAPPING_NAMES.get(model_type, None)
789
+ if model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES:
790
+ tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type]
791
+ tokenizer_class = tokenizer_classes[0] if tokenizer_classes[0] is not None else tokenizer_classes[1]
792
+ else:
793
+ tokenizer_class = None
794
+ image_processor_class = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None)
795
+ feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None)
796
+ processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None)
797
+
798
+ model_files = get_model_files(model_type, frameworks=frameworks)
799
+ model_camel_cased = config_class.replace("Config", "")
800
+
801
+ available_frameworks = []
802
+ for fname in model_files["model_files"]:
803
+ if "modeling_tf" in str(fname):
804
+ available_frameworks.append("tf")
805
+ elif "modeling_flax" in str(fname):
806
+ available_frameworks.append("flax")
807
+ elif "modeling" in str(fname):
808
+ available_frameworks.append("pt")
809
+
810
+ if frameworks is None:
811
+ frameworks = get_default_frameworks()
812
+
813
+ frameworks = [f for f in frameworks if f in available_frameworks]
814
+
815
+ model_classes = retrieve_model_classes(model_type, frameworks=frameworks)
816
+
817
+ # Retrieve model upper-cased name from the constant name of the pretrained archive map.
818
+ if archive_map is None:
819
+ model_upper_cased = model_camel_cased.upper()
820
+ else:
821
+ parts = archive_map.split("_")
822
+ idx = 0
823
+ while idx < len(parts) and parts[idx] != "PRETRAINED":
824
+ idx += 1
825
+ if idx < len(parts):
826
+ model_upper_cased = "_".join(parts[:idx])
827
+ else:
828
+ model_upper_cased = model_camel_cased.upper()
829
+
830
+ model_patterns = ModelPatterns(
831
+ model_name,
832
+ checkpoint=find_base_model_checkpoint(model_type, model_files=model_files),
833
+ model_type=model_type,
834
+ model_camel_cased=model_camel_cased,
835
+ model_lower_cased=model_files["module_name"],
836
+ model_upper_cased=model_upper_cased,
837
+ config_class=config_class,
838
+ tokenizer_class=tokenizer_class,
839
+ image_processor_class=image_processor_class,
840
+ feature_extractor_class=feature_extractor_class,
841
+ processor_class=processor_class,
842
+ )
843
+
844
+ return {
845
+ "frameworks": frameworks,
846
+ "model_classes": model_classes,
847
+ "model_files": model_files,
848
+ "model_patterns": model_patterns,
849
+ }
850
+
851
+
852
+ def clean_frameworks_in_init(
853
+ init_file: Union[str, os.PathLike], frameworks: Optional[List[str]] = None, keep_processing: bool = True
854
+ ):
855
+ """
856
+ Removes all the import lines that don't belong to a given list of frameworks or concern tokenizers/feature
857
+ extractors/image processors/processors in an init.
858
+
859
+ Args:
860
+ init_file (`str` or `os.PathLike`): The path to the init to treat.
861
+ frameworks (`List[str]`, *optional*):
862
+ If passed, this will remove all imports that are guarded by a framework not in `frameworks`.
863
+ keep_processing (`bool`, *optional*, defaults to `True`):
864
+ Whether or not to keep the preprocessing (tokenizer, feature extractor, image processor, processor) imports
865
+ in the init.
866
+ """
867
+ if frameworks is None:
868
+ frameworks = get_default_frameworks()
869
+
870
+ names = {"pt": "torch"}
871
+ to_remove = [names.get(f, f) for f in ["pt", "tf", "flax"] if f not in frameworks]
872
+ if not keep_processing:
873
+ to_remove.extend(["sentencepiece", "tokenizers", "vision"])
874
+
875
+ if len(to_remove) == 0:
876
+ # Nothing to do
877
+ return
878
+
879
+ remove_pattern = "|".join(to_remove)
880
+ re_conditional_imports = re.compile(rf"^\s*if not is_({remove_pattern})_available\(\):\s*$")
881
+ re_try = re.compile(r"\s*try:")
882
+ re_else = re.compile(r"\s*else:")
883
+ re_is_xxx_available = re.compile(rf"is_({remove_pattern})_available")
884
+
885
+ with open(init_file, "r", encoding="utf-8") as f:
886
+ content = f.read()
887
+
888
+ lines = content.split("\n")
889
+ new_lines = []
890
+ idx = 0
891
+ while idx < len(lines):
892
+ # Conditional imports in try-except-else blocks
893
+ if (re_conditional_imports.search(lines[idx]) is not None) and (re_try.search(lines[idx - 1]) is not None):
894
+ # Remove the preceding `try:`
895
+ new_lines.pop()
896
+ idx += 1
897
+ # Iterate until `else:`
898
+ while is_empty_line(lines[idx]) or re_else.search(lines[idx]) is None:
899
+ idx += 1
900
+ idx += 1
901
+ indent = find_indent(lines[idx])
902
+ while find_indent(lines[idx]) >= indent or is_empty_line(lines[idx]):
903
+ idx += 1
904
+ # Remove the import from utils
905
+ elif re_is_xxx_available.search(lines[idx]) is not None:
906
+ line = lines[idx]
907
+ for framework in to_remove:
908
+ line = line.replace(f", is_{framework}_available", "")
909
+ line = line.replace(f"is_{framework}_available, ", "")
910
+ line = line.replace(f"is_{framework}_available,", "")
911
+ line = line.replace(f"is_{framework}_available", "")
912
+
913
+ if len(line.strip()) > 0:
914
+ new_lines.append(line)
915
+ idx += 1
916
+ # Otherwise we keep the line, except if it's a tokenizer import and we don't want to keep it.
917
+ elif keep_processing or (
918
+ re.search(r'^\s*"(tokenization|processing|feature_extraction|image_processing)', lines[idx]) is None
919
+ and re.search(r"^\s*from .(tokenization|processing|feature_extraction|image_processing)", lines[idx])
920
+ is None
921
+ ):
922
+ new_lines.append(lines[idx])
923
+ idx += 1
924
+ else:
925
+ idx += 1
926
+
927
+ with open(init_file, "w", encoding="utf-8") as f:
928
+ f.write("\n".join(new_lines))
929
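+ # Illustrative usage (hypothetical path; a sketch, not part of the original module):
+ # >>> clean_frameworks_in_init(
+ # ...     "src/transformers/models/my_new_model/__init__.py",
+ # ...     frameworks=["pt"],
+ # ...     keep_processing=False,
+ # ... )
+ # This drops the TF/Flax guarded import blocks and the tokenizer/feature extractor/
+ # image processor/processor imports from the duplicated init, keeping only the PyTorch objects.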
+
930
+
931
+ def add_model_to_main_init(
932
+ old_model_patterns: ModelPatterns,
933
+ new_model_patterns: ModelPatterns,
934
+ frameworks: Optional[List[str]] = None,
935
+ with_processing: bool = True,
936
+ ):
937
+ """
938
+ Add a model to the main init of Transformers.
939
+
940
+ Args:
941
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
942
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
943
+ frameworks (`List[str]`, *optional*):
944
+ If specified, only the models implemented in those frameworks will be added.
945
+ with_processing (`bool`, *optional*, defaults to `True`):
946
+ Whether the tokenizer/feature extractor/processor of the model should also be added to the init or not.
947
+ """
948
+ with open(TRANSFORMERS_PATH / "__init__.py", "r", encoding="utf-8") as f:
949
+ content = f.read()
950
+
951
+ lines = content.split("\n")
952
+ idx = 0
953
+ new_lines = []
954
+ framework = None
955
+ while idx < len(lines):
956
+ new_framework = False
957
+ if not is_empty_line(lines[idx]) and find_indent(lines[idx]) == 0:
958
+ framework = None
959
+ elif lines[idx].lstrip().startswith("if not is_torch_available"):
960
+ framework = "pt"
961
+ new_framework = True
962
+ elif lines[idx].lstrip().startswith("if not is_tf_available"):
963
+ framework = "tf"
964
+ new_framework = True
965
+ elif lines[idx].lstrip().startswith("if not is_flax_available"):
966
+ framework = "flax"
967
+ new_framework = True
968
+
969
+ if new_framework:
970
+ # For a new framework, we need to skip until the else: block to get where the imports are.
971
+ while lines[idx].strip() != "else:":
972
+ new_lines.append(lines[idx])
973
+ idx += 1
974
+
975
+ # Skip if we are in a framework not wanted.
976
+ if framework is not None and frameworks is not None and framework not in frameworks:
977
+ new_lines.append(lines[idx])
978
+ idx += 1
979
+ elif re.search(rf'models.{old_model_patterns.model_lower_cased}( |")', lines[idx]) is not None:
980
+ block = [lines[idx]]
981
+ indent = find_indent(lines[idx])
982
+ idx += 1
983
+ while find_indent(lines[idx]) > indent:
984
+ block.append(lines[idx])
985
+ idx += 1
986
+ if lines[idx].strip() in [")", "]", "],"]:
987
+ block.append(lines[idx])
988
+ idx += 1
989
+ block = "\n".join(block)
990
+ new_lines.append(block)
991
+
992
+ add_block = True
993
+ if not with_processing:
994
+ processing_classes = [
995
+ old_model_patterns.tokenizer_class,
996
+ old_model_patterns.image_processor_class,
997
+ old_model_patterns.feature_extractor_class,
998
+ old_model_patterns.processor_class,
999
+ ]
1000
+ # Only keep the ones that are not None
1001
+ processing_classes = [c for c in processing_classes if c is not None]
1002
+ for processing_class in processing_classes:
1003
+ block = block.replace(f' "{processing_class}",', "")
1004
+ block = block.replace(f', "{processing_class}"', "")
1005
+ block = block.replace(f" {processing_class},", "")
1006
+ block = block.replace(f", {processing_class}", "")
1007
+
1008
+ if processing_class in block:
1009
+ add_block = False
1010
+ if add_block:
1011
+ new_lines.append(replace_model_patterns(block, old_model_patterns, new_model_patterns)[0])
1012
+ else:
1013
+ new_lines.append(lines[idx])
1014
+ idx += 1
1015
+
1016
+ with open(TRANSFORMERS_PATH / "__init__.py", "w", encoding="utf-8") as f:
1017
+ f.write("\n".join(new_lines))
1018
+
1019
+
1020
+ def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns):
1021
+ """
1022
+ Add a tokenizer to the relevant mappings in the auto module.
1023
+
1024
+ Args:
1025
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
1026
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
1027
+ """
1028
+ if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None:
1029
+ return
1030
+
1031
+ with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "r", encoding="utf-8") as f:
1032
+ content = f.read()
1033
+
1034
+ lines = content.split("\n")
1035
+ idx = 0
1036
+ # First we get to the TOKENIZER_MAPPING_NAMES block.
1037
+ while not lines[idx].startswith(" TOKENIZER_MAPPING_NAMES = OrderedDict("):
1038
+ idx += 1
1039
+ idx += 1
1040
+
1041
+ # That block ends when we reach this line:
1042
+ while not lines[idx].startswith("TOKENIZER_MAPPING = _LazyAutoMapping"):
1043
+ # Either the whole tokenizer entry is defined on one line, in which case it ends with "),"
1044
+ if lines[idx].endswith(","):
1045
+ block = lines[idx]
1046
+ # Otherwise it takes several lines until we get to a "),"
1047
+ else:
1048
+ block = []
1049
+ while not lines[idx].startswith(" ),"):
1050
+ block.append(lines[idx])
1051
+ idx += 1
1052
+ block = "\n".join(block)
1053
+ idx += 1
1054
+
1055
+ # If we find the model type and tokenizer class in that block, we have the old model tokenizer block
1056
+ if f'"{old_model_patterns.model_type}"' in block and old_model_patterns.tokenizer_class in block:
1057
+ break
1058
+
1059
+ new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)
1060
+ new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)
1061
+
1062
+ new_lines = lines[:idx] + [new_block] + lines[idx:]
1063
+ with open(TRANSFORMERS_PATH / "models" / "auto" / "tokenization_auto.py", "w", encoding="utf-8") as f:
1064
+ f.write("\n".join(new_lines))
1065
+
1066
+
1067
+ AUTO_CLASSES_PATTERNS = {
1068
+ "configuration_auto.py": [
1069
+ ' ("{model_type}", "{model_name}"),',
1070
+ ' ("{model_type}", "{config_class}"),',
1071
+ ' ("{model_type}", "{pretrained_archive_map}"),',
1072
+ ],
1073
+ "feature_extraction_auto.py": [' ("{model_type}", "{feature_extractor_class}"),'],
1074
+ "image_processing_auto.py": [' ("{model_type}", "{image_processor_class}"),'],
1075
+ "modeling_auto.py": [' ("{model_type}", "{any_pt_class}"),'],
1076
+ "modeling_tf_auto.py": [' ("{model_type}", "{any_tf_class}"),'],
1077
+ "modeling_flax_auto.py": [' ("{model_type}", "{any_flax_class}"),'],
1078
+ "processing_auto.py": [' ("{model_type}", "{processor_class}"),'],
1079
+ }
1080
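+ # Illustrative sketch (hypothetical model type "my-bert", not part of the original mapping):
+ # for "configuration_auto.py", the patterns above would be formatted into lines such as
+ #     ("my-bert", "MyBert"),
+ #     ("my-bert", "MyBertConfig"),
+ # which `add_model_to_auto_classes` below inserts right after the matching lines of the
+ # model being duplicated.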
+
1081
+
1082
+ def add_model_to_auto_classes(
1083
+ old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, model_classes: Dict[str, List[str]]
1084
+ ):
1085
+ """
1086
+ Add a model to the relevant mappings in the auto module.
1087
+
1088
+ Args:
1089
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
1090
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
1091
+ model_classes (`Dict[str, List[str]]`): A dictionary framework to list of model classes implemented.
1092
+ """
1093
+ for filename in AUTO_CLASSES_PATTERNS:
1094
+ # Extend patterns with all model classes if necessary
1095
+ new_patterns = []
1096
+ for pattern in AUTO_CLASSES_PATTERNS[filename]:
1097
+ if re.search("any_([a-z]*)_class", pattern) is not None:
1098
+ framework = re.search("any_([a-z]*)_class", pattern).groups()[0]
1099
+ if framework in model_classes:
1100
+ new_patterns.extend(
1101
+ [
1102
+ pattern.replace("{" + f"any_{framework}_class" + "}", cls)
1103
+ for cls in model_classes[framework]
1104
+ ]
1105
+ )
1106
+ elif "{config_class}" in pattern:
1107
+ new_patterns.append(pattern.replace("{config_class}", old_model_patterns.config_class))
1108
+ elif "{image_processor_class}" in pattern:
1109
+ if (
1110
+ old_model_patterns.image_processor_class is not None
1111
+ and new_model_patterns.image_processor_class is not None
1112
+ ):
1113
+ new_patterns.append(
1114
+ pattern.replace("{image_processor_class}", old_model_patterns.image_processor_class)
1115
+ )
1116
+ elif "{feature_extractor_class}" in pattern:
1117
+ if (
1118
+ old_model_patterns.feature_extractor_class is not None
1119
+ and new_model_patterns.feature_extractor_class is not None
1120
+ ):
1121
+ new_patterns.append(
1122
+ pattern.replace("{feature_extractor_class}", old_model_patterns.feature_extractor_class)
1123
+ )
1124
+ elif "{processor_class}" in pattern:
1125
+ if old_model_patterns.processor_class is not None and new_model_patterns.processor_class is not None:
1126
+ new_patterns.append(pattern.replace("{processor_class}", old_model_patterns.processor_class))
1127
+ else:
1128
+ new_patterns.append(pattern)
1129
+
1130
+ # Loop through all patterns.
1131
+ for pattern in new_patterns:
1132
+ full_name = TRANSFORMERS_PATH / "models" / "auto" / filename
1133
+ old_model_line = pattern
1134
+ new_model_line = pattern
1135
+ for attr in ["model_type", "model_name"]:
1136
+ old_model_line = old_model_line.replace("{" + attr + "}", getattr(old_model_patterns, attr))
1137
+ new_model_line = new_model_line.replace("{" + attr + "}", getattr(new_model_patterns, attr))
1138
+ if "pretrained_archive_map" in pattern:
1139
+ old_model_line = old_model_line.replace(
1140
+ "{pretrained_archive_map}", f"{old_model_patterns.model_upper_cased}_PRETRAINED_CONFIG_ARCHIVE_MAP"
1141
+ )
1142
+ new_model_line = new_model_line.replace(
1143
+ "{pretrained_archive_map}", f"{new_model_patterns.model_upper_cased}_PRETRAINED_CONFIG_ARCHIVE_MAP"
1144
+ )
1145
+
1146
+ new_model_line = new_model_line.replace(
1147
+ old_model_patterns.model_camel_cased, new_model_patterns.model_camel_cased
1148
+ )
1149
+
1150
+ add_content_to_file(full_name, new_model_line, add_after=old_model_line)
1151
+
1152
+ # Tokenizers require special handling
1153
+ insert_tokenizer_in_auto_module(old_model_patterns, new_model_patterns)
1154
+
1155
+
1156
+ DOC_OVERVIEW_TEMPLATE = """## Overview
1157
+
1158
+ The {model_name} model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
1159
+ <INSERT SHORT SUMMARY HERE>
1160
+
1161
+ The abstract from the paper is the following:
1162
+
1163
+ *<INSERT PAPER ABSTRACT HERE>*
1164
+
1165
+ Tips:
1166
+
1167
+ <INSERT TIPS ABOUT MODEL HERE>
1168
+
1169
+ This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
1170
+ The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).
1171
+
1172
+ """
1173
+
1174
+
1175
+ def duplicate_doc_file(
1176
+ doc_file: Union[str, os.PathLike],
1177
+ old_model_patterns: ModelPatterns,
1178
+ new_model_patterns: ModelPatterns,
1179
+ dest_file: Optional[Union[str, os.PathLike]] = None,
1180
+ frameworks: Optional[List[str]] = None,
1181
+ ):
1182
+ """
1183
+ Duplicates a documentation file and adapts it for a new model.
1184
+
1185
+ Args:
1186
+ doc_file (`str` or `os.PathLike`): Path to the doc file to duplicate.
1187
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
1188
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
1189
+ dest_file (`str` or `os.PathLike`, *optional*): Path to the new doc file.
1190
+ Will default to a file named `{new_model_patterns.model_type}.md` in the same folder as `doc_file`.
1191
+ frameworks (`List[str]`, *optional*):
1192
+ If passed, will only keep the model classes corresponding to this list of frameworks in the new doc file.
1193
+ """
1194
+ with open(doc_file, "r", encoding="utf-8") as f:
1195
+ content = f.read()
1196
+
1197
+ content = re.sub(r"<!--\s*Copyright (\d+)\s", f"<!--Copyright {CURRENT_YEAR} ", content)
1198
+ if frameworks is None:
1199
+ frameworks = get_default_frameworks()
1200
+ if dest_file is None:
1201
+ dest_file = Path(doc_file).parent / f"{new_model_patterns.model_type}.md"
1202
+
1203
+ # Parse the doc file in blocks. One block per section/header
1204
+ lines = content.split("\n")
1205
+ blocks = []
1206
+ current_block = []
1207
+
1208
+ for line in lines:
1209
+ if line.startswith("#"):
1210
+ blocks.append("\n".join(current_block))
1211
+ current_block = [line]
1212
+ else:
1213
+ current_block.append(line)
1214
+ blocks.append("\n".join(current_block))
1215
+
1216
+ new_blocks = []
1217
+ in_classes = False
1218
+ for block in blocks:
1219
+ # Copyright
1220
+ if not block.startswith("#"):
1221
+ new_blocks.append(block)
1222
+ # Main title
1223
+ elif re.search(r"^#\s+\S+", block) is not None:
1224
+ new_blocks.append(f"# {new_model_patterns.model_name}\n")
1225
+ # The config starts the part of the doc with the classes.
1226
+ elif not in_classes and old_model_patterns.config_class in block.split("\n")[0]:
1227
+ in_classes = True
1228
+ new_blocks.append(DOC_OVERVIEW_TEMPLATE.format(model_name=new_model_patterns.model_name))
1229
+ new_block, _ = replace_model_patterns(block, old_model_patterns, new_model_patterns)
1230
+ new_blocks.append(new_block)
1231
+ # In classes
1232
+ elif in_classes:
1233
+ in_classes = True
1234
+ block_title = block.split("\n")[0]
1235
+ block_class = re.search(r"^#+\s+(\S.*)$", block_title).groups()[0]
1236
+ new_block, _ = replace_model_patterns(block, old_model_patterns, new_model_patterns)
1237
+
1238
+ if "Tokenizer" in block_class:
1239
+ # We only add the tokenizer if necessary
1240
+ if old_model_patterns.tokenizer_class != new_model_patterns.tokenizer_class:
1241
+ new_blocks.append(new_block)
1242
+ elif "ImageProcessor" in block_class:
1243
+ # We only add the image processor if necessary
1244
+ if old_model_patterns.image_processor_class != new_model_patterns.image_processor_class:
1245
+ new_blocks.append(new_block)
1246
+ elif "FeatureExtractor" in block_class:
1247
+ # We only add the feature extractor if necessary
1248
+ if old_model_patterns.feature_extractor_class != new_model_patterns.feature_extractor_class:
1249
+ new_blocks.append(new_block)
1250
+ elif "Processor" in block_class:
1251
+ # We only add the processor if necessary
1252
+ if old_model_patterns.processor_class != new_model_patterns.processor_class:
1253
+ new_blocks.append(new_block)
1254
+ elif block_class.startswith("Flax"):
1255
+ # We only add Flax models if in the selected frameworks
1256
+ if "flax" in frameworks:
1257
+ new_blocks.append(new_block)
1258
+ elif block_class.startswith("TF"):
1259
+ # We only add TF models if in the selected frameworks
1260
+ if "tf" in frameworks:
1261
+ new_blocks.append(new_block)
1262
+ elif len(block_class.split(" ")) == 1:
1263
+ # We only add PyTorch models if in the selected frameworks
1264
+ if "pt" in frameworks:
1265
+ new_blocks.append(new_block)
1266
+ else:
1267
+ new_blocks.append(new_block)
1268
+
1269
+ with open(dest_file, "w", encoding="utf-8") as f:
1270
+ f.write("\n".join(new_blocks))
1271
+
1272
+
1273
+ def insert_model_in_doc_toc(old_model_patterns, new_model_patterns):
1274
+ """
1275
+ Insert the new model in the doc TOC, in the same section as the old model.
1276
+
1277
+ Args:
1278
+ old_model_patterns (`ModelPatterns`): The patterns for the old model.
1279
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
1280
+ """
1281
+ toc_file = REPO_PATH / "docs" / "source" / "en" / "_toctree.yml"
1282
+ with open(toc_file, "r", encoding="utf8") as f:
1283
+ content = yaml.safe_load(f)
1284
+
1285
+ # Get to the model API doc
1286
+ api_idx = 0
1287
+ while content[api_idx]["title"] != "API":
1288
+ api_idx += 1
1289
+ api_doc = content[api_idx]["sections"]
1290
+
1291
+ model_idx = 0
1292
+ while api_doc[model_idx]["title"] != "Models":
1293
+ model_idx += 1
1294
+ model_doc = api_doc[model_idx]["sections"]
1295
+
1296
+ # Find the base model in the Toc
1297
+ old_model_type = old_model_patterns.model_type
1298
+ section_idx = 0
1299
+ while section_idx < len(model_doc):
1300
+ sections = [entry["local"] for entry in model_doc[section_idx]["sections"]]
1301
+ if f"model_doc/{old_model_type}" in sections:
1302
+ break
1303
+
1304
+ section_idx += 1
1305
+
1306
+ if section_idx == len(model_doc):
1307
+ old_model = old_model_patterns.model_name
1308
+ new_model = new_model_patterns.model_name
1309
+ print(f"Did not find {old_model} in the table of contents, so you will need to add {new_model} manually.")
1310
+ return
1311
+
1312
+ # Add the new model in the same toc
1313
+ toc_entry = {"local": f"model_doc/{new_model_patterns.model_type}", "title": new_model_patterns.model_name}
1314
+ model_doc[section_idx]["sections"].append(toc_entry)
1315
+ model_doc[section_idx]["sections"] = sorted(model_doc[section_idx]["sections"], key=lambda s: s["title"].lower())
1316
+ api_doc[model_idx]["sections"] = model_doc
1317
+ content[api_idx]["sections"] = api_doc
1318
+
1319
+ with open(toc_file, "w", encoding="utf-8") as f:
1320
+ f.write(yaml.dump(content, allow_unicode=True))
1321
+
1322
+
1323
+ def create_new_model_like(
1324
+ model_type: str,
1325
+ new_model_patterns: ModelPatterns,
1326
+ add_copied_from: bool = True,
1327
+ frameworks: Optional[List[str]] = None,
1328
+ old_checkpoint: Optional[str] = None,
1329
+ ):
1330
+ """
1331
+ Creates a new model module like a given model of the Transformers library.
1332
+
1333
+ Args:
1334
+ model_type (`str`): The model type to duplicate (like "bert" or "gpt2")
1335
+ new_model_patterns (`ModelPatterns`): The patterns for the new model.
1336
+ add_copied_from (`bool`, *optional*, defaults to `True`):
1337
+ Whether or not to add "Copied from" statements to all classes in the new model modeling files.
1338
+ frameworks (`List[str]`, *optional*):
1339
+ If passed, will limit the duplicate to the frameworks specified.
1340
+ old_checkpoint (`str`, *optional*):
1341
+ The name of the base checkpoint for the old model. Should be passed along when it can't be automatically
1342
+ recovered from the `model_type`.
1343
+ """
1344
+ # Retrieve all the old model info.
1345
+ model_info = retrieve_info_for_model(model_type, frameworks=frameworks)
1346
+ model_files = model_info["model_files"]
1347
+ old_model_patterns = model_info["model_patterns"]
1348
+ if old_checkpoint is not None:
1349
+ old_model_patterns.checkpoint = old_checkpoint
1350
+ if len(old_model_patterns.checkpoint) == 0:
1351
+ raise ValueError(
1352
+ "The old model checkpoint could not be recovered from the model type. Please pass it to the "
1353
+ "`old_checkpoint` argument."
1354
+ )
1355
+
1356
+ keep_old_processing = True
1357
+ for processing_attr in ["image_processor_class", "feature_extractor_class", "processor_class", "tokenizer_class"]:
1358
+ if getattr(old_model_patterns, processing_attr) != getattr(new_model_patterns, processing_attr):
1359
+ keep_old_processing = False
1360
+
1361
+ model_classes = model_info["model_classes"]
1362
+
1363
+ # 1. We create the module for our new model.
1364
+ old_module_name = model_files["module_name"]
1365
+ module_folder = TRANSFORMERS_PATH / "models" / new_model_patterns.model_lower_cased
1366
+ os.makedirs(module_folder, exist_ok=True)
1367
+
1368
+ files_to_adapt = model_files["model_files"]
1369
+ if keep_old_processing:
1370
+ files_to_adapt = [
1371
+ f
1372
+ for f in files_to_adapt
1373
+ if "tokenization" not in str(f)
1374
+ and "processing" not in str(f)
1375
+ and "feature_extraction" not in str(f)
1376
+ and "image_processing" not in str(f)
1377
+ ]
1378
+
1379
+ os.makedirs(module_folder, exist_ok=True)
1380
+ for module_file in files_to_adapt:
1381
+ new_module_name = module_file.name.replace(
1382
+ old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
1383
+ )
1384
+ dest_file = module_folder / new_module_name
1385
+ duplicate_module(
1386
+ module_file,
1387
+ old_model_patterns,
1388
+ new_model_patterns,
1389
+ dest_file=dest_file,
1390
+ add_copied_from=add_copied_from and "modeling" in new_module_name,
1391
+ )
1392
+
1393
+ clean_frameworks_in_init(
1394
+ module_folder / "__init__.py", frameworks=frameworks, keep_processing=not keep_old_processing
1395
+ )
1396
+
1397
+ # 2. We add our new model to the models init and the main init
1398
+ add_content_to_file(
1399
+ TRANSFORMERS_PATH / "models" / "__init__.py",
1400
+ f" {new_model_patterns.model_lower_cased},",
1401
+ add_after=f" {old_module_name},",
1402
+ exact_match=True,
1403
+ )
1404
+ add_model_to_main_init(
1405
+ old_model_patterns, new_model_patterns, frameworks=frameworks, with_processing=not keep_old_processing
1406
+ )
1407
+
1408
+ # 3. Add test files
1409
+ files_to_adapt = model_files["test_files"]
1410
+ if keep_old_processing:
1411
+ files_to_adapt = [
1412
+ f
1413
+ for f in files_to_adapt
1414
+ if "tokenization" not in str(f)
1415
+ and "processor" not in str(f)
1416
+ and "feature_extraction" not in str(f)
1417
+ and "image_processing" not in str(f)
1418
+ ]
1419
+
1420
+ def disable_fx_test(filename: Path) -> bool:
1421
+ with open(filename) as fp:
1422
+ content = fp.read()
1423
+ new_content = re.sub(r"fx_compatible\s*=\s*True", "fx_compatible = False", content)
1424
+ with open(filename, "w") as fp:
1425
+ fp.write(new_content)
1426
+ return content != new_content
1427
+
1428
+ disabled_fx_test = False
1429
+
1430
+ tests_folder = REPO_PATH / "tests" / "models" / new_model_patterns.model_lower_cased
1431
+ os.makedirs(tests_folder, exist_ok=True)
1432
+ with open(tests_folder / "__init__.py", "w"):
1433
+ pass
1434
+
1435
+ for test_file in files_to_adapt:
1436
+ new_test_file_name = test_file.name.replace(
1437
+ old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased
1438
+ )
1439
+ dest_file = test_file.parent.parent / new_model_patterns.model_lower_cased / new_test_file_name
1440
+ duplicate_module(
1441
+ test_file,
1442
+ old_model_patterns,
1443
+ new_model_patterns,
1444
+ dest_file=dest_file,
1445
+ add_copied_from=False,
1446
+ attrs_to_remove=["pipeline_model_mapping", "is_pipeline_test_to_skip"],
1447
+ )
1448
+ disabled_fx_test = disabled_fx_test | disable_fx_test(dest_file)
1449
+
1450
+ if disabled_fx_test:
1451
+ print(
1452
+ "The tests for symbolic tracing with torch.fx were disabled, you can add those once symbolic tracing works"
1453
+ " for your new model."
1454
+ )
1455
+
1456
+ # 4. Add model to auto classes
1457
+ add_model_to_auto_classes(old_model_patterns, new_model_patterns, model_classes)
1458
+
1459
+ # 5. Add doc file
1460
+ doc_file = REPO_PATH / "docs" / "source" / "en" / "model_doc" / f"{old_model_patterns.model_type}.md"
1461
+ duplicate_doc_file(doc_file, old_model_patterns, new_model_patterns, frameworks=frameworks)
1462
+ insert_model_in_doc_toc(old_model_patterns, new_model_patterns)
1463
+
1464
+ # 6. Warn the user for duplicate patterns
1465
+ if old_model_patterns.model_type == old_model_patterns.checkpoint:
1466
+ print(
1467
+ "The model you picked has the same name for the model type and the checkpoint name "
1468
+ f"({old_model_patterns.model_type}). As a result, it's possible that in some places where the new checkpoint "
1469
+ f"should be, you have {new_model_patterns.model_type} instead. You should search for all instances of "
1470
+ f"{new_model_patterns.model_type} in the new files and check they're not badly used as checkpoints."
1471
+ )
1472
+ elif old_model_patterns.model_lower_cased == old_model_patterns.checkpoint:
1473
+ print(
1474
+ "The model you picked has the same name for the model type and the checkpoint name "
1475
+ f"({old_model_patterns.model_lower_cased}). As a result, it's possible that in some places where the new "
1476
+ f"checkpoint should be, you have {new_model_patterns.model_lower_cased} instead. You should search for "
1477
+ f"all instances of {new_model_patterns.model_lower_cased} in the new files and check they're not badly "
1478
+ "used as checkpoints."
1479
+ )
1480
+ if (
1481
+ old_model_patterns.model_type == old_model_patterns.model_lower_cased
1482
+ and new_model_patterns.model_type != new_model_patterns.model_lower_cased
1483
+ ):
1484
+ print(
1485
+ "The model you picked has the same name for the model type and the lowercased model name "
1486
+ f"({old_model_patterns.model_lower_cased}). As a result, it's possible some places where the new "
1487
+ f"model type should be, you have {new_model_patterns.model_lower_cased} instead. You should search for "
1488
+ f"all instances of {new_model_patterns.model_lower_cased} in the new files and check they're not badly "
1489
+ "used as the model type."
1490
+ )
1491
+
1492
+ if not keep_old_processing and old_model_patterns.tokenizer_class is not None:
1493
+ print(
1494
+ "The constants at the start of the newly created tokenizer file need to be manually fixed. If your new "
1495
+ "model has a tokenizer fast, you will also need to manually add the converter in the "
1496
+ "`SLOW_TO_FAST_CONVERTERS` constant of `convert_slow_tokenizer.py`."
1497
+ )
1498
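+ # Illustrative call (hypothetical names and checkpoint; a sketch, not part of the original module):
+ # >>> create_new_model_like(
+ # ...     model_type="bert",
+ # ...     new_model_patterns=ModelPatterns("MyBert", "my-org/my-bert-base", model_type="my-bert"),
+ # ...     frameworks=["pt"],
+ # ... )
+ # This duplicates the BERT modeling, test and doc files under names derived from "MyBert"
+ # and registers the new model in the inits, auto classes and doc TOC.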
+
1499
+
1500
+ def add_new_model_like_command_factory(args: Namespace):
1501
+ return AddNewModelLikeCommand(config_file=args.config_file, path_to_repo=args.path_to_repo)
1502
+
1503
+
1504
+ class AddNewModelLikeCommand(BaseTransformersCLICommand):
1505
+ @staticmethod
1506
+ def register_subcommand(parser: ArgumentParser):
1507
+ add_new_model_like_parser = parser.add_parser("add-new-model-like")
1508
+ add_new_model_like_parser.add_argument(
1509
+ "--config_file", type=str, help="A file with all the information for this model creation."
1510
+ )
1511
+ add_new_model_like_parser.add_argument(
1512
+ "--path_to_repo", type=str, help="When not using an editable install, the path to the Transformers repo."
1513
+ )
1514
+ add_new_model_like_parser.set_defaults(func=add_new_model_like_command_factory)
1515
+
1516
+ def __init__(self, config_file=None, path_to_repo=None, *args):
1517
+ if config_file is not None:
1518
+ with open(config_file, "r", encoding="utf-8") as f:
1519
+ config = json.load(f)
1520
+ self.old_model_type = config["old_model_type"]
1521
+ self.model_patterns = ModelPatterns(**config["new_model_patterns"])
1522
+ self.add_copied_from = config.get("add_copied_from", True)
1523
+ self.frameworks = config.get("frameworks", get_default_frameworks())
1524
+ self.old_checkpoint = config.get("old_checkpoint", None)
1525
+ else:
1526
+ (
1527
+ self.old_model_type,
1528
+ self.model_patterns,
1529
+ self.add_copied_from,
1530
+ self.frameworks,
1531
+ self.old_checkpoint,
1532
+ ) = get_user_input()
1533
+
1534
+ self.path_to_repo = path_to_repo
1535
+
1536
+ def run(self):
1537
+ if self.path_to_repo is not None:
1538
+ # Adapt constants
1539
+ global TRANSFORMERS_PATH
1540
+ global REPO_PATH
1541
+
1542
+ REPO_PATH = Path(self.path_to_repo)
1543
+ TRANSFORMERS_PATH = REPO_PATH / "src" / "transformers"
1544
+
1545
+ create_new_model_like(
1546
+ model_type=self.old_model_type,
1547
+ new_model_patterns=self.model_patterns,
1548
+ add_copied_from=self.add_copied_from,
1549
+ frameworks=self.frameworks,
1550
+ old_checkpoint=self.old_checkpoint,
1551
+ )
1552
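+ # Illustrative config (hypothetical values; a sketch, not part of the original file).
+ # A JSON file passed through `--config_file` could look like:
+ #     {
+ #       "old_model_type": "bert",
+ #       "new_model_patterns": {"model_name": "MyBert", "checkpoint": "my-org/my-bert-base"},
+ #       "add_copied_from": true,
+ #       "frameworks": ["pt"]
+ #     }
+ # and would be consumed with: transformers-cli add-new-model-like --config_file config.json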
+
1553
+
1554
+ def get_user_field(
1555
+ question: str,
1556
+ default_value: Optional[str] = None,
1557
+ is_valid_answer: Optional[Callable] = None,
1558
+ convert_to: Optional[Callable] = None,
1559
+ fallback_message: Optional[str] = None,
1560
+ ) -> Any:
1561
+ """
1562
+ A utility function that asks a question to the user to get an answer, potentially looping until it gets a valid
1563
+ answer.
1564
+
1565
+ Args:
1566
+ question (`str`): The question to ask the user.
1567
+ default_value (`str`, *optional*): A potential default value that will be used when the answer is empty.
1568
+ is_valid_answer (`Callable`, *optional*):
1569
+ If set, the question will be asked until this function returns `True` on the provided answer.
1570
+ convert_to (`Callable`, *optional*):
1571
+ If set, the answer will be passed to this function. If this function raises an error on the provided
1572
+ answer, the question will be asked again.
1573
+ fallback_message (`str`, *optional*):
1574
+ A message that will be displayed each time the question is asked again to the user.
1575
+
1576
+ Returns:
1577
+ `Any`: The answer provided by the user (or the default), passed through the potential conversion function.
1578
+ """
1579
+ if not question.endswith(" "):
1580
+ question = question + " "
1581
+ if default_value is not None:
1582
+ question = f"{question} [{default_value}] "
1583
+
1584
+ valid_answer = False
1585
+ while not valid_answer:
1586
+ answer = input(question)
1587
+ if default_value is not None and len(answer) == 0:
1588
+ answer = default_value
1589
+ if is_valid_answer is not None:
1590
+ valid_answer = is_valid_answer(answer)
1591
+ elif convert_to is not None:
1592
+ try:
1593
+ answer = convert_to(answer)
1594
+ valid_answer = True
1595
+ except Exception:
1596
+ valid_answer = False
1597
+ else:
1598
+ valid_answer = True
1599
+
1600
+ if not valid_answer:
1601
+ print(fallback_message)
1602
+
1603
+ return answer
1604
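+ # Illustrative usage (hypothetical prompt; a sketch, not part of the original module):
+ # >>> num_layers = get_user_field(
+ # ...     "How many layers should the toy config use?",
+ # ...     default_value="12",
+ # ...     convert_to=int,
+ # ...     fallback_message="Please enter an integer.",
+ # ... )
+ # An empty answer falls back to "12" (then converted to 12); a non-integer answer prints
+ # the fallback message and asks again.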
+
1605
+
1606
+ def convert_to_bool(x: str) -> bool:
1607
+ """
1608
+ Converts a string to a bool.
1609
+ """
1610
+ if x.lower() in ["1", "y", "yes", "true"]:
1611
+ return True
1612
+ if x.lower() in ["0", "n", "no", "false"]:
1613
+ return False
1614
+ raise ValueError(f"{x} is not a value that can be converted to a bool.")
1615
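+ # Quick sanity checks (a sketch, not part of the original module):
+ # >>> convert_to_bool("YES")
+ # True
+ # >>> convert_to_bool("0")
+ # False
+ # >>> convert_to_bool("maybe")  # raises ValueError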
+
1616
+
1617
+ def get_user_input():
1618
+ """
1619
+ Ask the user for the necessary inputs to add the new model.
1620
+ """
1621
+ model_types = list(auto_module.configuration_auto.MODEL_NAMES_MAPPING.keys())
1622
+
1623
+ # Get old model type
1624
+ valid_model_type = False
1625
+ while not valid_model_type:
1626
+ old_model_type = input(
1627
+ "What is the model you would like to duplicate? Please provide the lowercase `model_type` (e.g. roberta): "
1628
+ )
1629
+ if old_model_type in model_types:
1630
+ valid_model_type = True
1631
+ else:
1632
+ print(f"{old_model_type} is not a valid model type.")
1633
+ near_choices = difflib.get_close_matches(old_model_type, model_types)
1634
+ if len(near_choices) >= 1:
1635
+ if len(near_choices) > 1:
1636
+ near_choices = " or ".join(near_choices)
1637
+ print(f"Did you mean {near_choices}?")
1638
+
1639
+ old_model_info = retrieve_info_for_model(old_model_type)
1640
+ old_tokenizer_class = old_model_info["model_patterns"].tokenizer_class
1641
+ old_image_processor_class = old_model_info["model_patterns"].image_processor_class
1642
+ old_feature_extractor_class = old_model_info["model_patterns"].feature_extractor_class
1643
+ old_processor_class = old_model_info["model_patterns"].processor_class
1644
+ old_frameworks = old_model_info["frameworks"]
1645
+
1646
+ old_checkpoint = None
1647
+ if len(old_model_info["model_patterns"].checkpoint) == 0:
1648
+ old_checkpoint = get_user_field(
1649
+ "We couldn't find the name of the base checkpoint for that model. Please enter it here."
1650
+ )
1651
+
1652
+ model_name = get_user_field(
1653
+ "What is the name (with no special casing) for your new model in the paper (e.g. RoBERTa)? "
1654
+ )
1655
+ default_patterns = ModelPatterns(model_name, model_name)
1656
+
1657
+ model_type = get_user_field(
1658
+ "What identifier would you like to use for the `model_type` of this model? ",
1659
+ default_value=default_patterns.model_type,
1660
+ )
1661
+ model_lower_cased = get_user_field(
1662
+ "What lowercase name would you like to use for the module (folder) of this model? ",
1663
+ default_value=default_patterns.model_lower_cased,
1664
+ )
1665
+ model_camel_cased = get_user_field(
1666
+ "What prefix (camel-cased) would you like to use for the model classes of this model (e.g. Roberta)? ",
1667
+ default_value=default_patterns.model_camel_cased,
1668
+ )
1669
+ model_upper_cased = get_user_field(
1670
+ "What prefix (upper-cased) would you like to use for the constants relative to this model? ",
1671
+ default_value=default_patterns.model_upper_cased,
1672
+ )
1673
+ config_class = get_user_field(
1674
+ "What will be the name of the config class for this model? ", default_value=f"{model_camel_cased}Config"
1675
+ )
1676
+ checkpoint = get_user_field(
1677
+ "Please give a checkpoint identifier (on the model Hub) for this new model (e.g. FacebookAI/roberta-base): "
1678
+ )
1679
+
1680
+ old_processing_classes = [
1681
+ c
1682
+ for c in [old_image_processor_class, old_feature_extractor_class, old_tokenizer_class, old_processor_class]
1683
+ if c is not None
1684
+ ]
1685
+ old_processing_classes = ", ".join(old_processing_classes)
1686
+ keep_processing = get_user_field(
1687
+ f"Will your new model use the same processing class as {old_model_type} ({old_processing_classes}) (yes/no)? ",
1688
+ convert_to=convert_to_bool,
1689
+ fallback_message="Please answer yes/no, y/n, true/false or 1/0. ",
1690
+ )
1691
+ if keep_processing:
1692
+ image_processor_class = old_image_processor_class
1693
+ feature_extractor_class = old_feature_extractor_class
1694
+ processor_class = old_processor_class
1695
+ tokenizer_class = old_tokenizer_class
1696
+ else:
1697
+ if old_tokenizer_class is not None:
1698
+ tokenizer_class = get_user_field(
1699
+ "What will be the name of the tokenizer class for this model? ",
1700
+ default_value=f"{model_camel_cased}Tokenizer",
1701
+ )
1702
+ else:
1703
+ tokenizer_class = None
1704
+ if old_image_processor_class is not None:
1705
+ image_processor_class = get_user_field(
1706
+ "What will be the name of the image processor class for this model? ",
1707
+ default_value=f"{model_camel_cased}ImageProcessor",
1708
+ )
1709
+ else:
1710
+ image_processor_class = None
1711
+ if old_feature_extractor_class is not None:
1712
+ feature_extractor_class = get_user_field(
1713
+ "What will be the name of the feature extractor class for this model? ",
1714
+ default_value=f"{model_camel_cased}FeatureExtractor",
1715
+ )
1716
+ else:
1717
+ feature_extractor_class = None
1718
+ if old_processor_class is not None:
1719
+ processor_class = get_user_field(
1720
+ "What will be the name of the processor class for this model? ",
1721
+ default_value=f"{model_camel_cased}Processor",
1722
+ )
1723
+ else:
1724
+ processor_class = None
1725
+
1726
+ model_patterns = ModelPatterns(
1727
+ model_name,
1728
+ checkpoint,
1729
+ model_type=model_type,
1730
+ model_lower_cased=model_lower_cased,
1731
+ model_camel_cased=model_camel_cased,
1732
+ model_upper_cased=model_upper_cased,
1733
+ config_class=config_class,
1734
+ tokenizer_class=tokenizer_class,
1735
+ image_processor_class=image_processor_class,
1736
+ feature_extractor_class=feature_extractor_class,
1737
+ processor_class=processor_class,
1738
+ )
1739
+
1740
+ add_copied_from = get_user_field(
1741
+ "Should we add # Copied from statements when creating the new modeling file (yes/no)? ",
1742
+ convert_to=convert_to_bool,
1743
+ default_value="yes",
1744
+ fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
1745
+ )
1746
+
1747
+ all_frameworks = get_user_field(
1748
+ "Should we add a version of your new model in all the frameworks implemented by"
1749
+ f" {old_model_type} ({old_frameworks}) (yes/no)? ",
1750
+ convert_to=convert_to_bool,
1751
+ default_value="yes",
1752
+ fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
1753
+ )
1754
+ if all_frameworks:
1755
+ frameworks = None
1756
+ else:
1757
+ frameworks = get_user_field(
1758
+ "Please enter the list of frameworks you want (pt, tf, flax) separated by spaces",
1759
+ is_valid_answer=lambda x: all(p in ["pt", "tf", "flax"] for p in x.split(" ")),
1760
+ )
1761
+ frameworks = list(set(frameworks.split(" ")))
1762
+
1763
+ return (old_model_type, model_patterns, add_copied_from, frameworks, old_checkpoint)
env-llmeval/lib/python3.10/site-packages/transformers/commands/convert.py ADDED
@@ -0,0 +1,165 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from argparse import ArgumentParser, Namespace
16
+
17
+ from ..utils import logging
18
+ from . import BaseTransformersCLICommand
19
+
20
+
21
+ def convert_command_factory(args: Namespace):
22
+ """
23
+ Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
24
+
25
+ Returns: ServeCommand
26
+ """
27
+ return ConvertCommand(
28
+ args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
29
+ )
30
+
31
+
32
+ IMPORT_ERROR_MESSAGE = """
33
+ transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
34
+ TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
35
+ """
36
+
37
+
38
+ class ConvertCommand(BaseTransformersCLICommand):
39
+ @staticmethod
40
+ def register_subcommand(parser: ArgumentParser):
41
+ """
42
+ Register this command to argparse so it's available for the transformer-cli
43
+
44
+ Args:
45
+ parser: Root parser to register command-specific arguments
46
+ """
47
+ train_parser = parser.add_parser(
48
+ "convert",
49
+ help="CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.",
50
+ )
51
+ train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
52
+ train_parser.add_argument(
53
+ "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
54
+ )
55
+ train_parser.add_argument(
56
+ "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
57
+ )
58
+ train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
59
+ train_parser.add_argument(
60
+ "--finetuning_task_name",
61
+ type=str,
62
+ default=None,
63
+ help="Optional fine-tuning task name if the TF model was a finetuned model.",
64
+ )
65
+ train_parser.set_defaults(func=convert_command_factory)
66
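+ # Illustrative invocation (hypothetical paths; a sketch, not part of the original file):
+ #   transformers-cli convert --model_type bert \
+ #       --tf_checkpoint /path/to/bert_model.ckpt \
+ #       --config /path/to/bert_config.json \
+ #       --pytorch_dump_output /path/to/pytorch_dump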
+
67
+ def __init__(
68
+ self,
69
+ model_type: str,
70
+ tf_checkpoint: str,
71
+ pytorch_dump_output: str,
72
+ config: str,
73
+ finetuning_task_name: str,
74
+ *args,
75
+ ):
76
+ self._logger = logging.get_logger("transformers-cli/converting")
77
+
78
+ self._logger.info(f"Loading model {model_type}")
79
+ self._model_type = model_type
80
+ self._tf_checkpoint = tf_checkpoint
81
+ self._pytorch_dump_output = pytorch_dump_output
82
+ self._config = config
83
+ self._finetuning_task_name = finetuning_task_name
84
+
85
+ def run(self):
86
+ if self._model_type == "albert":
87
+ try:
88
+ from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
89
+ convert_tf_checkpoint_to_pytorch,
90
+ )
91
+ except ImportError:
92
+ raise ImportError(IMPORT_ERROR_MESSAGE)
93
+
94
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
95
+ elif self._model_type == "bert":
96
+ try:
97
+ from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
98
+ convert_tf_checkpoint_to_pytorch,
99
+ )
100
+ except ImportError:
101
+ raise ImportError(IMPORT_ERROR_MESSAGE)
102
+
103
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
104
+ elif self._model_type == "funnel":
105
+ try:
106
+ from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
107
+ convert_tf_checkpoint_to_pytorch,
108
+ )
109
+ except ImportError:
110
+ raise ImportError(IMPORT_ERROR_MESSAGE)
111
+
112
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
113
+ elif self._model_type == "t5":
114
+ try:
115
+ from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
116
+ except ImportError:
117
+ raise ImportError(IMPORT_ERROR_MESSAGE)
118
+
119
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
120
+ elif self._model_type == "gpt":
121
+ from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
122
+ convert_openai_checkpoint_to_pytorch,
123
+ )
124
+
125
+ convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
126
+ elif self._model_type == "gpt2":
127
+ try:
128
+ from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
129
+ convert_gpt2_checkpoint_to_pytorch,
130
+ )
131
+ except ImportError:
132
+ raise ImportError(IMPORT_ERROR_MESSAGE)
133
+
134
+ convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
135
+ elif self._model_type == "xlnet":
136
+ try:
137
+ from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
138
+ convert_xlnet_checkpoint_to_pytorch,
139
+ )
140
+ except ImportError:
141
+ raise ImportError(IMPORT_ERROR_MESSAGE)
142
+
143
+ convert_xlnet_checkpoint_to_pytorch(
144
+ self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
145
+ )
146
+ elif self._model_type == "xlm":
147
+ from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
148
+ convert_xlm_checkpoint_to_pytorch,
149
+ )
150
+
151
+ convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
152
+ elif self._model_type == "lxmert":
153
+ from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
154
+ convert_lxmert_checkpoint_to_pytorch,
155
+ )
156
+
157
+ convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
158
+ elif self._model_type == "rembert":
159
+ from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
160
+ convert_rembert_tf_checkpoint_to_pytorch,
161
+ )
162
+
163
+ convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
164
+ else:
165
+ raise ValueError("--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, xlm, xlnet]")
env-llmeval/lib/python3.10/site-packages/transformers/commands/download.py ADDED
@@ -0,0 +1,56 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from argparse import ArgumentParser
16
+
17
+ from . import BaseTransformersCLICommand
18
+
19
+
20
+ def download_command_factory(args):
21
+ return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
22
+
23
+
24
+ class DownloadCommand(BaseTransformersCLICommand):
25
+ @staticmethod
26
+ def register_subcommand(parser: ArgumentParser):
27
+ download_parser = parser.add_parser("download")
28
+ download_parser.add_argument(
29
+ "--cache-dir", type=str, default=None, help="Path to location to store the models"
30
+ )
31
+ download_parser.add_argument(
32
+ "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
33
+ )
34
+ download_parser.add_argument(
35
+ "--trust-remote-code",
36
+ action="store_true",
37
+ help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
38
+ )
39
+ download_parser.add_argument("model", type=str, help="Name of the model to download")
40
+ download_parser.set_defaults(func=download_command_factory)
41
+
42
+ def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
43
+ self._model = model
44
+ self._cache = cache
45
+ self._force = force
46
+ self._trust_remote_code = trust_remote_code
47
+
48
+ def run(self):
49
+ from ..models.auto import AutoModel, AutoTokenizer
50
+
51
+ AutoModel.from_pretrained(
52
+ self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
53
+ )
54
+ AutoTokenizer.from_pretrained(
55
+ self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
56
+ )
env-llmeval/lib/python3.10/site-packages/transformers/commands/env.py ADDED
@@ -0,0 +1,143 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib.util
16
+ import os
17
+ import platform
18
+ from argparse import ArgumentParser
19
+
20
+ import huggingface_hub
21
+
22
+ from .. import __version__ as version
23
+ from ..utils import (
24
+ is_accelerate_available,
25
+ is_flax_available,
26
+ is_safetensors_available,
27
+ is_tf_available,
28
+ is_torch_available,
29
+ )
30
+ from . import BaseTransformersCLICommand
31
+
32
+
33
+ def info_command_factory(_):
34
+ return EnvironmentCommand()
35
+
36
+
37
+ def download_command_factory(args):
38
+ return EnvironmentCommand(args.accelerate_config_file)
39
+
40
+
41
+ class EnvironmentCommand(BaseTransformersCLICommand):
42
+ @staticmethod
43
+ def register_subcommand(parser: ArgumentParser):
44
+ download_parser = parser.add_parser("env")
45
+ download_parser.set_defaults(func=info_command_factory)
46
+ download_parser.add_argument(
47
+ "--accelerate-config_file",
48
+ default=None,
49
+ help="The accelerate config file to use for the default values in the launching script.",
50
+ )
51
+ download_parser.set_defaults(func=download_command_factory)
52
+
53
+ def __init__(self, accelerate_config_file, *args) -> None:
54
+ self._accelerate_config_file = accelerate_config_file
55
+
56
+ def run(self):
57
+ safetensors_version = "not installed"
58
+ if is_safetensors_available():
59
+ import safetensors
60
+
61
+ safetensors_version = safetensors.__version__
62
+ elif importlib.util.find_spec("safetensors") is not None:
63
+ import safetensors
64
+
65
+ safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
66
+
67
+ accelerate_version = "not installed"
68
+ accelerate_config = accelerate_config_str = "not found"
69
+ if is_accelerate_available():
70
+ import accelerate
71
+ from accelerate.commands.config import default_config_file, load_config_from_file
72
+
73
+ accelerate_version = accelerate.__version__
74
+ # Get the default from the config file.
75
+ if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
76
+ accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
77
+
78
+ accelerate_config_str = (
79
+ "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
80
+ if isinstance(accelerate_config, dict)
81
+ else f"\t{accelerate_config}"
82
+ )
83
+
84
+ pt_version = "not installed"
85
+ pt_cuda_available = "NA"
86
+ if is_torch_available():
87
+ import torch
88
+
89
+ pt_version = torch.__version__
90
+ pt_cuda_available = torch.cuda.is_available()
91
+
92
+ tf_version = "not installed"
93
+ tf_cuda_available = "NA"
94
+ if is_tf_available():
95
+ import tensorflow as tf
96
+
97
+ tf_version = tf.__version__
98
+ try:
99
+ # deprecated in v2.1
100
+ tf_cuda_available = tf.test.is_gpu_available()
101
+ except AttributeError:
102
+ # returns list of devices, convert to bool
103
+ tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
104
+
105
+ flax_version = "not installed"
106
+ jax_version = "not installed"
107
+ jaxlib_version = "not installed"
108
+ jax_backend = "NA"
109
+ if is_flax_available():
110
+ import flax
111
+ import jax
112
+ import jaxlib
113
+
114
+ flax_version = flax.__version__
115
+ jax_version = jax.__version__
116
+ jaxlib_version = jaxlib.__version__
117
+ jax_backend = jax.lib.xla_bridge.get_backend().platform
118
+
119
+ info = {
120
+ "`transformers` version": version,
121
+ "Platform": platform.platform(),
122
+ "Python version": platform.python_version(),
123
+ "Huggingface_hub version": huggingface_hub.__version__,
124
+ "Safetensors version": f"{safetensors_version}",
125
+ "Accelerate version": f"{accelerate_version}",
126
+ "Accelerate config": f"{accelerate_config_str}",
127
+ "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
128
+ "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
129
+ "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
130
+ "Jax version": f"{jax_version}",
131
+ "JaxLib version": f"{jaxlib_version}",
132
+ "Using GPU in script?": "<fill in>",
133
+ "Using distributed or parallel set-up in script?": "<fill in>",
134
+ }
135
+
136
+ print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
137
+ print(self.format_dict(info))
138
+
139
+ return info
140
+
141
+ @staticmethod
142
+ def format_dict(d):
143
+ return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
env-llmeval/lib/python3.10/site-packages/transformers/commands/lfs.py ADDED
@@ -0,0 +1,226 @@
1
+ """
2
+ Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs.
3
+
4
+ Inspired by: github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py
5
+
6
+ Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
7
+
8
+
9
+ To launch debugger while developing:
10
+
11
+ ```
+ [lfs "customtransfer.multipart"]
+ path = /path/to/transformers/.env/bin/python
+ args = -m debugpy --listen 5678 --wait-for-client /path/to/transformers/src/transformers/commands/transformers_cli.py lfs-multipart-upload
+ ```"""
14
+
15
+ import json
16
+ import os
17
+ import subprocess
18
+ import sys
19
+ import warnings
20
+ from argparse import ArgumentParser
21
+ from contextlib import AbstractContextManager
22
+ from typing import Dict, List, Optional
23
+
24
+ import requests
25
+
26
+ from ..utils import logging
27
+ from . import BaseTransformersCLICommand
28
+
29
+
30
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
31
+
32
+
33
+ LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload"
34
+
35
+
36
+ class LfsCommands(BaseTransformersCLICommand):
37
+ """
38
+ Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. This lets users upload
39
+ large files >5GB 🔥. Spec for LFS custom transfer agent is:
40
+ https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
41
+
42
+ This introduces two commands to the CLI:
43
+
44
+ 1. $ transformers-cli lfs-enable-largefiles
45
+
46
+ This should be executed once for each model repo that contains a model file >5GB. It's documented in the error
47
+ message you get if you just try to git push a 5GB file without having enabled it before.
48
+
49
+ 2. $ transformers-cli lfs-multipart-upload
50
+
51
+ This command is called by lfs directly and is not meant to be called by the user.
52
+ """
53
+
54
+ @staticmethod
55
+ def register_subcommand(parser: ArgumentParser):
56
+ enable_parser = parser.add_parser(
57
+ "lfs-enable-largefiles",
58
+ help=(
59
+ "Deprecated: use `huggingface-cli` instead. Configure your repository to enable upload of files > 5GB."
60
+ ),
61
+ )
62
+ enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")
63
+ enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args))
64
+
65
+ upload_parser = parser.add_parser(
66
+ LFS_MULTIPART_UPLOAD_COMMAND,
67
+ help=(
68
+ "Deprecated: use `huggingface-cli` instead. "
69
+ "Command will get called by git-lfs, do not call it directly."
70
+ ),
71
+ )
72
+ upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args))
73
+
74
+
75
+ class LfsEnableCommand:
76
+ def __init__(self, args):
77
+ self.args = args
78
+
79
+ def run(self):
80
+ warnings.warn(
81
+ "Managing repositories through transformers-cli is deprecated. Please use `huggingface-cli` instead."
82
+ )
83
+ local_path = os.path.abspath(self.args.path)
84
+ if not os.path.isdir(local_path):
85
+ print("This does not look like a valid git repo.")
86
+ exit(1)
87
+ subprocess.run(
88
+ "git config lfs.customtransfer.multipart.path transformers-cli".split(), check=True, cwd=local_path
89
+ )
90
+ subprocess.run(
91
+ f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
92
+ check=True,
93
+ cwd=local_path,
94
+ )
95
+ print("Local repo set up for largefiles")
96
+
97
+
98
+ def write_msg(msg: Dict):
99
+ """Write out the message in Line delimited JSON."""
100
+ msg = json.dumps(msg) + "\n"
101
+ sys.stdout.write(msg)
102
+ sys.stdout.flush()
103
+
104
+
105
+ def read_msg() -> Optional[Dict]:
106
+ """Read Line delimited JSON from stdin."""
107
+ msg = json.loads(sys.stdin.readline().strip())
108
+
109
+ if "terminate" in (msg.get("type"), msg.get("event")):
110
+ # terminate message received
111
+ return None
112
+
113
+ if msg.get("event") not in ("download", "upload"):
114
+ logger.critical("Received unexpected message")
115
+ sys.exit(1)
116
+
117
+ return msg
118
+
119
+
120
+ class FileSlice(AbstractContextManager):
121
+ """
122
+ File-like object that only reads a slice of a file
123
+
124
+ Inspired by stackoverflow.com/a/29838711/593036
125
+ """
126
+
127
+ def __init__(self, filepath: str, seek_from: int, read_limit: int):
128
+ self.filepath = filepath
129
+ self.seek_from = seek_from
130
+ self.read_limit = read_limit
131
+ self.n_seen = 0
132
+
133
+ def __enter__(self):
134
+ self.f = open(self.filepath, "rb")
135
+ self.f.seek(self.seek_from)
136
+ return self
137
+
138
+ def __len__(self):
139
+ total_length = os.fstat(self.f.fileno()).st_size
140
+ return min(self.read_limit, total_length - self.seek_from)
141
+
142
+ def read(self, n=-1):
143
+ if self.n_seen >= self.read_limit:
144
+ return b""
145
+ remaining_amount = self.read_limit - self.n_seen
146
+ data = self.f.read(remaining_amount if n < 0 else min(n, remaining_amount))
147
+ self.n_seen += len(data)
148
+ return data
149
+
150
+ def __iter__(self):
151
+ yield self.read(n=4 * 1024 * 1024)
152
+
153
+ def __exit__(self, *args):
154
+ self.f.close()
155
+
156
+
157
+ class LfsUploadCommand:
158
+ def __init__(self, args):
159
+ self.args = args
160
+
161
+ def run(self):
162
+ # Immediately after invoking a custom transfer process, git-lfs
163
+ # sends initiation data to the process over stdin.
164
+ # This tells the process useful information about the configuration.
165
+ init_msg = json.loads(sys.stdin.readline().strip())
166
+ if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
167
+ write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
168
+ sys.exit(1)
169
+
170
+ # The transfer process should use the information it needs from the
171
+ # initiation structure, and also perform any one-off setup tasks it
172
+ # needs to do. It should then respond on stdout with a simple empty
173
+ # confirmation structure, as follows:
174
+ write_msg({})
175
+
176
+ # After the initiation exchange, git-lfs will send any number of
177
+ # transfer requests to the stdin of the transfer process, in a serial sequence.
178
+ while True:
179
+ msg = read_msg()
180
+ if msg is None:
181
+ # When all transfers have been processed, git-lfs will send
182
+ # a terminate event to the stdin of the transfer process.
183
+ # On receiving this message the transfer process should
184
+ # clean up and terminate. No response is expected.
185
+ sys.exit(0)
186
+
187
+ oid = msg["oid"]
188
+ filepath = msg["path"]
189
+ completion_url = msg["action"]["href"]
190
+ header = msg["action"]["header"]
191
+ chunk_size = int(header.pop("chunk_size"))
192
+ presigned_urls: List[str] = list(header.values())
193
+
194
+ parts = []
195
+ for i, presigned_url in enumerate(presigned_urls):
196
+ with FileSlice(filepath, seek_from=i * chunk_size, read_limit=chunk_size) as data:
197
+ r = requests.put(presigned_url, data=data)
198
+ r.raise_for_status()
199
+ parts.append(
200
+ {
201
+ "etag": r.headers.get("etag"),
202
+ "partNumber": i + 1,
203
+ }
204
+ )
205
+ # In order to support progress reporting while data is uploading / downloading,
206
+ # the transfer process should post messages to stdout
207
+ write_msg(
208
+ {
209
+ "event": "progress",
210
+ "oid": oid,
211
+ "bytesSoFar": (i + 1) * chunk_size,
212
+ "bytesSinceLast": chunk_size,
213
+ }
214
+ )
215
+ # Not precise but that's ok.
216
+
217
+ r = requests.post(
218
+ completion_url,
219
+ json={
220
+ "oid": oid,
221
+ "parts": parts,
222
+ },
223
+ )
224
+ r.raise_for_status()
225
+
226
+ write_msg({"event": "complete", "oid": oid})
env-llmeval/lib/python3.10/site-packages/transformers/commands/pt_to_tf.py ADDED
@@ -0,0 +1,425 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ import os
17
+ from argparse import ArgumentParser, Namespace
18
+ from importlib import import_module
19
+
20
+ import huggingface_hub
21
+ import numpy as np
22
+ from packaging import version
23
+
24
+ from .. import (
25
+ FEATURE_EXTRACTOR_MAPPING,
26
+ IMAGE_PROCESSOR_MAPPING,
27
+ PROCESSOR_MAPPING,
28
+ TOKENIZER_MAPPING,
29
+ AutoConfig,
30
+ AutoFeatureExtractor,
31
+ AutoImageProcessor,
32
+ AutoProcessor,
33
+ AutoTokenizer,
34
+ is_datasets_available,
35
+ is_tf_available,
36
+ is_torch_available,
37
+ )
38
+ from ..utils import TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, logging
39
+ from . import BaseTransformersCLICommand
40
+
41
+
42
+ if is_tf_available():
43
+ import tensorflow as tf
44
+
45
+ tf.config.experimental.enable_tensor_float_32_execution(False)
46
+
47
+ if is_torch_available():
48
+ import torch
49
+
50
+ if is_datasets_available():
51
+ from datasets import load_dataset
52
+
53
+
54
+ MAX_ERROR = 5e-5 # larger error tolerance than in our internal tests, to avoid flaky user-facing errors
55
+
56
+
57
+ def convert_command_factory(args: Namespace):
58
+ """
59
+ Factory function used to convert a PyTorch model checkpoint into a TensorFlow 2 checkpoint.
60
+
61
+ Returns: PTtoTFCommand
62
+ """
63
+ return PTtoTFCommand(
64
+ args.model_name,
65
+ args.local_dir,
66
+ args.max_error,
67
+ args.new_weights,
68
+ args.no_pr,
69
+ args.push,
70
+ args.extra_commit_description,
71
+ args.override_model_class,
72
+ )
73
+
74
+
75
+ class PTtoTFCommand(BaseTransformersCLICommand):
76
+ @staticmethod
77
+ def register_subcommand(parser: ArgumentParser):
78
+ """
79
+ Register this command to argparse so it's available for the transformers-cli
80
+
81
+ Args:
82
+ parser: Root parser to register command-specific arguments
83
+ """
84
+ train_parser = parser.add_parser(
85
+ "pt-to-tf",
86
+ help=(
87
+ "CLI tool to run convert a transformers model from a PyTorch checkpoint to a TensorFlow checkpoint."
88
+ " Can also be used to validate existing weights without opening PRs, with --no-pr."
89
+ ),
90
+ )
91
+ train_parser.add_argument(
92
+ "--model-name",
93
+ type=str,
94
+ required=True,
95
+ help="The model name, including owner/organization, as seen on the hub.",
96
+ )
97
+ train_parser.add_argument(
98
+ "--local-dir",
99
+ type=str,
100
+ default="",
101
+ help="Optional local directory of the model repository. Defaults to /tmp/{model_name}",
102
+ )
103
+ train_parser.add_argument(
104
+ "--max-error",
105
+ type=float,
106
+ default=MAX_ERROR,
107
+ help=(
108
+ f"Maximum error tolerance. Defaults to {MAX_ERROR}. This flag should be avoided, use at your own risk."
109
+ ),
110
+ )
111
+ train_parser.add_argument(
112
+ "--new-weights",
113
+ action="store_true",
114
+ help="Optional flag to create new TensorFlow weights, even if they already exist.",
115
+ )
116
+ train_parser.add_argument(
117
+ "--no-pr", action="store_true", help="Optional flag to NOT open a PR with converted weights."
118
+ )
119
+ train_parser.add_argument(
120
+ "--push",
121
+ action="store_true",
122
+ help="Optional flag to push the weights directly to `main` (requires permissions)",
123
+ )
124
+ train_parser.add_argument(
125
+ "--extra-commit-description",
126
+ type=str,
127
+ default="",
128
+ help="Optional additional commit description to use when opening a PR (e.g. to tag the owner).",
129
+ )
130
+ train_parser.add_argument(
131
+ "--override-model-class",
132
+ type=str,
133
+ default=None,
134
+ help="If you think you know better than the auto-detector, you can specify the model class here. "
135
+ "Can be either an AutoModel class or a specific model class like BertForSequenceClassification.",
136
+ )
137
+ train_parser.set_defaults(func=convert_command_factory)
138
+
139
+ @staticmethod
140
+ def find_pt_tf_differences(pt_outputs, tf_outputs):
141
+ """
142
+ Compares the TensorFlow and PyTorch outputs, returning a dictionary with all tensor differences.
143
+ """
144
+ # 1. All output attributes must be the same
145
+ pt_out_attrs = set(pt_outputs.keys())
146
+ tf_out_attrs = set(tf_outputs.keys())
147
+ if pt_out_attrs != tf_out_attrs:
148
+ raise ValueError(
149
+ f"The model outputs have different attributes, aborting. (Pytorch: {pt_out_attrs}, TensorFlow:"
150
+ f" {tf_out_attrs})"
151
+ )
152
+
153
+ # 2. For each output attribute, computes the difference
154
+ def _find_pt_tf_differences(pt_out, tf_out, differences, attr_name=""):
155
+ # If the current attribute is a tensor, it is a leaf and we make the comparison. Otherwise, we will dig in
156
+ # recursively, keeping the name of the attribute.
157
+ if isinstance(pt_out, torch.Tensor):
158
+ tensor_difference = np.max(np.abs(pt_out.numpy() - tf_out.numpy()))
159
+ differences[attr_name] = tensor_difference
160
+ else:
161
+ root_name = attr_name
162
+ for i, pt_item in enumerate(pt_out):
163
+ # If it is a named attribute, we keep the name. Otherwise, just its index.
164
+ if isinstance(pt_item, str):
165
+ branch_name = root_name + pt_item
166
+ tf_item = tf_out[pt_item]
167
+ pt_item = pt_out[pt_item]
168
+ else:
169
+ branch_name = root_name + f"[{i}]"
170
+ tf_item = tf_out[i]
171
+ differences = _find_pt_tf_differences(pt_item, tf_item, differences, branch_name)
172
+
173
+ return differences
174
+
175
+ return _find_pt_tf_differences(pt_outputs, tf_outputs, {})
176
+
177
+ def __init__(
178
+ self,
179
+ model_name: str,
180
+ local_dir: str,
181
+ max_error: float,
182
+ new_weights: bool,
183
+ no_pr: bool,
184
+ push: bool,
185
+ extra_commit_description: str,
186
+ override_model_class: str,
187
+ *args,
188
+ ):
189
+ self._logger = logging.get_logger("transformers-cli/pt_to_tf")
190
+ self._model_name = model_name
191
+ self._local_dir = local_dir if local_dir else os.path.join("/tmp", model_name)
192
+ self._max_error = max_error
193
+ self._new_weights = new_weights
194
+ self._no_pr = no_pr
195
+ self._push = push
196
+ self._extra_commit_description = extra_commit_description
197
+ self._override_model_class = override_model_class
198
+
199
+ def get_inputs(self, pt_model, tf_dummy_inputs, config):
200
+ """
201
+ Returns the right inputs for the model, based on its signature.
202
+ """
203
+
204
+ def _get_audio_input():
205
+ ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
206
+ speech_samples = ds.sort("id").select(range(2))[:2]["audio"]
207
+ raw_samples = [x["array"] for x in speech_samples]
208
+ return raw_samples
209
+
210
+ model_config_class = type(pt_model.config)
211
+ if model_config_class in PROCESSOR_MAPPING:
212
+ processor = AutoProcessor.from_pretrained(self._local_dir)
213
+ if model_config_class in TOKENIZER_MAPPING and processor.tokenizer.pad_token is None:
214
+ processor.tokenizer.pad_token = processor.tokenizer.eos_token
215
+ elif model_config_class in IMAGE_PROCESSOR_MAPPING:
216
+ processor = AutoImageProcessor.from_pretrained(self._local_dir)
217
+ elif model_config_class in FEATURE_EXTRACTOR_MAPPING:
218
+ processor = AutoFeatureExtractor.from_pretrained(self._local_dir)
219
+ elif model_config_class in TOKENIZER_MAPPING:
220
+ processor = AutoTokenizer.from_pretrained(self._local_dir)
221
+ if processor.pad_token is None:
222
+ processor.pad_token = processor.eos_token
223
+ else:
224
+ raise ValueError(f"Unknown data processing type (model config type: {model_config_class})")
225
+
226
+ model_forward_signature = set(inspect.signature(pt_model.forward).parameters.keys())
227
+ processor_inputs = {}
228
+ if "input_ids" in model_forward_signature:
229
+ processor_inputs.update(
230
+ {
231
+ "text": ["Hi there!", "I am a batch with more than one row and different input lengths."],
232
+ "padding": True,
233
+ "truncation": True,
234
+ }
235
+ )
236
+ if "pixel_values" in model_forward_signature:
237
+ sample_images = load_dataset("cifar10", "plain_text", split="test")[:2]["img"]
238
+ processor_inputs.update({"images": sample_images})
239
+ if "input_features" in model_forward_signature:
240
+ feature_extractor_signature = inspect.signature(processor.feature_extractor).parameters
241
+ # Pad to the largest input length by default but take feature extractor default
242
+ # padding value if it exists e.g. "max_length" and is not False or None
243
+ if "padding" in feature_extractor_signature:
244
+ default_strategy = feature_extractor_signature["padding"].default
245
+ if default_strategy is not False and default_strategy is not None:
246
+ padding_strategy = default_strategy
247
+ else:
248
+ padding_strategy = True
249
+ else:
250
+ padding_strategy = True
251
+ processor_inputs.update({"audio": _get_audio_input(), "padding": padding_strategy})
252
+ if "input_values" in model_forward_signature: # Wav2Vec2 audio input
253
+ processor_inputs.update({"audio": _get_audio_input(), "padding": True})
254
+ pt_input = processor(**processor_inputs, return_tensors="pt")
255
+ tf_input = processor(**processor_inputs, return_tensors="tf")
256
+
257
+ # Extra input requirements, in addition to the input modality
258
+ if (
259
+ config.is_encoder_decoder
260
+ or (hasattr(pt_model, "encoder") and hasattr(pt_model, "decoder"))
261
+ or "decoder_input_ids" in tf_dummy_inputs
262
+ ):
263
+ decoder_input_ids = np.asarray([[1], [1]], dtype=int) * (pt_model.config.decoder_start_token_id or 0)
264
+ pt_input.update({"decoder_input_ids": torch.tensor(decoder_input_ids)})
265
+ tf_input.update({"decoder_input_ids": tf.convert_to_tensor(decoder_input_ids)})
266
+
267
+ return pt_input, tf_input
268
+
269
+ def run(self):
270
+ # hub version 0.9.0 introduced the possibility of programmatically opening PRs with normal write tokens.
271
+ if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"):
272
+ raise ImportError(
273
+ "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub"
274
+ " installation."
275
+ )
276
+ else:
277
+ from huggingface_hub import Repository, create_commit
278
+ from huggingface_hub._commit_api import CommitOperationAdd
279
+
280
+ # Fetch remote data
281
+ repo = Repository(local_dir=self._local_dir, clone_from=self._model_name)
282
+
283
+ # Load config and get the appropriate architecture -- the latter is needed to convert the head's weights
284
+ config = AutoConfig.from_pretrained(self._local_dir)
285
+ architectures = config.architectures
286
+ if self._override_model_class is not None:
287
+ if self._override_model_class.startswith("TF"):
288
+ architectures = [self._override_model_class[2:]]
289
+ else:
290
+ architectures = [self._override_model_class]
291
+ try:
292
+ pt_class = getattr(import_module("transformers"), architectures[0])
293
+ except AttributeError:
294
+ raise ValueError(f"Model class {self._override_model_class} not found in transformers.")
295
+ try:
296
+ tf_class = getattr(import_module("transformers"), "TF" + architectures[0])
297
+ except AttributeError:
298
+ raise ValueError(f"TF model class TF{self._override_model_class} not found in transformers.")
299
+ elif architectures is None: # No architecture defined -- use auto classes
300
+ pt_class = getattr(import_module("transformers"), "AutoModel")
301
+ tf_class = getattr(import_module("transformers"), "TFAutoModel")
302
+ self._logger.warning("No detected architecture, using AutoModel/TFAutoModel")
303
+ else: # Architecture defined -- use it
304
+ if len(architectures) > 1:
305
+ raise ValueError(f"More than one architecture was found, aborting. (architectures = {architectures})")
306
+ self._logger.warning(f"Detected architecture: {architectures[0]}")
307
+ pt_class = getattr(import_module("transformers"), architectures[0])
308
+ try:
309
+ tf_class = getattr(import_module("transformers"), "TF" + architectures[0])
310
+ except AttributeError:
311
+ raise AttributeError(f"The TensorFlow equivalent of {architectures[0]} doesn't exist in transformers.")
312
+
313
+ # Check the TF dummy inputs to see what keys we need in the forward pass
314
+ tf_from_pt_model = tf_class.from_config(config)
315
+ tf_dummy_inputs = tf_from_pt_model.dummy_inputs
316
+
317
+ del tf_from_pt_model # Try to keep only one model in memory at a time
318
+
319
+ # Load the model and get some basic inputs
320
+ pt_model = pt_class.from_pretrained(self._local_dir)
321
+ pt_model.eval()
322
+
323
+ pt_input, tf_input = self.get_inputs(pt_model, tf_dummy_inputs, config)
324
+
325
+ with torch.no_grad():
326
+ pt_outputs = pt_model(**pt_input, output_hidden_states=True)
327
+ del pt_model # will no longer be used, and may have a large memory footprint
328
+
329
+ tf_from_pt_model = tf_class.from_pretrained(self._local_dir, from_pt=True)
330
+ tf_from_pt_outputs = tf_from_pt_model(**tf_input, output_hidden_states=True, training=False)
331
+
332
+ # Confirms that cross loading PT weights into TF worked.
333
+ crossload_differences = self.find_pt_tf_differences(pt_outputs, tf_from_pt_outputs)
334
+ output_differences = {k: v for k, v in crossload_differences.items() if "hidden" not in k}
335
+ hidden_differences = {k: v for k, v in crossload_differences.items() if "hidden" in k}
336
+ if len(output_differences) == 0 and architectures is not None:
337
+ raise ValueError(
338
+ f"Something went wrong -- the config file has architectures ({architectures}), but no model head"
339
+ " output was found. All outputs start with 'hidden'"
340
+ )
341
+ max_crossload_output_diff = max(output_differences.values()) if output_differences else 0.0
342
+ max_crossload_hidden_diff = max(hidden_differences.values())
343
+ if max_crossload_output_diff > self._max_error or max_crossload_hidden_diff > self._max_error:
344
+ raise ValueError(
345
+ "The cross-loaded TensorFlow model has different outputs, something went wrong!\n"
346
+ + f"\nList of maximum output differences above the threshold ({self._max_error}):\n"
347
+ + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > self._max_error])
348
+ + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_error}):\n"
349
+ + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_error])
350
+ )
351
+
352
+ # Save the weights in a TF format (if needed) and confirms that the results are still good
353
+ tf_weights_path = os.path.join(self._local_dir, TF2_WEIGHTS_NAME)
354
+ tf_weights_index_path = os.path.join(self._local_dir, TF2_WEIGHTS_INDEX_NAME)
355
+ if (not os.path.exists(tf_weights_path) and not os.path.exists(tf_weights_index_path)) or self._new_weights:
356
+ tf_from_pt_model.save_pretrained(self._local_dir)
357
+ del tf_from_pt_model # will no longer be used, and may have a large memory footprint
358
+
359
+ tf_model = tf_class.from_pretrained(self._local_dir)
360
+ tf_outputs = tf_model(**tf_input, output_hidden_states=True)
361
+
362
+ conversion_differences = self.find_pt_tf_differences(pt_outputs, tf_outputs)
363
+ output_differences = {k: v for k, v in conversion_differences.items() if "hidden" not in k}
364
+ hidden_differences = {k: v for k, v in conversion_differences.items() if "hidden" in k}
365
+ if len(output_differences) == 0 and architectures is not None:
366
+ raise ValueError(
367
+ f"Something went wrong -- the config file has architectures ({architectures}), but no model head"
368
+ " output was found. All outputs start with 'hidden'"
369
+ )
370
+ max_conversion_output_diff = max(output_differences.values()) if output_differences else 0.0
371
+ max_conversion_hidden_diff = max(hidden_differences.values())
372
+ if max_conversion_output_diff > self._max_error or max_conversion_hidden_diff > self._max_error:
373
+ raise ValueError(
374
+ "The converted TensorFlow model has different outputs, something went wrong!\n"
375
+ + f"\nList of maximum output differences above the threshold ({self._max_error}):\n"
376
+ + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > self._max_error])
377
+ + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_error}):\n"
378
+ + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_error])
379
+ )
380
+
381
+ commit_message = "Update TF weights" if self._new_weights else "Add TF weights"
382
+ if self._push:
383
+ repo.git_add(auto_lfs_track=True)
384
+ repo.git_commit(commit_message)
385
+ repo.git_push(blocking=True) # this prints a progress bar with the upload
386
+ self._logger.warning(f"TF weights pushed into {self._model_name}")
387
+ elif not self._no_pr:
388
+ self._logger.warning("Uploading the weights into a new PR...")
389
+ commit_description = (
390
+ "Model converted by the [`transformers`' `pt_to_tf`"
391
+ " CLI](https://github.com/huggingface/transformers/blob/main/src/transformers/commands/pt_to_tf.py). "
392
+ "All converted model outputs and hidden layers were validated against its PyTorch counterpart.\n\n"
393
+ f"Maximum crossload output difference={max_crossload_output_diff:.3e}; "
394
+ f"Maximum crossload hidden layer difference={max_crossload_hidden_diff:.3e};\n"
395
+ f"Maximum conversion output difference={max_conversion_output_diff:.3e}; "
396
+ f"Maximum conversion hidden layer difference={max_conversion_hidden_diff:.3e};\n"
397
+ )
398
+ if self._max_error > MAX_ERROR:
399
+ commit_description += (
400
+ f"\n\nCAUTION: The maximum admissible error was manually increased to {self._max_error}!"
401
+ )
402
+ if self._extra_commit_description:
403
+ commit_descrition += "\n\n" + self._extra_commit_description
404
+
405
+ # sharded model -> adds all related files (index and .h5 shards)
406
+ if os.path.exists(tf_weights_index_path):
407
+ operations = [
408
+ CommitOperationAdd(path_in_repo=TF2_WEIGHTS_INDEX_NAME, path_or_fileobj=tf_weights_index_path)
409
+ ]
410
+ for shard_path in tf.io.gfile.glob(self._local_dir + "/tf_model-*.h5"):
411
+ operations += [
412
+ CommitOperationAdd(path_in_repo=os.path.basename(shard_path), path_or_fileobj=shard_path)
413
+ ]
414
+ else:
415
+ operations = [CommitOperationAdd(path_in_repo=TF2_WEIGHTS_NAME, path_or_fileobj=tf_weights_path)]
416
+
417
+ hub_pr_url = create_commit(
418
+ repo_id=self._model_name,
419
+ operations=operations,
420
+ commit_message=commit_message,
421
+ commit_description=commit_description,
422
+ repo_type="model",
423
+ create_pr=True,
424
+ ).pr_url
425
+ self._logger.warning(f"PR open in {hub_pr_url}")
env-llmeval/lib/python3.10/site-packages/transformers/commands/run.py ADDED
@@ -0,0 +1,110 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from argparse import ArgumentParser
16
+
17
+ from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
18
+ from ..utils import logging
19
+ from . import BaseTransformersCLICommand
20
+
21
+
22
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
23
+
24
+
25
+ def try_infer_format_from_ext(path: str):
26
+ if not path:
27
+ return "pipe"
28
+
29
+ for ext in PipelineDataFormat.SUPPORTED_FORMATS:
30
+ if path.endswith(ext):
31
+ return ext
32
+
33
+ raise Exception(
34
+ f"Unable to determine file format from file extension {path}. "
35
+ f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
36
+ )
37
+
38
+
39
+ def run_command_factory(args):
40
+ nlp = pipeline(
41
+ task=args.task,
42
+ model=args.model if args.model else None,
43
+ config=args.config,
44
+ tokenizer=args.tokenizer,
45
+ device=args.device,
46
+ )
47
+ format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
48
+ reader = PipelineDataFormat.from_str(
49
+ format=format,
50
+ output_path=args.output,
51
+ input_path=args.input,
52
+ column=args.column if args.column else nlp.default_input_names,
53
+ overwrite=args.overwrite,
54
+ )
55
+ return RunCommand(nlp, reader)
56
+
57
+
58
+ class RunCommand(BaseTransformersCLICommand):
59
+ def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
60
+ self._nlp = nlp
61
+ self._reader = reader
62
+
63
+ @staticmethod
64
+ def register_subcommand(parser: ArgumentParser):
65
+ run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
66
+ run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
67
+ run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
68
+ run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
69
+ run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
70
+ run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
71
+ run_parser.add_argument(
72
+ "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
73
+ )
74
+ run_parser.add_argument(
75
+ "--column",
76
+ type=str,
77
+ help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
78
+ )
79
+ run_parser.add_argument(
80
+ "--format",
81
+ type=str,
82
+ default="infer",
83
+ choices=PipelineDataFormat.SUPPORTED_FORMATS,
84
+ help="Input format to read from",
85
+ )
86
+ run_parser.add_argument(
87
+ "--device",
88
+ type=int,
89
+ default=-1,
90
+ help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
91
+ )
92
+ run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
93
+ run_parser.set_defaults(func=run_command_factory)
94
+
95
+ def run(self):
96
+ nlp, outputs = self._nlp, []
97
+
98
+ for entry in self._reader:
99
+ output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
100
+ if isinstance(output, dict):
101
+ outputs.append(output)
102
+ else:
103
+ outputs += output
104
+
105
+ # Saving data
106
+ if self._nlp.binary_output:
107
+ binary_path = self._reader.save_binary(outputs)
108
+ logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
109
+ else:
110
+ self._reader.save(outputs)
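
For reference, a hedged sketch of what the `run` command assembles programmatically: a pipeline plus a `PipelineDataFormat` reader that streams entries from an input file and saves the predictions. The task, column name and file paths below are illustrative, not defaults of the command.

```
from transformers import pipeline
from transformers.pipelines import PipelineDataFormat

nlp = pipeline(task="sentiment-analysis")  # stands in for --task/--model

# "csv" is what try_infer_format_from_ext() would pick for a .csv input;
# reviews.csv / predictions.csv are hypothetical paths.
reader = PipelineDataFormat.from_str(
    format="csv",
    output_path="predictions.csv",
    input_path="reviews.csv",
    column="text",
    overwrite=True,
)

outputs = []
for entry in reader:
    result = nlp(**entry) if reader.is_multi_columns else nlp(entry)
    if isinstance(result, dict):
        outputs.append(result)
    else:
        outputs.extend(result)
reader.save(outputs)
```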
env-llmeval/lib/python3.10/site-packages/transformers/commands/serving.py ADDED
@@ -0,0 +1,228 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from argparse import ArgumentParser, Namespace
16
+ from typing import Any, List, Optional
17
+
18
+ from ..pipelines import Pipeline, get_supported_tasks, pipeline
19
+ from ..utils import logging
20
+ from . import BaseTransformersCLICommand
21
+
22
+
23
+ try:
24
+ from fastapi import Body, FastAPI, HTTPException
25
+ from fastapi.routing import APIRoute
26
+ from pydantic import BaseModel
27
+ from starlette.responses import JSONResponse
28
+ from uvicorn import run
29
+
30
+ _serve_dependencies_installed = True
31
+ except (ImportError, AttributeError):
32
+ BaseModel = object
33
+
34
+ def Body(*x, **y):
35
+ pass
36
+
37
+ _serve_dependencies_installed = False
38
+
39
+
40
+ logger = logging.get_logger("transformers-cli/serving")
41
+
42
+
43
+ def serve_command_factory(args: Namespace):
44
+ """
45
+ Factory function used to instantiate serving server from provided command line arguments.
46
+
47
+ Returns: ServeCommand
48
+ """
49
+ nlp = pipeline(
50
+ task=args.task,
51
+ model=args.model if args.model else None,
52
+ config=args.config,
53
+ tokenizer=args.tokenizer,
54
+ device=args.device,
55
+ )
56
+ return ServeCommand(nlp, args.host, args.port, args.workers)
57
+
58
+
59
+ class ServeModelInfoResult(BaseModel):
60
+ """
61
+ Expose model information
62
+ """
63
+
64
+ infos: dict
65
+
66
+
67
+ class ServeTokenizeResult(BaseModel):
68
+ """
69
+ Tokenize result model
70
+ """
71
+
72
+ tokens: List[str]
73
+ tokens_ids: Optional[List[int]]
74
+
75
+
76
+ class ServeDeTokenizeResult(BaseModel):
77
+ """
78
+ DeTokenize result model
79
+ """
80
+
81
+ text: str
82
+
83
+
84
+ class ServeForwardResult(BaseModel):
85
+ """
86
+ Forward result model
87
+ """
88
+
89
+ output: Any
90
+
91
+
92
+ class ServeCommand(BaseTransformersCLICommand):
93
+ @staticmethod
94
+ def register_subcommand(parser: ArgumentParser):
95
+ """
96
+ Register this command to argparse so it's available for the transformers-cli
97
+
98
+ Args:
99
+ parser: Root parser to register command-specific arguments
100
+ """
101
+ serve_parser = parser.add_parser(
102
+ "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
103
+ )
104
+ serve_parser.add_argument(
105
+ "--task",
106
+ type=str,
107
+ choices=get_supported_tasks(),
108
+ help="The task to run the pipeline on",
109
+ )
110
+ serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
111
+ serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
112
+ serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
113
+ serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
114
+ serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
115
+ serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
116
+ serve_parser.add_argument(
117
+ "--device",
118
+ type=int,
119
+ default=-1,
120
+ help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
121
+ )
122
+ serve_parser.set_defaults(func=serve_command_factory)
123
+
124
+ def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
125
+ self._pipeline = pipeline
126
+
127
+ self.host = host
128
+ self.port = port
129
+ self.workers = workers
130
+
131
+ if not _serve_dependencies_installed:
132
+ raise RuntimeError(
133
+ "Using serve command requires FastAPI and uvicorn. "
134
+ 'Please install transformers with [serving]: pip install "transformers[serving]". '
135
+ "Or install FastAPI and uvicorn separately."
136
+ )
137
+ else:
138
+ logger.info(f"Serving model over {host}:{port}")
139
+ self._app = FastAPI(
140
+ routes=[
141
+ APIRoute(
142
+ "/",
143
+ self.model_info,
144
+ response_model=ServeModelInfoResult,
145
+ response_class=JSONResponse,
146
+ methods=["GET"],
147
+ ),
148
+ APIRoute(
149
+ "/tokenize",
150
+ self.tokenize,
151
+ response_model=ServeTokenizeResult,
152
+ response_class=JSONResponse,
153
+ methods=["POST"],
154
+ ),
155
+ APIRoute(
156
+ "/detokenize",
157
+ self.detokenize,
158
+ response_model=ServeDeTokenizeResult,
159
+ response_class=JSONResponse,
160
+ methods=["POST"],
161
+ ),
162
+ APIRoute(
163
+ "/forward",
164
+ self.forward,
165
+ response_model=ServeForwardResult,
166
+ response_class=JSONResponse,
167
+ methods=["POST"],
168
+ ),
169
+ ],
170
+ timeout=600,
171
+ )
172
+
173
+ def run(self):
174
+ run(self._app, host=self.host, port=self.port, workers=self.workers)
175
+
176
+ def model_info(self):
177
+ return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
178
+
179
+ def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
180
+ """
181
+ Tokenize the provided input and optionally return the corresponding token ids: - **text_input**: String to
182
+ tokenize - **return_ids**: Boolean flag indicating whether the tokens should be converted to their integer
183
+ mapping.
184
+ """
185
+ try:
186
+ tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
187
+
188
+ if return_ids:
189
+ tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
190
+ return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
191
+ else:
192
+ return ServeTokenizeResult(tokens=tokens_txt)
193
+
194
+ except Exception as e:
195
+ raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
196
+
197
+ def detokenize(
198
+ self,
199
+ tokens_ids: List[int] = Body(None, embed=True),
200
+ skip_special_tokens: bool = Body(False, embed=True),
201
+ cleanup_tokenization_spaces: bool = Body(True, embed=True),
202
+ ):
203
+ """
204
+ Detokenize the provided tokens ids to readable text: - **tokens_ids**: List of tokens ids -
205
+ **skip_special_tokens**: Flag indicating to not try to decode special tokens - **cleanup_tokenization_spaces**:
206
+ Flag indicating to remove all leading/trailing spaces and intermediate ones.
207
+ """
208
+ try:
209
+ decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
210
+ return ServeDeTokenizeResult(model="", text=decoded_str)
211
+ except Exception as e:
212
+ raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
213
+
214
+ async def forward(self, inputs=Body(None, embed=True)):
215
+ """
216
+ **inputs**: **attention_mask**: **tokens_type_ids**:
217
+ """
218
+
219
+ # Check we don't have empty string
220
+ if len(inputs) == 0:
221
+ return ServeForwardResult(output=[], attention=[])
222
+
223
+ try:
224
+ # Forward through the model
225
+ output = self._pipeline(inputs)
226
+ return ServeForwardResult(output=output)
227
+ except Exception as e:
228
+ raise HTTPException(500, {"error": str(e)})
env-llmeval/lib/python3.10/site-packages/transformers/commands/train.py ADDED
@@ -0,0 +1,158 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ from argparse import ArgumentParser, Namespace
17
+
18
+ from ..data import SingleSentenceClassificationProcessor as Processor
19
+ from ..pipelines import TextClassificationPipeline
20
+ from ..utils import is_tf_available, is_torch_available, logging
21
+ from . import BaseTransformersCLICommand
22
+
23
+
24
+ if not is_tf_available() and not is_torch_available():
25
+ raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
26
+
27
+ # TF training parameters
28
+ USE_XLA = False
29
+ USE_AMP = False
30
+
31
+
32
+ def train_command_factory(args: Namespace):
33
+ """
34
+ Factory function used to instantiate training command from provided command line arguments.
35
+
36
+ Returns: TrainCommand
37
+ """
38
+ return TrainCommand(args)
39
+
40
+
41
+ class TrainCommand(BaseTransformersCLICommand):
42
+ @staticmethod
43
+ def register_subcommand(parser: ArgumentParser):
44
+ """
45
+ Register this command to argparse so it's available for the transformers-cli
46
+
47
+ Args:
48
+ parser: Root parser to register command-specific arguments
49
+ """
50
+ train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
51
+
52
+ train_parser.add_argument(
53
+ "--train_data",
54
+ type=str,
55
+ required=True,
56
+ help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
57
+ )
58
+ train_parser.add_argument(
59
+ "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
60
+ )
61
+ train_parser.add_argument(
62
+ "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
63
+ )
64
+ train_parser.add_argument(
65
+ "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
66
+ )
67
+ train_parser.add_argument(
68
+ "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
69
+ )
70
+
71
+ train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
72
+ train_parser.add_argument(
73
+ "--validation_split",
74
+ type=float,
75
+ default=0.1,
76
+ help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
77
+ )
78
+
79
+ train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
80
+
81
+ train_parser.add_argument(
82
+ "--task", type=str, default="text_classification", help="Task to train the model on."
83
+ )
84
+ train_parser.add_argument(
85
+ "--model", type=str, default="google-bert/bert-base-uncased", help="Model's name or path to stored model."
86
+ )
87
+ train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
88
+ train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
89
+ train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
90
+ train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
91
+ train_parser.set_defaults(func=train_command_factory)
92
+
93
+ def __init__(self, args: Namespace):
94
+ self.logger = logging.get_logger("transformers-cli/training")
95
+
96
+ self.framework = "tf" if is_tf_available() else "torch"
97
+
98
+ os.makedirs(args.output, exist_ok=True)
99
+ self.output = args.output
100
+
101
+ self.column_label = args.column_label
102
+ self.column_text = args.column_text
103
+ self.column_id = args.column_id
104
+
105
+ self.logger.info(f"Loading {args.task} pipeline for {args.model}")
106
+ if args.task == "text_classification":
107
+ self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
108
+ elif args.task == "token_classification":
109
+ raise NotImplementedError
110
+ elif args.task == "question_answering":
111
+ raise NotImplementedError
112
+
113
+ self.logger.info(f"Loading dataset from {args.train_data}")
114
+ self.train_dataset = Processor.create_from_csv(
115
+ args.train_data,
116
+ column_label=args.column_label,
117
+ column_text=args.column_text,
118
+ column_id=args.column_id,
119
+ skip_first_row=args.skip_first_row,
120
+ )
121
+ self.valid_dataset = None
122
+ if args.validation_data:
123
+ self.logger.info(f"Loading validation dataset from {args.validation_data}")
124
+ self.valid_dataset = Processor.create_from_csv(
125
+ args.validation_data,
126
+ column_label=args.column_label,
127
+ column_text=args.column_text,
128
+ column_id=args.column_id,
129
+ skip_first_row=args.skip_first_row,
130
+ )
131
+
132
+ self.validation_split = args.validation_split
133
+ self.train_batch_size = args.train_batch_size
134
+ self.valid_batch_size = args.valid_batch_size
135
+ self.learning_rate = args.learning_rate
136
+ self.adam_epsilon = args.adam_epsilon
137
+
138
+ def run(self):
139
+ if self.framework == "tf":
140
+ return self.run_tf()
141
+ return self.run_torch()
142
+
143
+ def run_torch(self):
144
+ raise NotImplementedError
145
+
146
+ def run_tf(self):
147
+ self.pipeline.fit(
148
+ self.train_dataset,
149
+ validation_data=self.valid_dataset,
150
+ validation_split=self.validation_split,
151
+ learning_rate=self.learning_rate,
152
+ adam_epsilon=self.adam_epsilon,
153
+ train_batch_size=self.train_batch_size,
154
+ valid_batch_size=self.valid_batch_size,
155
+ )
156
+
157
+ # Save trained pipeline
158
+ self.pipeline.save_pretrained(self.output)
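
The `--train_data` help above expects a tab-separated file whose columns line up with `--column_label`, `--column_text` and `--column_id` (0, 1, 2 by default). A small sketch of producing such a file; the file name and rows are invented.

```
import csv

rows = [
    ("positive", "I really enjoyed this movie.", "0"),
    ("negative", "The plot made no sense at all.", "1"),
]
with open("train_data.csv", "w", newline="") as f:
    csv.writer(f, delimiter="\t").writerows(rows)  # tab-separated, as the --train_data help asks

# The command itself is then launched from the shell, e.g.:
#   transformers-cli train --train_data train_data.csv --output ./my-text-classifier
```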
env-llmeval/lib/python3.10/site-packages/transformers/commands/transformers_cli.py ADDED
@@ -0,0 +1,59 @@
1
+ #!/usr/bin/env python
2
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from argparse import ArgumentParser
17
+
18
+ from .add_new_model import AddNewModelCommand
19
+ from .add_new_model_like import AddNewModelLikeCommand
20
+ from .convert import ConvertCommand
21
+ from .download import DownloadCommand
22
+ from .env import EnvironmentCommand
23
+ from .lfs import LfsCommands
24
+ from .pt_to_tf import PTtoTFCommand
25
+ from .run import RunCommand
26
+ from .serving import ServeCommand
27
+ from .user import UserCommands
28
+
29
+
30
+ def main():
31
+ parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
32
+ commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
33
+
34
+ # Register commands
35
+ ConvertCommand.register_subcommand(commands_parser)
36
+ DownloadCommand.register_subcommand(commands_parser)
37
+ EnvironmentCommand.register_subcommand(commands_parser)
38
+ RunCommand.register_subcommand(commands_parser)
39
+ ServeCommand.register_subcommand(commands_parser)
40
+ UserCommands.register_subcommand(commands_parser)
41
+ AddNewModelCommand.register_subcommand(commands_parser)
42
+ AddNewModelLikeCommand.register_subcommand(commands_parser)
43
+ LfsCommands.register_subcommand(commands_parser)
44
+ PTtoTFCommand.register_subcommand(commands_parser)
45
+
46
+ # Let's go
47
+ args = parser.parse_args()
48
+
49
+ if not hasattr(args, "func"):
50
+ parser.print_help()
51
+ exit(1)
52
+
53
+ # Run
54
+ service = args.func(args)
55
+ service.run()
56
+
57
+
58
+ if __name__ == "__main__":
59
+ main()
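
Every command follows the same contract `main()` relies on: a static `register_subcommand(parser)` that wires an argparse sub-parser and sets `func` to a factory, and a `run()` method on the returned object. A self-contained sketch of that pattern (the `hello` command is invented and not part of transformers-cli):

```
from argparse import ArgumentParser


class HelloCommand:
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello", help="Toy command illustrating the pattern.")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"Hello, {self.name}!")


parser = ArgumentParser("demo", usage="demo <command> [<args>]")
commands_parser = parser.add_subparsers(help="demo command helpers")
HelloCommand.register_subcommand(commands_parser)

args = parser.parse_args(["hello", "--name", "transformers"])
args.func(args).run()  # same dispatch as main() above
```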
env-llmeval/lib/python3.10/site-packages/transformers/commands/user.py ADDED
@@ -0,0 +1,197 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import subprocess
16
+ from argparse import ArgumentParser
17
+ from typing import List, Union
18
+
19
+ from huggingface_hub.hf_api import HfFolder, create_repo, whoami
20
+ from requests.exceptions import HTTPError
21
+
22
+ from . import BaseTransformersCLICommand
23
+
24
+
25
+ class UserCommands(BaseTransformersCLICommand):
26
+ @staticmethod
27
+ def register_subcommand(parser: ArgumentParser):
28
+ login_parser = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
29
+ login_parser.set_defaults(func=lambda args: LoginCommand(args))
30
+ whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
31
+ whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
32
+ logout_parser = parser.add_parser("logout", help="Log out")
33
+ logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
34
+
35
+ # new system: git-based repo system
36
+ repo_parser = parser.add_parser(
37
+ "repo",
38
+ help="Deprecated: use `huggingface-cli` instead. Commands to interact with your huggingface.co repos.",
39
+ )
40
+ repo_subparsers = repo_parser.add_subparsers(
41
+ help="Deprecated: use `huggingface-cli` instead. huggingface.co repos related commands"
42
+ )
43
+ repo_create_parser = repo_subparsers.add_parser(
44
+ "create", help="Deprecated: use `huggingface-cli` instead. Create a new repo on huggingface.co"
45
+ )
46
+ repo_create_parser.add_argument(
47
+ "name",
48
+ type=str,
49
+ help="Name for your model's repo. Will be namespaced under your username to build the model id.",
50
+ )
51
+ repo_create_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
52
+ repo_create_parser.add_argument("-y", "--yes", action="store_true", help="Optional: answer Yes to the prompt")
53
+ repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
54
+
55
+
56
+ class ANSI:
57
+ """
58
+ Helper for en.wikipedia.org/wiki/ANSI_escape_code
59
+ """
60
+
61
+ _bold = "\u001b[1m"
62
+ _red = "\u001b[31m"
63
+ _gray = "\u001b[90m"
64
+ _reset = "\u001b[0m"
65
+
66
+ @classmethod
67
+ def bold(cls, s):
68
+ return f"{cls._bold}{s}{cls._reset}"
69
+
70
+ @classmethod
71
+ def red(cls, s):
72
+ return f"{cls._bold}{cls._red}{s}{cls._reset}"
73
+
74
+ @classmethod
75
+ def gray(cls, s):
76
+ return f"{cls._gray}{s}{cls._reset}"
77
+
78
+
79
+ def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
80
+ """
81
+ Inspired by:
82
+
83
+ - stackoverflow.com/a/8356620/593036
84
+ - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
85
+ """
86
+ col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
87
+ row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
88
+ lines = []
89
+ lines.append(row_format.format(*headers))
90
+ lines.append(row_format.format(*["-" * w for w in col_widths]))
91
+ for row in rows:
92
+ lines.append(row_format.format(*row))
93
+ return "\n".join(lines)
94
+
95
+
96
+ class BaseUserCommand:
97
+ def __init__(self, args):
98
+ self.args = args
99
+
100
+
101
+ class LoginCommand(BaseUserCommand):
102
+ def run(self):
103
+ print(
104
+ ANSI.red(
105
+ "ERROR! `huggingface-cli login` uses an outdated login mechanism "
106
+ "that is not compatible with the Hugging Face Hub backend anymore. "
107
+ "Please use `huggingface-cli login instead."
108
+ )
109
+ )
110
+
111
+
112
+ class WhoamiCommand(BaseUserCommand):
113
+ def run(self):
114
+ print(
115
+ ANSI.red(
116
+ "WARNING! `transformers-cli whoami` is deprecated and will be removed in v5. Please use "
117
+ "`huggingface-cli whoami` instead."
118
+ )
119
+ )
120
+ token = HfFolder.get_token()
121
+ if token is None:
122
+ print("Not logged in")
123
+ exit()
124
+ try:
125
+ user, orgs = whoami(token)
126
+ print(user)
127
+ if orgs:
128
+ print(ANSI.bold("orgs: "), ",".join(orgs))
129
+ except HTTPError as e:
130
+ print(e)
131
+ print(ANSI.red(e.response.text))
132
+ exit(1)
133
+
134
+
135
+ class LogoutCommand(BaseUserCommand):
136
+ def run(self):
137
+ print(
138
+ ANSI.red(
139
+ "ERROR! `transformers-cli logout` uses an outdated logout mechanism "
140
+ "that is not compatible with the Hugging Face Hub backend anymore. "
141
+ "Please use `huggingface-cli logout instead."
142
+ )
143
+ )
144
+
145
+
146
+ class RepoCreateCommand(BaseUserCommand):
147
+ def run(self):
148
+ print(
149
+ ANSI.red(
150
+ "WARNING! Managing repositories through transformers-cli is deprecated. "
151
+ "Please use `huggingface-cli` instead."
152
+ )
153
+ )
154
+ token = HfFolder.get_token()
155
+ if token is None:
156
+ print("Not logged in")
157
+ exit(1)
158
+ try:
159
+ stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
160
+ print(ANSI.gray(stdout.strip()))
161
+ except FileNotFoundError:
162
+ print("Looks like you do not have git installed, please install.")
163
+
164
+ try:
165
+ stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
166
+ print(ANSI.gray(stdout.strip()))
167
+ except FileNotFoundError:
168
+ print(
169
+ ANSI.red(
170
+ "Looks like you do not have git-lfs installed, please install."
171
+ " You can install from https://git-lfs.github.com/."
172
+ " Then run `git lfs install` (you only have to do this once)."
173
+ )
174
+ )
175
+ print("")
176
+
177
+ user, _ = whoami(token)
178
+ namespace = self.args.organization if self.args.organization is not None else user
179
+ full_name = f"{namespace}/{self.args.name}"
180
+ print(f"You are about to create {ANSI.bold(full_name)}")
181
+
182
+ if not self.args.yes:
183
+ choice = input("Proceed? [Y/n] ").lower()
184
+ if not (choice == "" or choice == "y" or choice == "yes"):
185
+ print("Abort")
186
+ exit()
187
+ try:
188
+ url = create_repo(token, name=self.args.name, organization=self.args.organization)
189
+ except HTTPError as e:
190
+ print(e)
191
+ print(ANSI.red(e.response.text))
192
+ exit(1)
193
+ print("\nYour repo now lives at:")
194
+ print(f" {ANSI.bold(url)}")
195
+ print("\nYou can clone it locally with the command below, and commit/push as usual.")
196
+ print(f"\n git clone {url}")
197
+ print("")
env-llmeval/lib/python3.10/site-packages/transformers/data/__init__.py ADDED
@@ -0,0 +1,44 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .data_collator import (
16
+ DataCollatorForLanguageModeling,
17
+ DataCollatorForPermutationLanguageModeling,
18
+ DataCollatorForSeq2Seq,
19
+ DataCollatorForSOP,
20
+ DataCollatorForTokenClassification,
21
+ DataCollatorForWholeWordMask,
22
+ DataCollatorWithPadding,
23
+ DefaultDataCollator,
24
+ default_data_collator,
25
+ )
26
+ from .metrics import glue_compute_metrics, xnli_compute_metrics
27
+ from .processors import (
28
+ DataProcessor,
29
+ InputExample,
30
+ InputFeatures,
31
+ SingleSentenceClassificationProcessor,
32
+ SquadExample,
33
+ SquadFeatures,
34
+ SquadV1Processor,
35
+ SquadV2Processor,
36
+ glue_convert_examples_to_features,
37
+ glue_output_modes,
38
+ glue_processors,
39
+ glue_tasks_num_labels,
40
+ squad_convert_examples_to_features,
41
+ xnli_output_modes,
42
+ xnli_processors,
43
+ xnli_tasks_num_labels,
44
+ )
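
Of the re-exports above, `default_data_collator` is the most common entry point: it batches a list of feature dicts into framework tensors (defined in `data_collator.py` below). A minimal sketch with invented toy features; real ones would come from a tokenizer.

```
from transformers.data import default_data_collator

features = [
    {"input_ids": [101, 7592, 102], "attention_mask": [1, 1, 1], "label": 0},
    {"input_ids": [101, 2088, 102], "attention_mask": [1, 1, 1], "label": 1},
]
batch = default_data_collator(features, return_tensors="np")
print({k: v.shape for k, v in batch.items()})  # input_ids/attention_mask -> (2, 3), labels -> (2,)
```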
env-llmeval/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.13 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc ADDED
Binary file (46.5 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/data/data_collator.py ADDED
@@ -0,0 +1,1568 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import random
16
+ import warnings
17
+ from collections.abc import Mapping
18
+ from dataclasses import dataclass
19
+ from random import randint
20
+ from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ..models.bert import BertTokenizer, BertTokenizerFast
25
+ from ..tokenization_utils_base import PreTrainedTokenizerBase
26
+ from ..utils import PaddingStrategy
27
+
28
+
29
+ InputDataClass = NewType("InputDataClass", Any)
30
+
31
+ """
32
+ A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
33
+ of PyTorch/TensorFlow tensors or NumPy arrays.
34
+ """
35
+ DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])
36
+
37
+
38
+ class DataCollatorMixin:
39
+ def __call__(self, features, return_tensors=None):
40
+ if return_tensors is None:
41
+ return_tensors = self.return_tensors
42
+ if return_tensors == "tf":
43
+ return self.tf_call(features)
44
+ elif return_tensors == "pt":
45
+ return self.torch_call(features)
46
+ elif return_tensors == "np":
47
+ return self.numpy_call(features)
48
+ else:
49
+ raise ValueError(f"Framework '{return_tensors}' not recognized!")
50
+
51
+
52
+ def pad_without_fast_tokenizer_warning(tokenizer, *pad_args, **pad_kwargs):
53
+ """
54
+ Pads without triggering the warning about how using the pad function is sub-optimal when using a fast tokenizer.
55
+ """
56
+
57
+ # To avoid errors when using Feature extractors
58
+ if not hasattr(tokenizer, "deprecation_warnings"):
59
+ return tokenizer.pad(*pad_args, **pad_kwargs)
60
+
61
+ # Save the state of the warning, then disable it
62
+ warning_state = tokenizer.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False)
63
+ tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True
64
+
65
+ try:
66
+ padded = tokenizer.pad(*pad_args, **pad_kwargs)
67
+ finally:
68
+ # Restore the state of the warning.
69
+ tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = warning_state
70
+
71
+ return padded
72
+
73
+
74
+ def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
75
+ """
76
+ Very simple data collator that simply collates batches of dict-like objects and performs special handling for
77
+ potential keys named:
78
+
79
+ - `label`: handles a single value (int or float) per object
80
+ - `label_ids`: handles a list of values per object
81
+
82
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
83
+ to the model. See glue and ner for example of how it's useful.
84
+ """
85
+
86
+ # In this function we'll make the assumption that all `features` in the batch
87
+ # have the same attributes.
88
+ # So we will look at the first element as a proxy for what attributes exist
89
+ # on the whole batch.
90
+
91
+ if return_tensors == "pt":
92
+ return torch_default_data_collator(features)
93
+ elif return_tensors == "tf":
94
+ return tf_default_data_collator(features)
95
+ elif return_tensors == "np":
96
+ return numpy_default_data_collator(features)
97
+
98
+
99
+ @dataclass
100
+ class DefaultDataCollator(DataCollatorMixin):
101
+ """
102
+ Very simple data collator that simply collates batches of dict-like objects and performs special handling for
103
+ potential keys named:
104
+
105
+ - `label`: handles a single value (int or float) per object
106
+ - `label_ids`: handles a list of values per object
107
+
108
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
109
+ to the model. See glue and ner for example of how it's useful.
110
+
111
+ This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
112
+ helpful if you need to set a return_tensors value at initialization.
113
+
114
+ Args:
115
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
116
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
117
+ """
118
+
119
+ return_tensors: str = "pt"
120
+
121
+ def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
122
+ if return_tensors is None:
123
+ return_tensors = self.return_tensors
124
+ return default_data_collator(features, return_tensors)
125
+
126
+
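A small sketch of the label handling described in the docstrings above, assuming PyTorch is available; the toy `input_ids` values are arbitrary and only illustrate the shape of the features.

    import torch
    from transformers.data import default_data_collator

    features = [
        {"input_ids": [101, 2023, 102], "label": 0},
        {"input_ids": [101, 2062, 102], "label": 1},
    ]
    batch = default_data_collator(features, return_tensors="pt")
    # Integer `label` values are gathered into a single long tensor under the "labels" key;
    # every other key is stacked or tensorized as-is.
    assert batch["labels"].dtype == torch.long and batch["labels"].tolist() == [0, 1]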
127
+ def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
128
+ import torch
129
+
130
+ if not isinstance(features[0], Mapping):
131
+ features = [vars(f) for f in features]
132
+ first = features[0]
133
+ batch = {}
134
+
135
+ # Special handling for labels.
136
+ # Ensure that tensor is created with the correct type
137
+ # (it should be automatically the case, but let's make sure of it.)
138
+ if "label" in first and first["label"] is not None:
139
+ label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
140
+ dtype = torch.long if isinstance(label, int) else torch.float
141
+ batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
142
+ elif "label_ids" in first and first["label_ids"] is not None:
143
+ if isinstance(first["label_ids"], torch.Tensor):
144
+ batch["labels"] = torch.stack([f["label_ids"] for f in features])
145
+ else:
146
+ dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
147
+ batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
148
+
149
+ # Handling of all other possible keys.
150
+ # Again, we will use the first element to figure out which key/values are not None for this model.
151
+ for k, v in first.items():
152
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
153
+ if isinstance(v, torch.Tensor):
154
+ batch[k] = torch.stack([f[k] for f in features])
155
+ elif isinstance(v, np.ndarray):
156
+ batch[k] = torch.tensor(np.stack([f[k] for f in features]))
157
+ else:
158
+ batch[k] = torch.tensor([f[k] for f in features])
159
+
160
+ return batch
161
+
162
+
163
+ def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
164
+ import tensorflow as tf
165
+
166
+ if not isinstance(features[0], Mapping):
167
+ features = [vars(f) for f in features]
168
+ first = features[0]
169
+ batch = {}
170
+
171
+ # Special handling for labels.
172
+ # Ensure that tensor is created with the correct type
173
+ # (it should be automatically the case, but let's make sure of it.)
174
+ if "label" in first and first["label"] is not None:
175
+ label_col_name = "label"
176
+ elif "label_ids" in first and first["label_ids"] is not None:
177
+ label_col_name = "label_ids"
178
+ elif "labels" in first and first["labels"] is not None:
179
+ label_col_name = "labels"
180
+ else:
181
+ label_col_name = None
182
+ if label_col_name is not None:
183
+ if isinstance(first[label_col_name], tf.Tensor):
184
+ dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
185
+ elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
186
+ dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
187
+ elif isinstance(first[label_col_name], (tuple, list)):
188
+ dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
189
+ else:
190
+ dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
191
+ batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)
192
+ # Handling of all other possible keys.
193
+ # Again, we will use the first element to figure out which key/values are not None for this model.
194
+ for k, v in first.items():
195
+ if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
196
+ if isinstance(v, (tf.Tensor, np.ndarray)):
197
+ batch[k] = tf.stack([f[k] for f in features])
198
+ else:
199
+ batch[k] = tf.convert_to_tensor([f[k] for f in features])
200
+
201
+ return batch
202
+
203
+
204
+ def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
205
+ if not isinstance(features[0], Mapping):
206
+ features = [vars(f) for f in features]
207
+ first = features[0]
208
+ batch = {}
209
+
210
+ # Special handling for labels.
211
+ # Ensure that tensor is created with the correct type
212
+ # (it should be automatically the case, but let's make sure of it.)
213
+ if "label" in first and first["label"] is not None:
214
+ label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
215
+ dtype = np.int64 if isinstance(label, int) else np.float32
216
+ batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
217
+ elif "label_ids" in first and first["label_ids"] is not None:
218
+ if isinstance(first["label_ids"], np.ndarray):
219
+ batch["labels"] = np.stack([f["label_ids"] for f in features])
220
+ else:
221
+ dtype = np.int64 if isinstance(first["label_ids"][0], int) else np.float32
222
+ batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)
223
+
224
+ # Handling of all other possible keys.
225
+ # Again, we will use the first element to figure out which key/values are not None for this model.
226
+ for k, v in first.items():
227
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
228
+ if isinstance(v, np.ndarray):
229
+ batch[k] = np.stack([f[k] for f in features])
230
+ else:
231
+ batch[k] = np.array([f[k] for f in features])
232
+
233
+ return batch
234
+
235
+
236
+ @dataclass
237
+ class DataCollatorWithPadding:
238
+ """
239
+ Data collator that will dynamically pad the inputs received.
240
+
241
+ Args:
242
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
243
+ The tokenizer used for encoding the data.
244
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
245
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
246
+ among:
247
+
248
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
249
+ sequence is provided).
250
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
251
+ acceptable input length for the model if that argument is not provided.
252
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
253
+ max_length (`int`, *optional*):
254
+ Maximum length of the returned list and optionally padding length (see above).
255
+ pad_to_multiple_of (`int`, *optional*):
256
+ If set will pad the sequence to a multiple of the provided value.
257
+
258
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
259
+ 7.5 (Volta).
260
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
261
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
262
+ """
263
+
264
+ tokenizer: PreTrainedTokenizerBase
265
+ padding: Union[bool, str, PaddingStrategy] = True
266
+ max_length: Optional[int] = None
267
+ pad_to_multiple_of: Optional[int] = None
268
+ return_tensors: str = "pt"
269
+
270
+ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
271
+ batch = pad_without_fast_tokenizer_warning(
272
+ self.tokenizer,
273
+ features,
274
+ padding=self.padding,
275
+ max_length=self.max_length,
276
+ pad_to_multiple_of=self.pad_to_multiple_of,
277
+ return_tensors=self.return_tensors,
278
+ )
279
+ if "label" in batch:
280
+ batch["labels"] = batch["label"]
281
+ del batch["label"]
282
+ if "label_ids" in batch:
283
+ batch["labels"] = batch["label_ids"]
284
+ del batch["label_ids"]
285
+ return batch
286
+
287
+
288
+ @dataclass
289
+ class DataCollatorForTokenClassification(DataCollatorMixin):
290
+ """
291
+ Data collator that will dynamically pad the inputs received, as well as the labels.
292
+
293
+ Args:
294
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
295
+ The tokenizer used for encoding the data.
296
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
297
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
298
+ among:
299
+
300
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
301
+ sequence is provided).
302
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
303
+ acceptable input length for the model if that argument is not provided.
304
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
305
+ max_length (`int`, *optional*):
306
+ Maximum length of the returned list and optionally padding length (see above).
307
+ pad_to_multiple_of (`int`, *optional*):
308
+ If set will pad the sequence to a multiple of the provided value.
309
+
310
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
311
+ 7.5 (Volta).
312
+ label_pad_token_id (`int`, *optional*, defaults to -100):
313
+ The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
314
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
315
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
316
+ """
317
+
318
+ tokenizer: PreTrainedTokenizerBase
319
+ padding: Union[bool, str, PaddingStrategy] = True
320
+ max_length: Optional[int] = None
321
+ pad_to_multiple_of: Optional[int] = None
322
+ label_pad_token_id: int = -100
323
+ return_tensors: str = "pt"
324
+
325
+ def torch_call(self, features):
326
+ import torch
327
+
328
+ label_name = "label" if "label" in features[0].keys() else "labels"
329
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
330
+
331
+ no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
332
+
333
+ batch = pad_without_fast_tokenizer_warning(
334
+ self.tokenizer,
335
+ no_labels_features,
336
+ padding=self.padding,
337
+ max_length=self.max_length,
338
+ pad_to_multiple_of=self.pad_to_multiple_of,
339
+ return_tensors="pt",
340
+ )
341
+
342
+ if labels is None:
343
+ return batch
344
+
345
+ sequence_length = batch["input_ids"].shape[1]
346
+ padding_side = self.tokenizer.padding_side
347
+
348
+ def to_list(tensor_or_iterable):
349
+ if isinstance(tensor_or_iterable, torch.Tensor):
350
+ return tensor_or_iterable.tolist()
351
+ return list(tensor_or_iterable)
352
+
353
+ if padding_side == "right":
354
+ batch[label_name] = [
355
+ to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
356
+ ]
357
+ else:
358
+ batch[label_name] = [
359
+ [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
360
+ ]
361
+
362
+ batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
363
+ return batch
364
+
365
+ def tf_call(self, features):
366
+ import tensorflow as tf
367
+
368
+ label_name = "label" if "label" in features[0].keys() else "labels"
369
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
370
+ batch = pad_without_fast_tokenizer_warning(
371
+ self.tokenizer,
372
+ features,
373
+ padding=self.padding,
374
+ max_length=self.max_length,
375
+ pad_to_multiple_of=self.pad_to_multiple_of,
376
+ # Conversion to tensors will fail if we have labels as they are not of the same length yet.
377
+ return_tensors="tf" if labels is None else None,
378
+ )
379
+
380
+ if labels is None:
381
+ return batch
382
+
383
+ sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
384
+ padding_side = self.tokenizer.padding_side
385
+ if padding_side == "right":
386
+ batch["labels"] = [
387
+ list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
388
+ ]
389
+ else:
390
+ batch["labels"] = [
391
+ [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
392
+ ]
393
+
394
+ batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
395
+ return batch
396
+
397
+ def numpy_call(self, features):
398
+ label_name = "label" if "label" in features[0].keys() else "labels"
399
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
400
+ batch = pad_without_fast_tokenizer_warning(
401
+ self.tokenizer,
402
+ features,
403
+ padding=self.padding,
404
+ max_length=self.max_length,
405
+ pad_to_multiple_of=self.pad_to_multiple_of,
406
+ # Conversion to tensors will fail if we have labels as they are not of the same length yet.
407
+ return_tensors="np" if labels is None else None,
408
+ )
409
+
410
+ if labels is None:
411
+ return batch
412
+
413
+ sequence_length = np.array(batch["input_ids"]).shape[1]
414
+ padding_side = self.tokenizer.padding_side
415
+ if padding_side == "right":
416
+ batch["labels"] = [
417
+ list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
418
+ ]
419
+ else:
420
+ batch["labels"] = [
421
+ [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
422
+ ]
423
+
424
+ batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
425
+ return batch
426
+
427
+
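A hedged sketch of how this collator pads ragged per-token labels; "bert-base-cased" is only an example checkpoint and the token ids below are illustrative, with PyTorch assumed for the default `return_tensors="pt"`.

    from transformers import AutoTokenizer
    from transformers.data import DataCollatorForTokenClassification

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")  # illustrative checkpoint
    collator = DataCollatorForTokenClassification(tokenizer)

    features = [
        {"input_ids": [101, 7592, 102], "labels": [0, 1, 0]},
        {"input_ids": [101, 7592, 2088, 999, 102], "labels": [0, 1, 2, 0, 0]},
    ]
    batch = collator(features)
    # The shorter label row is right-padded with label_pad_token_id (-100) so the loss skips those positions.
    print(batch["labels"].tolist())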
428
+ def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
429
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
430
+ import torch
431
+
432
+ # Tensorize if necessary.
433
+ if isinstance(examples[0], (list, tuple, np.ndarray)):
434
+ examples = [torch.tensor(e, dtype=torch.long) for e in examples]
435
+
436
+ length_of_first = examples[0].size(0)
437
+
438
+ # Check if padding is necessary.
439
+
440
+ are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
441
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
442
+ return torch.stack(examples, dim=0)
443
+
444
+ # If yes, check if we have a `pad_token`.
445
+ if tokenizer._pad_token is None:
446
+ raise ValueError(
447
+ "You are attempting to pad samples but the tokenizer you are using"
448
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
449
+ )
450
+
451
+ # Creating the full tensor and filling it with our data.
452
+ max_length = max(x.size(0) for x in examples)
453
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
454
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
455
+ result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
456
+ for i, example in enumerate(examples):
457
+ if tokenizer.padding_side == "right":
458
+ result[i, : example.shape[0]] = example
459
+ else:
460
+ result[i, -example.shape[0] :] = example
461
+ return result
462
+
463
+
464
+ def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
465
+ import tensorflow as tf
466
+
467
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
468
+ # Tensorize if necessary.
469
+ if isinstance(examples[0], (list, tuple)):
470
+ examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples]
471
+
472
+ # Check if padding is necessary.
473
+ length_of_first = len(examples[0])
474
+ are_tensors_same_length = all(len(x) == length_of_first for x in examples)
475
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
476
+ return tf.stack(examples, axis=0)
477
+
478
+ # If yes, check if we have a `pad_token`.
479
+ if tokenizer._pad_token is None:
480
+ raise ValueError(
481
+ "You are attempting to pad samples but the tokenizer you are using"
482
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
483
+ )
484
+
485
+ # Creating the full tensor and filling it with our data.
486
+ max_length = max(len(x) for x in examples)
487
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
488
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
489
+ # result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
490
+ result = []
491
+ rank = tf.rank(examples[0])
492
+ paddings = np.zeros((rank, 2), dtype=np.int32)
493
+ for example in examples:
494
+ if tokenizer.padding_side == "right":
495
+ paddings[0, 1] = max_length - len(example)
496
+ else:
497
+ paddings[0, 0] = max_length - len(example)
498
+ result.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id))
499
+ return tf.stack(result, axis=0)
500
+
501
+
502
+ def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
503
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
504
+ # Tensorize if necessary.
505
+ if isinstance(examples[0], (list, tuple)):
506
+ examples = [np.array(e, dtype=np.int64) for e in examples]
507
+
508
+ # Check if padding is necessary.
509
+ length_of_first = len(examples[0])
510
+ are_tensors_same_length = all(len(x) == length_of_first for x in examples)
511
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
512
+ return np.stack(examples, axis=0)
513
+
514
+ # If yes, check if we have a `pad_token`.
515
+ if tokenizer._pad_token is None:
516
+ raise ValueError(
517
+ "You are attempting to pad samples but the tokenizer you are using"
518
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
519
+ )
520
+
521
+ # Creating the full tensor and filling it with our data.
522
+ max_length = max(len(x) for x in examples)
523
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
524
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
525
+ result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
526
+ for i, example in enumerate(examples):
527
+ if tokenizer.padding_side == "right":
528
+ result[i, : example.shape[0]] = example
529
+ else:
530
+ result[i, -example.shape[0] :] = example
531
+ return result
532
+
533
+
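To make the rounding rule shared by the three collate helpers concrete, here is a small sketch against `_numpy_collate_batch`; the `_Tok` class is a hypothetical stand-in that carries only the attributes the helper actually reads.

    import numpy as np

    class _Tok:  # hypothetical stand-in for a tokenizer
        pad_token_id = 0
        _pad_token = "[PAD]"
        padding_side = "right"

    examples = [np.array([5, 6, 7]), np.array([8, 9, 10, 11, 12])]
    batch = _numpy_collate_batch(examples, _Tok(), pad_to_multiple_of=8)
    # max length 5 is rounded up to ((5 // 8) + 1) * 8 = 8; shorter rows are right-padded with pad_token_id.
    print(batch.shape)  # (2, 8)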
534
+ def tolist(x):
535
+ if isinstance(x, list):
536
+ return x
537
+ elif hasattr(x, "numpy"): # Checks for TF tensors without needing the import
538
+ x = x.numpy()
539
+ return x.tolist()
540
+
541
+
542
+ @dataclass
543
+ class DataCollatorForSeq2Seq:
544
+ """
545
+ Data collator that will dynamically pad the inputs received, as well as the labels.
546
+
547
+ Args:
548
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
549
+ The tokenizer used for encoding the data.
550
+ model ([`PreTrainedModel`], *optional*):
551
+ The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
552
+ prepare the *decoder_input_ids*
553
+
554
+ This is useful when using *label_smoothing* to avoid calculating loss twice.
555
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
556
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
557
+ among:
558
+
559
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
560
+ sequence is provided).
561
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
562
+ acceptable input length for the model if that argument is not provided.
563
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
564
+ max_length (`int`, *optional*):
565
+ Maximum length of the returned list and optionally padding length (see above).
566
+ pad_to_multiple_of (`int`, *optional*):
567
+ If set will pad the sequence to a multiple of the provided value.
568
+
569
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
570
+ 7.5 (Volta).
571
+ label_pad_token_id (`int`, *optional*, defaults to -100):
572
+ The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
573
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
574
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
575
+ """
576
+
577
+ tokenizer: PreTrainedTokenizerBase
578
+ model: Optional[Any] = None
579
+ padding: Union[bool, str, PaddingStrategy] = True
580
+ max_length: Optional[int] = None
581
+ pad_to_multiple_of: Optional[int] = None
582
+ label_pad_token_id: int = -100
583
+ return_tensors: str = "pt"
584
+
585
+ def __call__(self, features, return_tensors=None):
586
+ if return_tensors is None:
587
+ return_tensors = self.return_tensors
588
+ labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
589
+ # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
590
+ # same length to return tensors.
591
+ if labels is not None:
592
+ max_label_length = max(len(l) for l in labels)
593
+ if self.pad_to_multiple_of is not None:
594
+ max_label_length = (
595
+ (max_label_length + self.pad_to_multiple_of - 1)
596
+ // self.pad_to_multiple_of
597
+ * self.pad_to_multiple_of
598
+ )
599
+
600
+ padding_side = self.tokenizer.padding_side
601
+ for feature in features:
602
+ remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
603
+ if isinstance(feature["labels"], list):
604
+ feature["labels"] = (
605
+ feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
606
+ )
607
+ elif padding_side == "right":
608
+ feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
609
+ else:
610
+ feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
611
+
612
+ features = pad_without_fast_tokenizer_warning(
613
+ self.tokenizer,
614
+ features,
615
+ padding=self.padding,
616
+ max_length=self.max_length,
617
+ pad_to_multiple_of=self.pad_to_multiple_of,
618
+ return_tensors=return_tensors,
619
+ )
620
+
621
+ # prepare decoder_input_ids
622
+ if (
623
+ labels is not None
624
+ and self.model is not None
625
+ and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
626
+ ):
627
+ decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
628
+ features["decoder_input_ids"] = decoder_input_ids
629
+
630
+ return features
631
+
632
+
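A hedged sketch of the label pre-padding and `decoder_input_ids` preparation described above; "t5-small" is only an example checkpoint (it happens to expose `prepare_decoder_input_ids_from_labels`), and PyTorch is assumed.

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    from transformers.data import DataCollatorForSeq2Seq

    tokenizer = AutoTokenizer.from_pretrained("t5-small")  # illustrative checkpoint
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    collator = DataCollatorForSeq2Seq(tokenizer, model=model)

    features = [
        {"input_ids": tokenizer("translate English to German: hello").input_ids,
         "labels": tokenizer("hallo").input_ids},
        {"input_ids": tokenizer("translate English to German: thank you very much").input_ids,
         "labels": tokenizer("vielen Dank").input_ids},
    ]
    batch = collator(features)
    # Labels are pre-padded with -100 before `tokenizer.pad`, and decoder_input_ids are derived from them via the model.
    print(batch["labels"].shape, batch["decoder_input_ids"].shape)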
633
+ @dataclass
634
+ class DataCollatorForLanguageModeling(DataCollatorMixin):
635
+ """
636
+ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
637
+ are not all of the same length.
638
+
639
+ Args:
640
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
641
+ The tokenizer used for encoding the data.
642
+ mlm (`bool`, *optional*, defaults to `True`):
643
+ Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
644
+ with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
645
+ tokens and the value to predict for the masked token.
646
+ mlm_probability (`float`, *optional*, defaults to 0.15):
647
+ The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
648
+ pad_to_multiple_of (`int`, *optional*):
649
+ If set will pad the sequence to a multiple of the provided value.
650
+ return_tensors (`str`):
651
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
652
+
653
+ <Tip>
654
+
655
+ For best performance, this data collator should be used with a dataset having items that are dictionaries or
656
+ BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
657
+ [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
658
+
659
+ </Tip>"""
660
+
661
+ tokenizer: PreTrainedTokenizerBase
662
+ mlm: bool = True
663
+ mlm_probability: float = 0.15
664
+ pad_to_multiple_of: Optional[int] = None
665
+ tf_experimental_compile: bool = False
666
+ return_tensors: str = "pt"
667
+
668
+ def __post_init__(self):
669
+ if self.mlm and self.tokenizer.mask_token is None:
670
+ raise ValueError(
671
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. "
672
+ "You should pass `mlm=False` to train on causal language modeling instead."
673
+ )
674
+ if self.tf_experimental_compile:
675
+ import tensorflow as tf
676
+
677
+ self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)
678
+
679
+ @staticmethod
680
+ def tf_bernoulli(shape, probability):
681
+ import tensorflow as tf
682
+
683
+ prob_matrix = tf.fill(shape, probability)
684
+ return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)
685
+
686
+ def tf_mask_tokens(
687
+ self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
688
+ ) -> Tuple[Any, Any]:
689
+ """
690
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
691
+ """
692
+ import tensorflow as tf
693
+
694
+ mask_token_id = tf.cast(mask_token_id, inputs.dtype)
695
+
696
+ input_shape = tf.shape(inputs)
697
+ # 1 for a special token, 0 for a normal token in the special tokens mask
698
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
699
+ masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability) & ~special_tokens_mask
700
+ # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
701
+ labels = tf.where(masked_indices, inputs, -100)
702
+
703
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
704
+ indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
705
+
706
+ inputs = tf.where(indices_replaced, mask_token_id, inputs)
707
+
708
+ # 10% of the time, we replace masked input tokens with random word
709
+ indices_random = self.tf_bernoulli(input_shape, 0.1) & masked_indices & ~indices_replaced
710
+ random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)
711
+
712
+ inputs = tf.where(indices_random, random_words, inputs)
713
+
714
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
715
+ return inputs, labels
716
+
717
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
718
+ import tensorflow as tf
719
+
720
+ # Handle dict or lists with proper padding and conversion to tensor.
721
+ if isinstance(examples[0], Mapping):
722
+ batch = pad_without_fast_tokenizer_warning(
723
+ self.tokenizer, examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of
724
+ )
725
+ else:
726
+ batch = {
727
+ "input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
728
+ }
729
+
730
+ # If special token mask has been preprocessed, pop it from the dict.
731
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
732
+ if self.mlm:
733
+ if special_tokens_mask is None:
734
+ special_tokens_mask = [
735
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
736
+ for val in batch["input_ids"].numpy().tolist()
737
+ ]
738
+ # Cannot directly create as bool
739
+ special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
740
+ else:
741
+ special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
742
+ batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
743
+ tf.cast(batch["input_ids"], tf.int64),
744
+ special_tokens_mask=special_tokens_mask,
745
+ mask_token_id=self.tokenizer.mask_token_id,
746
+ vocab_size=len(self.tokenizer),
747
+ )
748
+ else:
749
+ labels = batch["input_ids"]
750
+ if self.tokenizer.pad_token_id is not None:
751
+ # Replace self.tokenizer.pad_token_id with -100
752
+ labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
753
+ else:
754
+ labels = tf.identity(labels) # Makes a copy, just in case
755
+ batch["labels"] = labels
756
+ return batch
757
+
758
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
759
+ # Handle dict or lists with proper padding and conversion to tensor.
760
+ if isinstance(examples[0], Mapping):
761
+ batch = pad_without_fast_tokenizer_warning(
762
+ self.tokenizer, examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of
763
+ )
764
+ else:
765
+ batch = {
766
+ "input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
767
+ }
768
+
769
+ # If special token mask has been preprocessed, pop it from the dict.
770
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
771
+ if self.mlm:
772
+ batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
773
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
774
+ )
775
+ else:
776
+ labels = batch["input_ids"].clone()
777
+ if self.tokenizer.pad_token_id is not None:
778
+ labels[labels == self.tokenizer.pad_token_id] = -100
779
+ batch["labels"] = labels
780
+ return batch
781
+
782
+ def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
783
+ """
784
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
785
+ """
786
+ import torch
787
+
788
+ labels = inputs.clone()
789
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
790
+ probability_matrix = torch.full(labels.shape, self.mlm_probability)
791
+ if special_tokens_mask is None:
792
+ special_tokens_mask = [
793
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
794
+ ]
795
+ special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
796
+ else:
797
+ special_tokens_mask = special_tokens_mask.bool()
798
+
799
+ probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
800
+ masked_indices = torch.bernoulli(probability_matrix).bool()
801
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
802
+
803
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
804
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
805
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
806
+
807
+ # 10% of the time, we replace masked input tokens with random word
808
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
809
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
810
+ inputs[indices_random] = random_words[indices_random]
811
+
812
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
813
+ return inputs, labels
814
+
815
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
816
+ # Handle dict or lists with proper padding and conversion to tensor.
817
+ if isinstance(examples[0], Mapping):
818
+ batch = pad_without_fast_tokenizer_warning(
819
+ self.tokenizer, examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of
820
+ )
821
+ else:
822
+ batch = {
823
+ "input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
824
+ }
825
+
826
+ # If special token mask has been preprocessed, pop it from the dict.
827
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
828
+ if self.mlm:
829
+ batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
830
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
831
+ )
832
+ else:
833
+ labels = np.copy(batch["input_ids"])
834
+ if self.tokenizer.pad_token_id is not None:
835
+ labels[labels == self.tokenizer.pad_token_id] = -100
836
+ batch["labels"] = labels
837
+ return batch
838
+
839
+ def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
840
+ """
841
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
842
+ """
843
+ labels = np.copy(inputs)
844
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
845
+ probability_matrix = np.full(labels.shape, self.mlm_probability)
846
+ if special_tokens_mask is None:
847
+ special_tokens_mask = [
848
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
849
+ ]
850
+ special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
851
+ else:
852
+ special_tokens_mask = special_tokens_mask.astype(bool)
853
+
854
+ probability_matrix[special_tokens_mask] = 0
855
+ # Numpy doesn't have bernoulli, so we use a binomial with 1 trial
856
+ masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
857
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
858
+
859
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
860
+ indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
861
+ inputs[indices_replaced] = self.tokenizer.mask_token_id
862
+
863
+ # 10% of the time, we replace masked input tokens with random word
864
+ # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
865
+ indices_random = (
866
+ np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
867
+ )
868
+ random_words = np.random.randint(
869
+ low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
870
+ )
871
+ inputs[indices_random] = random_words
872
+
873
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
874
+ return inputs, labels
875
+
876
+
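A hedged sketch of the 80%/10%/10% masking scheme implemented above, passing `return_special_tokens_mask=True` as the Tip in the docstring recommends; "bert-base-uncased" is only an illustrative checkpoint and PyTorch is assumed.

    from transformers import AutoTokenizer
    from transformers.data import DataCollatorForLanguageModeling

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
    collator = DataCollatorForLanguageModeling(tokenizer, mlm=True, mlm_probability=0.15)

    texts = ["the quick brown fox jumps over the lazy dog", "data collators pad and mask model inputs"]
    examples = [tokenizer(t, return_special_tokens_mask=True) for t in texts]
    batch = collator(examples)
    # Labels are -100 everywhere except the ~15% of non-special positions chosen for prediction; of those,
    # ~80% become [MASK], ~10% a random token, and ~10% are left unchanged.
    print((batch["labels"] != -100).sum().item(), "positions selected for prediction")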
877
+ @dataclass
878
+ class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
879
+ """
880
+ Data collator used for language modeling that masks entire words.
881
+
882
+ - collates batches of tensors, honoring their tokenizer's pad_token
883
+ - preprocesses batches for masked language modeling
884
+
885
+ <Tip>
886
+
887
+ This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
888
+ that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
889
+ produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].
890
+
891
+ </Tip>"""
892
+
893
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
894
+ if isinstance(examples[0], Mapping):
895
+ input_ids = [e["input_ids"] for e in examples]
896
+ else:
897
+ input_ids = examples
898
+ examples = [{"input_ids": e} for e in examples]
899
+
900
+ batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
901
+
902
+ mask_labels = []
903
+ for e in examples:
904
+ ref_tokens = []
905
+ for id in tolist(e["input_ids"]):
906
+ token = self.tokenizer._convert_id_to_token(id)
907
+ ref_tokens.append(token)
908
+
909
+ # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
910
+ if "chinese_ref" in e:
911
+ ref_pos = tolist(e["chinese_ref"])
912
+ len_seq = len(e["input_ids"])
913
+ for i in range(len_seq):
914
+ if i in ref_pos:
915
+ ref_tokens[i] = "##" + ref_tokens[i]
916
+ mask_labels.append(self._whole_word_mask(ref_tokens))
917
+ batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
918
+ inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
919
+ return {"input_ids": inputs, "labels": labels}
920
+
921
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
922
+ import tensorflow as tf
923
+
924
+ if isinstance(examples[0], Mapping):
925
+ input_ids = [e["input_ids"] for e in examples]
926
+ else:
927
+ input_ids = examples
928
+ examples = [{"input_ids": e} for e in examples]
929
+
930
+ batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
931
+
932
+ mask_labels = []
933
+ for e in examples:
934
+ ref_tokens = []
935
+ for id in tolist(e["input_ids"]):
936
+ token = self.tokenizer._convert_id_to_token(id)
937
+ ref_tokens.append(token)
938
+
939
+ # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
940
+ if "chinese_ref" in e:
941
+ ref_pos = tolist(e["chinese_ref"])
942
+ len_seq = len(e["input_ids"])
943
+ for i in range(len_seq):
944
+ if i in ref_pos:
945
+ ref_tokens[i] = "##" + ref_tokens[i]
946
+ mask_labels.append(self._whole_word_mask(ref_tokens))
947
+ batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
948
+ inputs, labels = self.tf_mask_tokens(tf.cast(batch_input, tf.int64), batch_mask)
949
+ return {"input_ids": inputs, "labels": labels}
950
+
951
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
952
+ if isinstance(examples[0], Mapping):
953
+ input_ids = [e["input_ids"] for e in examples]
954
+ else:
955
+ input_ids = examples
956
+ examples = [{"input_ids": e} for e in examples]
957
+
958
+ batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
959
+
960
+ mask_labels = []
961
+ for e in examples:
962
+ ref_tokens = []
963
+ for id in tolist(e["input_ids"]):
964
+ token = self.tokenizer._convert_id_to_token(id)
965
+ ref_tokens.append(token)
966
+
967
+ # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
968
+ if "chinese_ref" in e:
969
+ ref_pos = tolist(e["chinese_ref"])
970
+ len_seq = len(e["input_ids"])
971
+ for i in range(len_seq):
972
+ if i in ref_pos:
973
+ ref_tokens[i] = "##" + ref_tokens[i]
974
+ mask_labels.append(self._whole_word_mask(ref_tokens))
975
+ batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
976
+ inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
977
+ return {"input_ids": inputs, "labels": labels}
978
+
979
+ def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
980
+ """
981
+ Get 0/1 labels for masked tokens with whole word mask proxy
982
+ """
983
+ if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
984
+ warnings.warn(
985
+ "DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
986
+ "Please refer to the documentation for more information."
987
+ )
988
+
989
+ cand_indexes = []
990
+ for i, token in enumerate(input_tokens):
991
+ if token == "[CLS]" or token == "[SEP]":
992
+ continue
993
+
994
+ if len(cand_indexes) >= 1 and token.startswith("##"):
995
+ cand_indexes[-1].append(i)
996
+ else:
997
+ cand_indexes.append([i])
998
+
999
+ random.shuffle(cand_indexes)
1000
+ num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
1001
+ masked_lms = []
1002
+ covered_indexes = set()
1003
+ for index_set in cand_indexes:
1004
+ if len(masked_lms) >= num_to_predict:
1005
+ break
1006
+ # If adding a whole-word mask would exceed the maximum number of
1007
+ # predictions, then just skip this candidate.
1008
+ if len(masked_lms) + len(index_set) > num_to_predict:
1009
+ continue
1010
+ is_any_index_covered = False
1011
+ for index in index_set:
1012
+ if index in covered_indexes:
1013
+ is_any_index_covered = True
1014
+ break
1015
+ if is_any_index_covered:
1016
+ continue
1017
+ for index in index_set:
1018
+ covered_indexes.add(index)
1019
+ masked_lms.append(index)
1020
+
1021
+ if len(covered_indexes) != len(masked_lms):
1022
+ raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
1023
+ mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
1024
+ return mask_labels
1025
+
1026
+ def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
1027
+ """
1028
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
+ 'mask_labels' means we use whole word mask (wwm); we directly mask indices according to its reference.
1030
+ """
1031
+ import torch
1032
+
1033
+ if self.tokenizer.mask_token is None:
1034
+ raise ValueError(
1035
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
1036
+ " --mlm flag if you want to use this tokenizer."
1037
+ )
1038
+ labels = inputs.clone()
1039
+ # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
1040
+
1041
+ probability_matrix = mask_labels
1042
+
1043
+ special_tokens_mask = [
1044
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
1045
+ ]
1046
+ probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
1047
+ if self.tokenizer._pad_token is not None:
1048
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
1049
+ probability_matrix.masked_fill_(padding_mask, value=0.0)
1050
+
1051
+ masked_indices = probability_matrix.bool()
1052
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
1053
+
1054
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
1055
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
1056
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
1057
+
1058
+ # 10% of the time, we replace masked input tokens with random word
1059
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
1060
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
1061
+ inputs[indices_random] = random_words[indices_random]
1062
+
1063
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
1064
+ return inputs, labels
1065
+
1066
+ def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
1067
+ """
1068
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
+ 'mask_labels' means we use whole word mask (wwm); we directly mask indices according to its reference.
1070
+ """
1071
+ import tensorflow as tf
1072
+
1073
+ input_shape = tf.shape(inputs)
1074
+ if self.tokenizer.mask_token is None:
1075
+ raise ValueError(
1076
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
1077
+ " --mlm flag if you want to use this tokenizer."
1078
+ )
1079
+ labels = tf.identity(inputs)
1080
+ # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
1081
+
1082
+ masked_indices = tf.cast(mask_labels, tf.bool)
1083
+
1084
+ special_tokens_mask = [
1085
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
1086
+ ]
1087
+ masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
1088
+ if self.tokenizer._pad_token is not None:
1089
+ padding_mask = inputs == self.tokenizer.pad_token_id
1090
+ masked_indices = masked_indices & ~padding_mask
1091
+
1092
+ # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
1093
+ labels = tf.where(masked_indices, inputs, -100)
1094
+
1095
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
1096
+ indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
1097
+
1098
+ inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)
1099
+
1100
+ # 10% of the time, we replace masked input tokens with random word
1101
+ indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
1102
+ random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
1103
+ inputs = tf.where(indices_random, random_words, inputs)
1104
+
1105
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
1106
+ return inputs, labels
1107
+
1108
+ def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
1109
+ """
1110
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
+ 'mask_labels' means we use whole word mask (wwm); we directly mask indices according to its reference.
1112
+ """
1113
+ if self.tokenizer.mask_token is None:
1114
+ raise ValueError(
1115
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
1116
+ " --mlm flag if you want to use this tokenizer."
1117
+ )
1118
+ labels = np.copy(inputs)
1119
+ # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
1120
+
1121
+ masked_indices = mask_labels.astype(bool)
1122
+
1123
+ special_tokens_mask = [
1124
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
1125
+ ]
1126
+ masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
1127
+ if self.tokenizer._pad_token is not None:
1128
+ padding_mask = labels == self.tokenizer.pad_token_id
1129
+ masked_indices[padding_mask] = 0
1130
+
1131
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
1132
+
1133
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
1134
+ indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
1135
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
1136
+
1137
+ # 10% of the time, we replace masked input tokens with random word
1138
+ # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
1139
+ indices_random = (
1140
+ np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
1141
+ )
1142
+ random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
1143
+ inputs[indices_random] = random_words[indices_random]
1144
+
1145
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
1146
+ return inputs, labels
1147
+
1148
+
1149
+ @dataclass
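A hedged sketch of the whole-word behaviour: when a word is selected, all of its "##" continuation pieces are masked together. The checkpoint name is illustrative, a BERT-style fast tokenizer matches the collator's "##" assumption, and PyTorch is assumed.

    from transformers import BertTokenizerFast
    from transformers.data import DataCollatorForWholeWordMask

    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")  # illustrative checkpoint
    collator = DataCollatorForWholeWordMask(tokenizer, mlm_probability=0.15)

    examples = [tokenizer("tokenization splits uncommon words into subwords")]
    batch = collator(examples)
    # A chosen word is masked across every one of its sub-tokens, so labels cover whole words, not isolated pieces.
    print(batch["labels"].tolist())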
1150
+ class DataCollatorForSOP(DataCollatorForLanguageModeling):
1151
+ """
1152
+ Data collator used for sentence order prediction task.
1153
+
1154
+ - collates batches of tensors, honoring their tokenizer's pad_token
1155
+ - preprocesses batches for both masked language modeling and sentence order prediction
1156
+ """
1157
+
1158
+ def __init__(self, *args, **kwargs):
1159
+ warnings.warn(
1160
+ "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
1161
+ "DataCollatorForLanguageModeling instead.",
1162
+ FutureWarning,
1163
+ )
1164
+
1165
+ def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
1166
+ import torch
1167
+ from torch.nn.utils.rnn import pad_sequence
1168
+
1169
+ input_ids = [example["input_ids"] for example in examples]
1170
+ input_ids = _torch_collate_batch(input_ids, self.tokenizer)
1171
+ input_ids, labels, attention_mask = self.mask_tokens(input_ids)
1172
+
1173
+ token_type_ids = [example["token_type_ids"] for example in examples]
1174
+ # The size of segment_ids varies because of randomness; pad zeros at the end, as in the original implementation
1175
+ token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
1176
+
1177
+ sop_label_list = [example["sentence_order_label"] for example in examples]
1178
+ sentence_order_label = torch.stack(sop_label_list)
1179
+
1180
+ return {
1181
+ "input_ids": input_ids,
1182
+ "labels": labels,
1183
+ "attention_mask": attention_mask,
1184
+ "token_type_ids": token_type_ids,
1185
+ "sentence_order_label": sentence_order_label,
1186
+ }
1187
+
1188
+ def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
1189
+ """
1190
+ Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
1191
+ original. N-gram not applied yet.
1192
+ """
1193
+ import torch
1194
+
1195
+ if self.tokenizer.mask_token is None:
1196
+ raise ValueError(
1197
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
1198
+ " --mlm flag if you want to use this tokenizer."
1199
+ )
1200
+
1201
+ labels = inputs.clone()
1202
+ # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
1203
+ probability_matrix = torch.full(labels.shape, self.mlm_probability)
1204
+ special_tokens_mask = [
1205
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
1206
+ ]
1207
+ probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
1208
+ if self.tokenizer._pad_token is not None:
1209
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
1210
+ probability_matrix.masked_fill_(padding_mask, value=0.0)
1211
+ masked_indices = torch.bernoulli(probability_matrix).bool()
1212
+ # The probability is `1` for masked tokens; however, the ALBERT attention mask uses `0` for masked positions, so invert the value
1213
+ attention_mask = (~masked_indices).float()
1214
+ if self.tokenizer._pad_token is not None:
1215
+ attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
1216
+ attention_mask.masked_fill_(attention_padding_mask, value=1.0)
1217
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute
1218
+
1219
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
1220
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
1221
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
1222
+
1223
+ # 10% of the time, we replace masked input tokens with a random word
1224
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
1225
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
1226
+ inputs[indices_random] = random_words[indices_random]
1227
+
1228
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
1229
+ return inputs, labels, attention_mask
1230
+
1231
+
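
Since DataCollatorForSOP is deprecated in favor of DataCollatorForLanguageModeling, here is a minimal, hedged sketch of the recommended replacement. The checkpoint name and the training file are illustrative placeholders rather than anything shipped in this commit, and LineByLineTextDataset is the dataset class added below in data/datasets/language_modeling.py.

    # Hedged sketch: the recommended replacement for DataCollatorForSOP.
    # "bert-base-uncased" and "train.txt" are illustrative placeholders.
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer, DataCollatorForLanguageModeling
    from transformers.data.datasets.language_modeling import LineByLineTextDataset

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = LineByLineTextDataset(tokenizer=tokenizer, file_path="train.txt", block_size=128)
    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
    loader = DataLoader(dataset, batch_size=8, collate_fn=collator)

    batch = next(iter(loader))
    # The batch holds input_ids, attention_mask and labels; non-masked positions in
    # labels are set to -100 so the cross-entropy loss ignores them.
    print({k: v.shape for k, v in batch.items()})
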
1232
+ @dataclass
1233
+ class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
1234
+ """
1235
+ Data collator used for permutation language modeling.
1236
+
1237
+ - collates batches of tensors, honoring the tokenizer's pad_token
1238
+ - preprocesses batches for permutation language modeling with procedures specific to XLNet
1239
+ """
1240
+
1241
+ tokenizer: PreTrainedTokenizerBase
1242
+ plm_probability: float = 1 / 6
1243
+ max_span_length: int = 5 # maximum length of a span of masked tokens
1244
+ return_tensors: str = "pt"
1245
+
1246
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1247
+ if isinstance(examples[0], Mapping):
1248
+ examples = [e["input_ids"] for e in examples]
1249
+ batch = _torch_collate_batch(examples, self.tokenizer)
1250
+ inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
1251
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
1252
+
1253
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1254
+ if isinstance(examples[0], Mapping):
1255
+ examples = [e["input_ids"] for e in examples]
1256
+ batch = _tf_collate_batch(examples, self.tokenizer)
1257
+ inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
1258
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
1259
+
1260
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1261
+ if isinstance(examples[0], Mapping):
1262
+ examples = [e["input_ids"] for e in examples]
1263
+ batch = _numpy_collate_batch(examples, self.tokenizer)
1264
+ inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
1265
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
1266
+
1267
+ def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
1268
+ """
1269
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
1270
+
1271
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1272
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1273
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
1274
+ masked
1275
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
1276
+ span_length]` and mask tokens `start_index:start_index + span_length`
1277
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
1278
+ sequence to be processed), repeat from Step 1.
1279
+ """
1280
+ import torch
1281
+
1282
+ if self.tokenizer.mask_token is None:
1283
+ raise ValueError(
1284
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
1285
+ " Please add a mask token if you want to use this tokenizer."
1286
+ )
1287
+
1288
+ if inputs.size(1) % 2 != 0:
1289
+ raise ValueError(
1290
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
1291
+ " relevant comments in source code for details."
1292
+ )
1293
+
1294
+ labels = inputs.clone()
1295
+ # Creating the mask and target_mapping tensors
1296
+ masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
1297
+ target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
1298
+
1299
+ for i in range(labels.size(0)):
1300
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1301
+ cur_len = 0
1302
+ max_len = labels.size(1)
1303
+
1304
+ while cur_len < max_len:
1305
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1306
+ span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
1307
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
1308
+ context_length = int(span_length / self.plm_probability)
1309
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
1310
+ start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
1311
+ masked_indices[i, start_index : start_index + span_length] = 1
1312
+ # Set `cur_len = cur_len + context_length`
1313
+ cur_len += context_length
1314
+
1315
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
1316
+ # the i-th prediction corresponds to the i-th token.
1317
+ target_mapping[i] = torch.eye(labels.size(1))
1318
+
1319
+ special_tokens_mask = torch.tensor(
1320
+ [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
1321
+ dtype=torch.bool,
1322
+ )
1323
+ masked_indices.masked_fill_(special_tokens_mask, value=0.0)
1324
+ if self.tokenizer._pad_token is not None:
1325
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
1326
+ masked_indices.masked_fill_(padding_mask, value=0.0)
1327
+
1328
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
1329
+ non_func_mask = ~(padding_mask | special_tokens_mask)
1330
+
1331
+ inputs[masked_indices] = self.tokenizer.mask_token_id
1332
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
1333
+
1334
+ perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
1335
+
1336
+ for i in range(labels.size(0)):
1337
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
1338
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
1339
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
1340
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
1341
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
1342
+ # This requires that the sequence length be even.
1343
+
1344
+ # Create a linear factorisation order
1345
+ perm_index = torch.arange(labels.size(1))
1346
+ # Split this into two halves, assuming that half the sequence is reused each time
1347
+ perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
1348
+ # Permute the two halves such that they do not cross over
1349
+ perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
1350
+ # Flatten this out into the desired permuted factorisation order
1351
+ perm_index = torch.flatten(perm_index.transpose(0, 1))
1352
+ # Set the permutation indices of non-masked (non-functional) tokens to the
1353
+ # smallest index (-1) so that:
1354
+ # (1) They can be seen by all other positions
1355
+ # (2) They cannot see masked positions, so there won't be information leak
1356
+ perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
1357
+ # The logic for whether the i-th token can attend to the j-th token, based on the factorisation order:
1358
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
1359
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
1360
+ perm_mask[i] = (
1361
+ perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
1362
+ ) & masked_indices[i]
1363
+
1364
+ return inputs.long(), perm_mask, target_mapping, labels.long()
1365
+
1366
+ def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
1367
+ """
1368
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
1369
+
1370
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1371
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1372
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
1373
+ masked
1374
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
1375
+ span_length]` and mask tokens `start_index:start_index + span_length`
1376
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
1377
+ sequence to be processed), repeat from Step 1.
1378
+ """
1379
+ import tensorflow as tf
1380
+
1381
+ if self.tokenizer.mask_token is None:
1382
+ raise ValueError(
1383
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
1384
+ " Please add a mask token if you want to use this tokenizer."
1385
+ )
1386
+
1387
+ if tf.shape(inputs)[1] % 2 != 0:
1388
+ raise ValueError(
1389
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
1390
+ " relevant comments in source code for details."
1391
+ )
1392
+
1393
+ labels = tf.identity(inputs)
1394
+ # Creating the mask and target_mapping tensors
1395
+ masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
1396
+ labels_shape = tf.shape(labels)
1397
+ target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)
1398
+
1399
+ for i in range(len(labels)):
1400
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1401
+ cur_len = 0
1402
+ max_len = tf.shape(labels)[1]
1403
+
1404
+ while cur_len < max_len:
1405
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1406
+ span_length = randint(1, self.max_span_length + 1)
1407
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
1408
+ context_length = int(span_length / self.plm_probability)
1409
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
1410
+ start_index = cur_len + randint(0, context_length - span_length + 1)
1411
+ masked_indices[i, start_index : start_index + span_length] = 1
1412
+ # Set `cur_len = cur_len + context_length`
1413
+ cur_len += context_length
1414
+
1415
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
1416
+ # the i-th prediction corresponds to the i-th token.
1417
+ target_mapping[i] = np.eye(labels_shape[1])
1418
+ masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), dtype=tf.bool)
1419
+ target_mapping = tf.convert_to_tensor(target_mapping)
1420
+ special_tokens_mask = tf.convert_to_tensor(
1421
+ [
1422
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
1423
+ for val in labels.numpy().tolist()
1424
+ ],
1425
+ )
1426
+ special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
1427
+ masked_indices = masked_indices & ~special_tokens_mask
1428
+ if self.tokenizer._pad_token is not None:
1429
+ padding_mask = labels == self.tokenizer.pad_token_id
1430
+ masked_indices = masked_indices & ~padding_mask
1431
+
1432
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
1433
+ non_func_mask = ~(padding_mask | special_tokens_mask)
1434
+
1435
+ inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
1436
+ labels = tf.where(masked_indices, labels, -100) # We only compute loss on masked tokens
1437
+
1438
+ perm_mask = []
1439
+
1440
+ for i in range(len(labels)):
1441
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
1442
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
1443
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
1444
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
1445
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
1446
+ # This requires that the sequence length be even.
1447
+
1448
+ # Create a linear factorisation order
1449
+ # tf.range is the equivalent of torch.arange
1450
+ perm_index = tf.range(labels_shape[1])
1451
+ # Split this into two halves, assuming that half the sequence is reused each time
1452
+ perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
1453
+ # Permute the two halves such that they do not cross over
1454
+ perm_index = tf.random.shuffle(perm_index) # Shuffles along the first dimension
1455
+ # Flatten this out into the desired permuted factorisation order
1456
+ perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
1457
+ # Set the permutation indices of non-masked (non-functional) tokens to the
1458
+ # smallest index (-1) so that:
1459
+ # (1) They can be seen by all other positions
1460
+ # (2) They cannot see masked positions, so there won't be information leak
1461
+ perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
1462
+ # The logic for whether the i-th token can attend to the j-th token, based on the factorisation order:
1463
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
1464
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
1465
+ perm_mask.append(
1466
+ (tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
1467
+ & masked_indices[i]
1468
+ )
1469
+ perm_mask = tf.stack(perm_mask, axis=0)
1470
+
1471
+ return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)
1472
+
1473
+ def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
1474
+ """
1475
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
1476
+
1477
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1478
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1479
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
1480
+ masked
1481
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
1482
+ span_length]` and mask tokens `start_index:start_index + span_length`
1483
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
1484
+ sequence to be processed), repeat from Step 1.
1485
+ """
1486
+ if self.tokenizer.mask_token is None:
1487
+ raise ValueError(
1488
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
1489
+ " Please add a mask token if you want to use this tokenizer."
1490
+ )
1491
+
1492
+ if inputs.shape[1] % 2 != 0:
1493
+ raise ValueError(
1494
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
1495
+ " relevant comments in source code for details."
1496
+ )
1497
+
1498
+ labels = np.copy(inputs)
1499
+ # Creating the mask and target_mapping tensors
1500
+ masked_indices = np.full(labels.shape, 0, dtype=bool)
1501
+ target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
1502
+
1503
+ for i in range(labels.shape[0]):
1504
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1505
+ cur_len = 0
1506
+ max_len = labels.shape[1]
1507
+
1508
+ while cur_len < max_len:
1509
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1510
+ span_length = randint(1, self.max_span_length + 1)
1511
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
1512
+ context_length = int(span_length / self.plm_probability)
1513
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
1514
+ start_index = cur_len + randint(0, context_length - span_length + 1)
1515
+ masked_indices[i, start_index : start_index + span_length] = 1
1516
+ # Set `cur_len = cur_len + context_length`
1517
+ cur_len += context_length
1518
+
1519
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
1520
+ # the i-th prediction corresponds to the i-th token.
1521
+ target_mapping[i] = np.eye(labels.shape[1])
1522
+
1523
+ special_tokens_mask = np.array(
1524
+ [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
1525
+ dtype=bool,
1526
+ )
1527
+ masked_indices[special_tokens_mask] = 0
1528
+ if self.tokenizer._pad_token is not None:
1529
+ padding_mask = labels == self.tokenizer.pad_token_id
1530
+ masked_indices[padding_mask] = 0.0
1531
+
1532
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
1533
+ non_func_mask = ~(padding_mask | special_tokens_mask)
1534
+
1535
+ inputs[masked_indices] = self.tokenizer.mask_token_id
1536
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
1537
+
1538
+ perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
1539
+
1540
+ for i in range(labels.shape[0]):
1541
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
1542
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
1543
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
1544
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
1545
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
1546
+ # This requires that the sequence length be even.
1547
+
1548
+ # Create a linear factorisation order
1549
+ perm_index = np.arange(labels.shape[1])
1550
+ # Split this into two halves, assuming that half the sequence is reused each time
1551
+ perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
1552
+ # Permute the two halves such that they do not cross over
1553
+ np.random.shuffle(perm_index)
1554
+ # Flatten this out into the desired permuted factorisation order
1555
+ perm_index = perm_index.T.flatten()
1556
+ # Set the permutation indices of non-masked (non-functional) tokens to the
1557
+ # smallest index (-1) so that:
1558
+ # (1) They can be seen by all other positions
1559
+ # (2) They cannot see masked positions, so there won't be information leak
1560
+ perm_index[~masked_indices[i] & non_func_mask[i]] = -1
1561
+ # The logic for whether the i-th token can attend to the j-th token, based on the factorisation order:
1562
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
1563
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
1564
+ perm_mask[i] = (
1565
+ perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
1566
+ ) & masked_indices[i]
1567
+
1568
+ return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
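
To make the span-masking algorithm documented above concrete: each iteration reserves a context window of span_length / plm_probability tokens and masks span_length of them, so on average roughly a plm_probability fraction of each sequence ends up masked (about 1/6 with the defaults). The following hedged sketch uses a placeholder XLNet checkpoint and pads every sequence to a fixed even length, since the collator rejects odd sequence lengths.

    # Hedged sketch: DataCollatorForPermutationLanguageModeling on a tiny batch.
    # "xlnet-base-cased" is a placeholder checkpoint; max_length=32 keeps the
    # sequence length even, as the collator requires.
    from transformers import AutoTokenizer, DataCollatorForPermutationLanguageModeling

    tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
    collator = DataCollatorForPermutationLanguageModeling(
        tokenizer=tokenizer, plm_probability=1 / 6, max_span_length=5
    )

    texts = ["The quick brown fox jumps over the lazy dog.", "Permutation language modeling drives XLNet."]
    encodings = tokenizer(texts, padding="max_length", max_length=32, truncation=True)
    examples = [{"input_ids": ids} for ids in encodings["input_ids"]]

    batch = collator(examples)
    print(batch["input_ids"].shape)       # torch.Size([2, 32])
    print(batch["perm_mask"].shape)       # torch.Size([2, 32, 32])
    print(batch["target_mapping"].shape)  # torch.Size([2, 32, 32])
    print(batch["labels"].shape)          # torch.Size([2, 32])
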
env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__init__.py ADDED
@@ -0,0 +1,23 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .glue import GlueDataset, GlueDataTrainingArguments
16
+ from .language_modeling import (
17
+ LineByLineTextDataset,
18
+ LineByLineWithRefDataset,
19
+ LineByLineWithSOPTextDataset,
20
+ TextDataset,
21
+ TextDatasetForNextSentencePrediction,
22
+ )
23
+ from .squad import SquadDataset, SquadDataTrainingArguments
env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (552 Bytes).
 
env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc ADDED
Binary file (4.87 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc ADDED
Binary file (13 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc ADDED
Binary file (6.36 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/glue.py ADDED
@@ -0,0 +1,161 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import time
17
+ import warnings
18
+ from dataclasses import dataclass, field
19
+ from enum import Enum
20
+ from typing import List, Optional, Union
21
+
22
+ import torch
23
+ from filelock import FileLock
24
+ from torch.utils.data import Dataset
25
+
26
+ from ...tokenization_utils_base import PreTrainedTokenizerBase
27
+ from ...utils import logging
28
+ from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
29
+ from ..processors.utils import InputFeatures
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ @dataclass
36
+ class GlueDataTrainingArguments:
37
+ """
38
+ Arguments pertaining to what data we are going to input our model for training and eval.
39
+
40
+ Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
41
+ line.
42
+ """
43
+
44
+ task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
45
+ data_dir: str = field(
46
+ metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
47
+ )
48
+ max_seq_length: int = field(
49
+ default=128,
50
+ metadata={
51
+ "help": (
52
+ "The maximum total input sequence length after tokenization. Sequences longer "
53
+ "than this will be truncated, sequences shorter will be padded."
54
+ )
55
+ },
56
+ )
57
+ overwrite_cache: bool = field(
58
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
59
+ )
60
+
61
+ def __post_init__(self):
62
+ self.task_name = self.task_name.lower()
63
+
64
+
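
As the docstring above notes, HfArgumentParser can turn GlueDataTrainingArguments into command-line flags. A minimal hedged sketch, with illustrative argument values:

    # Hedged sketch of the HfArgumentParser usage mentioned in the docstring above.
    from transformers import HfArgumentParser
    from transformers.data.datasets.glue import GlueDataTrainingArguments

    parser = HfArgumentParser(GlueDataTrainingArguments)
    (data_args,) = parser.parse_args_into_dataclasses(
        ["--task_name", "SST-2", "--data_dir", "./glue_data/SST-2", "--max_seq_length", "128"]
    )
    print(data_args.task_name)  # "sst-2", lower-cased by __post_init__
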
65
+ class Split(Enum):
66
+ train = "train"
67
+ dev = "dev"
68
+ test = "test"
69
+
70
+
71
+ class GlueDataset(Dataset):
72
+ """
73
+ This will be superseded by a framework-agnostic approach soon.
74
+ """
75
+
76
+ args: GlueDataTrainingArguments
77
+ output_mode: str
78
+ features: List[InputFeatures]
79
+
80
+ def __init__(
81
+ self,
82
+ args: GlueDataTrainingArguments,
83
+ tokenizer: PreTrainedTokenizerBase,
84
+ limit_length: Optional[int] = None,
85
+ mode: Union[str, Split] = Split.train,
86
+ cache_dir: Optional[str] = None,
87
+ ):
88
+ warnings.warn(
89
+ "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
90
+ "library. You can have a look at this example script for pointers: "
91
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
92
+ FutureWarning,
93
+ )
94
+ self.args = args
95
+ self.processor = glue_processors[args.task_name]()
96
+ self.output_mode = glue_output_modes[args.task_name]
97
+ if isinstance(mode, str):
98
+ try:
99
+ mode = Split[mode]
100
+ except KeyError:
101
+ raise KeyError("mode is not a valid split name")
102
+ # Load data features from cache or dataset file
103
+ cached_features_file = os.path.join(
104
+ cache_dir if cache_dir is not None else args.data_dir,
105
+ f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
106
+ )
107
+ label_list = self.processor.get_labels()
108
+ if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
109
+ "RobertaTokenizer",
110
+ "RobertaTokenizerFast",
111
+ "XLMRobertaTokenizer",
112
+ "BartTokenizer",
113
+ "BartTokenizerFast",
114
+ ):
115
+ # HACK(label indices are swapped in RoBERTa pretrained model)
116
+ label_list[1], label_list[2] = label_list[2], label_list[1]
117
+ self.label_list = label_list
118
+
119
+ # Make sure only the first process in distributed training processes the dataset,
120
+ # and the others will use the cache.
121
+ lock_path = cached_features_file + ".lock"
122
+ with FileLock(lock_path):
123
+ if os.path.exists(cached_features_file) and not args.overwrite_cache:
124
+ start = time.time()
125
+ self.features = torch.load(cached_features_file)
126
+ logger.info(
127
+ f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
128
+ )
129
+ else:
130
+ logger.info(f"Creating features from dataset file at {args.data_dir}")
131
+
132
+ if mode == Split.dev:
133
+ examples = self.processor.get_dev_examples(args.data_dir)
134
+ elif mode == Split.test:
135
+ examples = self.processor.get_test_examples(args.data_dir)
136
+ else:
137
+ examples = self.processor.get_train_examples(args.data_dir)
138
+ if limit_length is not None:
139
+ examples = examples[:limit_length]
140
+ self.features = glue_convert_examples_to_features(
141
+ examples,
142
+ tokenizer,
143
+ max_length=args.max_seq_length,
144
+ label_list=label_list,
145
+ output_mode=self.output_mode,
146
+ )
147
+ start = time.time()
148
+ torch.save(self.features, cached_features_file)
149
+ # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
150
+ logger.info(
151
+ f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
152
+ )
153
+
154
+ def __len__(self):
155
+ return len(self.features)
156
+
157
+ def __getitem__(self, i) -> InputFeatures:
158
+ return self.features[i]
159
+
160
+ def get_labels(self):
161
+ return self.label_list
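
A hedged end-to-end sketch of GlueDataset; the checkpoint and the ./glue_data/MRPC directory of TSV files are placeholders, not part of this commit.

    # Hedged sketch: build a GLUE MRPC training set from local TSV files.
    from transformers import AutoTokenizer
    from transformers.data.datasets.glue import GlueDataset, GlueDataTrainingArguments

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
    train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")

    print(len(train_dataset))          # number of InputFeatures
    print(train_dataset.get_labels())  # e.g. ["0", "1"] for MRPC
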
env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/language_modeling.py ADDED
@@ -0,0 +1,530 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ import pickle
18
+ import random
19
+ import time
20
+ import warnings
21
+ from typing import Dict, List, Optional
22
+
23
+ import torch
24
+ from filelock import FileLock
25
+ from torch.utils.data import Dataset
26
+
27
+ from ...tokenization_utils import PreTrainedTokenizer
28
+ from ...utils import logging
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ DEPRECATION_WARNING = (
35
+ "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
36
+ "library. You can have a look at this example script for pointers: {0}"
37
+ )
38
+
39
+
40
+ class TextDataset(Dataset):
41
+ """
42
+ This will be superseded by a framework-agnostic approach soon.
43
+ """
44
+
45
+ def __init__(
46
+ self,
47
+ tokenizer: PreTrainedTokenizer,
48
+ file_path: str,
49
+ block_size: int,
50
+ overwrite_cache=False,
51
+ cache_dir: Optional[str] = None,
52
+ ):
53
+ warnings.warn(
54
+ DEPRECATION_WARNING.format(
55
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
56
+ ),
57
+ FutureWarning,
58
+ )
59
+ if os.path.isfile(file_path) is False:
60
+ raise ValueError(f"Input file path {file_path} not found")
61
+
62
+ block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)
63
+
64
+ directory, filename = os.path.split(file_path)
65
+ cached_features_file = os.path.join(
66
+ cache_dir if cache_dir is not None else directory,
67
+ f"cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}",
68
+ )
69
+
70
+ # Make sure only the first process in distributed training processes the dataset,
71
+ # and the others will use the cache.
72
+ lock_path = cached_features_file + ".lock"
73
+ with FileLock(lock_path):
74
+ if os.path.exists(cached_features_file) and not overwrite_cache:
75
+ start = time.time()
76
+ with open(cached_features_file, "rb") as handle:
77
+ self.examples = pickle.load(handle)
78
+ logger.info(
79
+ f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
80
+ )
81
+
82
+ else:
83
+ logger.info(f"Creating features from dataset file at {directory}")
84
+
85
+ self.examples = []
86
+ with open(file_path, encoding="utf-8") as f:
87
+ text = f.read()
88
+
89
+ tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
90
+
91
+ for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in blocks of block_size
92
+ self.examples.append(
93
+ tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
94
+ )
95
+ # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
96
+ # If your dataset is small, first you should look for a bigger one :-) and second you
97
+ # can change this behavior by adding (model specific) padding.
98
+
99
+ start = time.time()
100
+ with open(cached_features_file, "wb") as handle:
101
+ pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
102
+ logger.info(
103
+ f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
104
+ )
105
+
106
+ def __len__(self):
107
+ return len(self.examples)
108
+
109
+ def __getitem__(self, i) -> torch.Tensor:
110
+ return torch.tensor(self.examples[i], dtype=torch.long)
111
+
112
+
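
For reference, a minimal hedged sketch of TextDataset above (checkpoint and file path are placeholders): it tokenizes the whole file once and slices it into contiguous block_size chunks, dropping the final partial block.

    # Hedged sketch: chunk a raw text file into contiguous block_size examples.
    from transformers import AutoTokenizer
    from transformers.data.datasets.language_modeling import TextDataset

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    dataset = TextDataset(tokenizer=tokenizer, file_path="corpus.txt", block_size=512)

    print(len(dataset))      # number of blocks
    print(dataset[0].shape)  # torch.Size([512]) for a tokenizer that adds no special tokens
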
113
+ class LineByLineTextDataset(Dataset):
114
+ """
115
+ This will be superseded by a framework-agnostic approach soon.
116
+ """
117
+
118
+ def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
119
+ warnings.warn(
120
+ DEPRECATION_WARNING.format(
121
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
122
+ ),
123
+ FutureWarning,
124
+ )
125
+ if os.path.isfile(file_path) is False:
126
+ raise ValueError(f"Input file path {file_path} not found")
127
+ # Here, we do not cache the features, operating under the assumption
128
+ # that we will soon use fast multithreaded tokenizers from the
129
+ # `tokenizers` repo everywhere =)
130
+ logger.info(f"Creating features from dataset file at {file_path}")
131
+
132
+ with open(file_path, encoding="utf-8") as f:
133
+ lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
134
+
135
+ batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
136
+ self.examples = batch_encoding["input_ids"]
137
+ self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]
138
+
139
+ def __len__(self):
140
+ return len(self.examples)
141
+
142
+ def __getitem__(self, i) -> Dict[str, torch.tensor]:
143
+ return self.examples[i]
144
+
145
+
146
+ class LineByLineWithRefDataset(Dataset):
147
+ """
148
+ This will be superseded by a framework-agnostic approach soon.
149
+ """
150
+
151
+ def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
152
+ warnings.warn(
153
+ DEPRECATION_WARNING.format(
154
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py"
155
+ ),
156
+ FutureWarning,
157
+ )
158
+ if os.path.isfile(file_path) is False:
159
+ raise ValueError(f"Input file path {file_path} not found")
160
+ if os.path.isfile(ref_path) is False:
161
+ raise ValueError(f"Ref file path {file_path} not found")
162
+ # Here, we do not cache the features, operating under the assumption
163
+ # that we will soon use fast multithreaded tokenizers from the
164
+ # `tokenizers` repo everywhere =)
165
+ logger.info(f"Creating features from dataset file at {file_path}")
166
+ logger.info(f"Use ref segment results at {ref_path}")
167
+ with open(file_path, encoding="utf-8") as f:
168
+ data = f.readlines() # use readlines() so that the '\u2029' delimiter does not split a line
169
+ data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
170
+ # Get ref inf from file
171
+ with open(ref_path, encoding="utf-8") as f:
172
+ ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
173
+ if len(data) != len(ref):
174
+ raise ValueError(
175
+ f"Length of Input file should be equal to Ref file. But the length of {file_path} is {len(data)} "
176
+ f"while length of {ref_path} is {len(ref)}"
177
+ )
178
+
179
+ batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size)
180
+ self.examples = batch_encoding["input_ids"]
181
+ self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]
182
+
183
+ n = len(self.examples)
184
+ for i in range(n):
185
+ self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long)
186
+
187
+ def __len__(self):
188
+ return len(self.examples)
189
+
190
+ def __getitem__(self, i) -> Dict[str, torch.tensor]:
191
+ return self.examples[i]
192
+
193
+
194
+ class LineByLineWithSOPTextDataset(Dataset):
195
+ """
196
+ Dataset for sentence order prediction task, prepare sentence pairs for SOP task
197
+ """
198
+
199
+ def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
200
+ warnings.warn(
201
+ DEPRECATION_WARNING.format(
202
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
203
+ ),
204
+ FutureWarning,
205
+ )
206
+ if os.path.isdir(file_dir) is False:
207
+ raise ValueError(f"{file_dir} is not a directory")
208
+ logger.info(f"Creating features from dataset file folder at {file_dir}")
209
+ self.examples = []
210
+ # TODO: the randomness could use a fixed seed, e.g. rng = random.Random(random_seed)
211
+ # file path looks like ./dataset/wiki_1, ./dataset/wiki_2
212
+ for file_name in os.listdir(file_dir):
213
+ file_path = os.path.join(file_dir, file_name)
214
+ if os.path.isfile(file_path) is False:
215
+ raise ValueError(f"{file_path} is not a file")
216
+ article_open = False
217
+ with open(file_path, encoding="utf-8") as f:
218
+ original_lines = f.readlines()
219
+ article_lines = []
220
+ for line in original_lines:
221
+ if "<doc id=" in line:
222
+ article_open = True
223
+ elif "</doc>" in line:
224
+ article_open = False
225
+ document = [
226
+ tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line))
227
+ for line in article_lines[1:]
228
+ if (len(line) > 0 and not line.isspace())
229
+ ]
230
+
231
+ examples = self.create_examples_from_document(document, block_size, tokenizer)
232
+ self.examples.extend(examples)
233
+ article_lines = []
234
+ else:
235
+ if article_open:
236
+ article_lines.append(line)
237
+
238
+ logger.info("Dataset parse finished.")
239
+
240
+ def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1):
241
+ """Creates examples for a single document."""
242
+
243
+ # Account for special tokens
244
+ max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True)
245
+
246
+ # We *usually* want to fill up the entire sequence since we are padding
247
+ # to `block_size` anyways, so short sequences are generally wasted
248
+ # computation. However, we *sometimes*
249
+ # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
250
+ # sequences to minimize the mismatch between pretraining and fine-tuning.
251
+ # The `target_seq_length` is just a rough target however, whereas
252
+ # `block_size` is a hard limit.
253
+ target_seq_length = max_num_tokens
254
+ if random.random() < short_seq_prob:
255
+ target_seq_length = random.randint(2, max_num_tokens)
256
+
257
+ # We DON'T just concatenate all of the tokens from a document into a long
258
+ # sequence and choose an arbitrary split point because this would make the
259
+ # next sentence prediction task too easy. Instead, we split the input into
260
+ # segments "A" and "B" based on the actual "sentences" provided by the user
261
+ # input.
262
+ examples = []
263
+ current_chunk = [] # a buffer storing the current working segments
264
+ current_length = 0
265
+ i = 0
266
+ while i < len(document):
267
+ segment = document[i] # get a segment
268
+ if not segment:
269
+ i += 1
270
+ continue
271
+ current_chunk.append(segment) # add a segment to current chunk
272
+ current_length += len(segment) # overall token length
273
+ # if the current length reaches the target length or we reach the end of the document, start building tokens a and b
274
+ if i == len(document) - 1 or current_length >= target_seq_length:
275
+ if current_chunk:
276
+ # `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence.
277
+ a_end = 1
278
+ # if the current chunk has two or more segments, randomly choose how many go into the `A` (first) sentence
279
+ if len(current_chunk) >= 2:
280
+ a_end = random.randint(1, len(current_chunk) - 1)
281
+ # token a
282
+ tokens_a = []
283
+ for j in range(a_end):
284
+ tokens_a.extend(current_chunk[j])
285
+
286
+ # token b
287
+ tokens_b = []
288
+ for j in range(a_end, len(current_chunk)):
289
+ tokens_b.extend(current_chunk[j])
290
+
291
+ if len(tokens_a) == 0 or len(tokens_b) == 0:
292
+ continue
293
+
294
+ # switch tokens_a and tokens_b randomly
295
+ if random.random() < 0.5:
296
+ is_next = False
297
+ tokens_a, tokens_b = tokens_b, tokens_a
298
+ else:
299
+ is_next = True
300
+
301
+ def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
302
+ """Truncates a pair of sequences to a maximum sequence length."""
303
+ while True:
304
+ total_length = len(tokens_a) + len(tokens_b)
305
+ if total_length <= max_num_tokens:
306
+ break
307
+ trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
308
+ if not (len(trunc_tokens) >= 1):
309
+ raise ValueError("Sequence length to be truncated must be no less than one")
310
+ # We want to sometimes truncate from the front and sometimes from the
311
+ # back to add more randomness and avoid biases.
312
+ if random.random() < 0.5:
313
+ del trunc_tokens[0]
314
+ else:
315
+ trunc_tokens.pop()
316
+
317
+ truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
318
+ if not (len(tokens_a) >= 1):
319
+ raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
320
+ if not (len(tokens_b) >= 1):
321
+ raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")
322
+
323
+ # add special tokens
324
+ input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
325
+ # add token type ids, 0 for sentence a, 1 for sentence b
326
+ token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)
327
+
328
+ example = {
329
+ "input_ids": torch.tensor(input_ids, dtype=torch.long),
330
+ "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
331
+ "sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long),
332
+ }
333
+ examples.append(example)
334
+ current_chunk = [] # clear current chunk
335
+ current_length = 0 # reset current text length
336
+ i += 1 # go to next line
337
+ return examples
338
+
339
+ def __len__(self):
340
+ return len(self.examples)
341
+
342
+ def __getitem__(self, i) -> Dict[str, torch.tensor]:
343
+ return self.examples[i]
344
+
345
+
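
A hedged sketch of LineByLineWithSOPTextDataset above; the ALBERT checkpoint and the ./wiki_docs folder of <doc ...>-delimited files are placeholders.

    # Hedged sketch: build SOP sentence pairs from a folder of extracted wiki files.
    from transformers import AutoTokenizer
    from transformers.data.datasets.language_modeling import LineByLineWithSOPTextDataset

    tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
    dataset = LineByLineWithSOPTextDataset(tokenizer=tokenizer, file_dir="./wiki_docs", block_size=512)

    example = dataset[0]
    # keys: input_ids, token_type_ids, sentence_order_label (0 = in order, 1 = swapped)
    print(example["sentence_order_label"].item())
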
346
+ class TextDatasetForNextSentencePrediction(Dataset):
347
+ """
348
+ This will be superseded by a framework-agnostic approach soon.
349
+ """
350
+
351
+ def __init__(
352
+ self,
353
+ tokenizer: PreTrainedTokenizer,
354
+ file_path: str,
355
+ block_size: int,
356
+ overwrite_cache=False,
357
+ short_seq_probability=0.1,
358
+ nsp_probability=0.5,
359
+ ):
360
+ warnings.warn(
361
+ DEPRECATION_WARNING.format(
362
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
363
+ ),
364
+ FutureWarning,
365
+ )
366
+ if not os.path.isfile(file_path):
367
+ raise ValueError(f"Input file path {file_path} not found")
368
+
369
+ self.short_seq_probability = short_seq_probability
370
+ self.nsp_probability = nsp_probability
371
+
372
+ directory, filename = os.path.split(file_path)
373
+ cached_features_file = os.path.join(
374
+ directory,
375
+ f"cached_nsp_{tokenizer.__class__.__name__}_{block_size}_{filename}",
376
+ )
377
+
378
+ self.tokenizer = tokenizer
379
+
380
+ # Make sure only the first process in distributed training processes the dataset,
381
+ # and the others will use the cache.
382
+ lock_path = cached_features_file + ".lock"
383
+
384
+ # Input file format:
385
+ # (1) One sentence per line. These should ideally be actual sentences, not
386
+ # entire paragraphs or arbitrary spans of text. (Because we use the
387
+ # sentence boundaries for the "next sentence prediction" task).
388
+ # (2) Blank lines between documents. Document boundaries are needed so
389
+ # that the "next sentence prediction" task doesn't span between documents.
390
+ #
391
+ # Example:
392
+ # I am very happy.
393
+ # Here is the second sentence.
394
+ #
395
+ # A new document.
396
+
397
+ with FileLock(lock_path):
398
+ if os.path.exists(cached_features_file) and not overwrite_cache:
399
+ start = time.time()
400
+ with open(cached_features_file, "rb") as handle:
401
+ self.examples = pickle.load(handle)
402
+ logger.info(
403
+ f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
404
+ )
405
+ else:
406
+ logger.info(f"Creating features from dataset file at {directory}")
407
+
408
+ self.documents = [[]]
409
+ with open(file_path, encoding="utf-8") as f:
410
+ while True:
411
+ line = f.readline()
412
+ if not line:
413
+ break
414
+ line = line.strip()
415
+
416
+ # Empty lines are used as document delimiters
417
+ if not line and len(self.documents[-1]) != 0:
418
+ self.documents.append([])
419
+ tokens = tokenizer.tokenize(line)
420
+ tokens = tokenizer.convert_tokens_to_ids(tokens)
421
+ if tokens:
422
+ self.documents[-1].append(tokens)
423
+
424
+ logger.info(f"Creating examples from {len(self.documents)} documents.")
425
+ self.examples = []
426
+ for doc_index, document in enumerate(self.documents):
427
+ self.create_examples_from_document(document, doc_index, block_size)
428
+
429
+ start = time.time()
430
+ with open(cached_features_file, "wb") as handle:
431
+ pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
432
+ logger.info(
433
+ f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
434
+ )
435
+
436
+ def create_examples_from_document(self, document: List[List[int]], doc_index: int, block_size: int):
437
+ """Creates examples for a single document."""
438
+
439
+ max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True)
440
+
441
+ # We *usually* want to fill up the entire sequence since we are padding
442
+ # to `block_size` anyways, so short sequences are generally wasted
443
+ # computation. However, we *sometimes*
444
+ # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
445
+ # sequences to minimize the mismatch between pretraining and fine-tuning.
446
+ # The `target_seq_length` is just a rough target however, whereas
447
+ # `block_size` is a hard limit.
448
+ target_seq_length = max_num_tokens
449
+ if random.random() < self.short_seq_probability:
450
+ target_seq_length = random.randint(2, max_num_tokens)
451
+
452
+ current_chunk = [] # a buffer storing the current working segments
453
+ current_length = 0
454
+ i = 0
455
+
456
+ while i < len(document):
457
+ segment = document[i]
458
+ current_chunk.append(segment)
459
+ current_length += len(segment)
460
+ if i == len(document) - 1 or current_length >= target_seq_length:
461
+ if current_chunk:
462
+ # `a_end` is how many segments from `current_chunk` go into the `A`
463
+ # (first) sentence.
464
+ a_end = 1
465
+ if len(current_chunk) >= 2:
466
+ a_end = random.randint(1, len(current_chunk) - 1)
467
+
468
+ tokens_a = []
469
+ for j in range(a_end):
470
+ tokens_a.extend(current_chunk[j])
471
+
472
+ tokens_b = []
473
+
474
+ if len(current_chunk) == 1 or random.random() < self.nsp_probability:
475
+ is_random_next = True
476
+ target_b_length = target_seq_length - len(tokens_a)
477
+
478
+ # This should rarely go for more than one iteration for large
479
+ # corpora. However, just to be careful, we try to make sure that
480
+ # the random document is not the same as the document
481
+ # we're processing.
482
+ for _ in range(10):
483
+ random_document_index = random.randint(0, len(self.documents) - 1)
484
+ if random_document_index != doc_index:
485
+ break
486
+
487
+ random_document = self.documents[random_document_index]
488
+ random_start = random.randint(0, len(random_document) - 1)
489
+ for j in range(random_start, len(random_document)):
490
+ tokens_b.extend(random_document[j])
491
+ if len(tokens_b) >= target_b_length:
492
+ break
493
+ # We didn't actually use these segments so we "put them back" so
494
+ # they don't go to waste.
495
+ num_unused_segments = len(current_chunk) - a_end
496
+ i -= num_unused_segments
497
+ # Actual next
498
+ else:
499
+ is_random_next = False
500
+ for j in range(a_end, len(current_chunk)):
501
+ tokens_b.extend(current_chunk[j])
502
+
503
+ if not (len(tokens_a) >= 1):
504
+ raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
505
+ if not (len(tokens_b) >= 1):
506
+ raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")
507
+
508
+ # add special tokens
509
+ input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
510
+ # add token type ids, 0 for sentence a, 1 for sentence b
511
+ token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)
512
+
513
+ example = {
514
+ "input_ids": torch.tensor(input_ids, dtype=torch.long),
515
+ "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
516
+ "next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long),
517
+ }
518
+
519
+ self.examples.append(example)
520
+
521
+ current_chunk = []
522
+ current_length = 0
523
+
524
+ i += 1
525
+
526
+ def __len__(self):
527
+ return len(self.examples)
528
+
529
+ def __getitem__(self, i):
530
+ return self.examples[i]
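
And a hedged sketch of TextDatasetForNextSentencePrediction; the checkpoint and corpus file are placeholders, with the corpus using one sentence per line and blank lines between documents, as described in the comments above.

    # Hedged sketch: build NSP examples from a sentence-per-line corpus.
    from transformers import AutoTokenizer
    from transformers.data.datasets.language_modeling import TextDatasetForNextSentencePrediction

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = TextDatasetForNextSentencePrediction(tokenizer=tokenizer, file_path="corpus.txt", block_size=128)

    example = dataset[0]
    # keys: input_ids, token_type_ids, next_sentence_label (0 = actual next, 1 = random)
    print(example["next_sentence_label"].item())
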
env-llmeval/lib/python3.10/site-packages/transformers/data/datasets/squad.py ADDED
@@ -0,0 +1,229 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import time
17
+ from dataclasses import dataclass, field
18
+ from enum import Enum
19
+ from typing import Dict, List, Optional, Union
20
+
21
+ import torch
22
+ from filelock import FileLock
23
+ from torch.utils.data import Dataset
24
+
25
+ from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
26
+ from ...tokenization_utils import PreTrainedTokenizer
27
+ from ...utils import logging
28
+ from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
34
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
35
+
36
+
37
+ @dataclass
38
+ class SquadDataTrainingArguments:
39
+ """
40
+ Arguments pertaining to what data we are going to input our model for training and eval.
41
+ """
42
+
43
+ model_type: str = field(
44
+ default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
45
+ )
46
+ data_dir: str = field(
47
+ default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
48
+ )
49
+ max_seq_length: int = field(
50
+ default=128,
51
+ metadata={
52
+ "help": (
53
+ "The maximum total input sequence length after tokenization. Sequences longer "
54
+ "than this will be truncated, sequences shorter will be padded."
55
+ )
56
+ },
57
+ )
58
+ doc_stride: int = field(
59
+ default=128,
60
+ metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
61
+ )
62
+ max_query_length: int = field(
63
+ default=64,
64
+ metadata={
65
+ "help": (
66
+ "The maximum number of tokens for the question. Questions longer than this will "
67
+ "be truncated to this length."
68
+ )
69
+ },
70
+ )
71
+ max_answer_length: int = field(
72
+ default=30,
73
+ metadata={
74
+ "help": (
75
+ "The maximum length of an answer that can be generated. This is needed because the start "
76
+ "and end predictions are not conditioned on one another."
77
+ )
78
+ },
79
+ )
80
+ overwrite_cache: bool = field(
81
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
82
+ )
83
+ version_2_with_negative: bool = field(
84
+ default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
85
+ )
86
+ null_score_diff_threshold: float = field(
87
+ default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
88
+ )
89
+ n_best_size: int = field(
90
+ default=20, metadata={"help": "The total number of n-best predictions to generate."}
91
+ )
92
+ lang_id: int = field(
93
+ default=0,
94
+ metadata={
95
+ "help": (
96
+ "language id of input for language-specific xlm models (see"
97
+ " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
98
+ )
99
+ },
100
+ )
101
+ threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
102
+
103
+
104
+ class Split(Enum):
105
+ train = "train"
106
+ dev = "dev"
107
+
108
+
109
+ class SquadDataset(Dataset):
110
+ """
111
+ This will be superseded by a framework-agnostic approach soon.
112
+ """
113
+
114
+ args: SquadDataTrainingArguments
115
+ features: List[SquadFeatures]
116
+ mode: Split
117
+ is_language_sensitive: bool
118
+
119
+ def __init__(
120
+ self,
121
+ args: SquadDataTrainingArguments,
122
+ tokenizer: PreTrainedTokenizer,
123
+ limit_length: Optional[int] = None,
124
+ mode: Union[str, Split] = Split.train,
125
+ is_language_sensitive: Optional[bool] = False,
126
+ cache_dir: Optional[str] = None,
127
+ dataset_format: Optional[str] = "pt",
128
+ ):
129
+ self.args = args
130
+ self.is_language_sensitive = is_language_sensitive
131
+ self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
132
+ if isinstance(mode, str):
133
+ try:
134
+ mode = Split[mode]
135
+ except KeyError:
136
+ raise KeyError("mode is not a valid split name")
137
+ self.mode = mode
138
+ # Load data features from cache or dataset file
139
+ version_tag = "v2" if args.version_2_with_negative else "v1"
140
+ cached_features_file = os.path.join(
141
+ cache_dir if cache_dir is not None else args.data_dir,
142
+ f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
143
+ )
144
+
145
+ # Make sure only the first process in distributed training processes the dataset,
146
+ # and the others will use the cache.
147
+ lock_path = cached_features_file + ".lock"
148
+ with FileLock(lock_path):
149
+ if os.path.exists(cached_features_file) and not args.overwrite_cache:
150
+ start = time.time()
151
+ self.old_features = torch.load(cached_features_file)
152
+
153
+ # Legacy cache files have only features, while new cache files
154
+ # will have dataset and examples also.
155
+ self.features = self.old_features["features"]
156
+ self.dataset = self.old_features.get("dataset", None)
157
+ self.examples = self.old_features.get("examples", None)
158
+ logger.info(
159
+ f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
160
+ )
161
+
162
+ if self.dataset is None or self.examples is None:
163
+ logger.warning(
164
+ f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
165
+ " future run"
166
+ )
167
+ else:
168
+ if mode == Split.dev:
169
+ self.examples = self.processor.get_dev_examples(args.data_dir)
170
+ else:
171
+ self.examples = self.processor.get_train_examples(args.data_dir)
172
+
173
+ self.features, self.dataset = squad_convert_examples_to_features(
174
+ examples=self.examples,
175
+ tokenizer=tokenizer,
176
+ max_seq_length=args.max_seq_length,
177
+ doc_stride=args.doc_stride,
178
+ max_query_length=args.max_query_length,
179
+ is_training=mode == Split.train,
180
+ threads=args.threads,
181
+ return_dataset=dataset_format,
182
+ )
183
+
184
+ start = time.time()
185
+ torch.save(
186
+ {"features": self.features, "dataset": self.dataset, "examples": self.examples},
187
+ cached_features_file,
188
+ )
189
+ # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
190
+ logger.info(
191
+ f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
192
+ )
193
+
194
+ def __len__(self):
195
+ return len(self.features)
196
+
197
+ def __getitem__(self, i) -> Dict[str, torch.Tensor]:
198
+ # Convert to Tensors and build dataset
199
+ feature = self.features[i]
200
+
201
+ input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
202
+ attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
203
+ token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
204
+ cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
205
+ p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
206
+ is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
207
+
208
+ inputs = {
209
+ "input_ids": input_ids,
210
+ "attention_mask": attention_mask,
211
+ "token_type_ids": token_type_ids,
212
+ }
213
+
214
+ if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
215
+ del inputs["token_type_ids"]
216
+
217
+ if self.args.model_type in ["xlnet", "xlm"]:
218
+ inputs.update({"cls_index": cls_index, "p_mask": p_mask})
219
+ if self.args.version_2_with_negative:
220
+ inputs.update({"is_impossible": is_impossible})
221
+ if self.is_language_sensitive:
222
+ inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
223
+
224
+ if self.mode == Split.train:
225
+ start_positions = torch.tensor(feature.start_position, dtype=torch.long)
226
+ end_positions = torch.tensor(feature.end_position, dtype=torch.long)
227
+ inputs.update({"start_positions": start_positions, "end_positions": end_positions})
228
+
229
+ return inputs
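For orientation, here is a minimal usage sketch of the `SquadDataset` defined above. The checkpoint name and data directory are placeholder assumptions, and it assumes the class and its arguments dataclass are exported from `transformers.data.datasets` (as the package `__init__` suggests):

```python
# Sketch only: "bert-base-uncased" and "path/to/squad" are placeholder assumptions.
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.data.datasets import SquadDataset, SquadDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = SquadDataTrainingArguments(
    model_type="bert",
    data_dir="path/to/squad",  # directory containing the SQuAD train/dev json files
    max_seq_length=384,
    doc_stride=128,
)
train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")

loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
batch = next(iter(loader))  # dict of tensors: input_ids, attention_mask, start/end positions, ...
```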
env-llmeval/lib/python3.10/site-packages/transformers/data/metrics/__init__.py ADDED
@@ -0,0 +1,98 @@
1
+ # Licensed under the Apache License, Version 2.0 (the "License");
2
+ # you may not use this file except in compliance with the License.
3
+ # You may obtain a copy of the License at
4
+ #
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ #
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+
13
+ import warnings
14
+
15
+ from ...utils import is_sklearn_available, requires_backends
16
+
17
+
18
+ if is_sklearn_available():
19
+ from scipy.stats import pearsonr, spearmanr
20
+ from sklearn.metrics import f1_score, matthews_corrcoef
21
+
22
+
23
+ DEPRECATION_WARNING = (
24
+ "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
25
+ "library. You can have a look at this example script for pointers: "
26
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
27
+ )
28
+
29
+
30
+ def simple_accuracy(preds, labels):
31
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
32
+ requires_backends(simple_accuracy, "sklearn")
33
+ return (preds == labels).mean()
34
+
35
+
36
+ def acc_and_f1(preds, labels):
37
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
38
+ requires_backends(acc_and_f1, "sklearn")
39
+ acc = simple_accuracy(preds, labels)
40
+ f1 = f1_score(y_true=labels, y_pred=preds)
41
+ return {
42
+ "acc": acc,
43
+ "f1": f1,
44
+ "acc_and_f1": (acc + f1) / 2,
45
+ }
46
+
47
+
48
+ def pearson_and_spearman(preds, labels):
49
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
50
+ requires_backends(pearson_and_spearman, "sklearn")
51
+ pearson_corr = pearsonr(preds, labels)[0]
52
+ spearman_corr = spearmanr(preds, labels)[0]
53
+ return {
54
+ "pearson": pearson_corr,
55
+ "spearmanr": spearman_corr,
56
+ "corr": (pearson_corr + spearman_corr) / 2,
57
+ }
58
+
59
+
60
+ def glue_compute_metrics(task_name, preds, labels):
61
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
62
+ requires_backends(glue_compute_metrics, "sklearn")
63
+ assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
64
+ if task_name == "cola":
65
+ return {"mcc": matthews_corrcoef(labels, preds)}
66
+ elif task_name == "sst-2":
67
+ return {"acc": simple_accuracy(preds, labels)}
68
+ elif task_name == "mrpc":
69
+ return acc_and_f1(preds, labels)
70
+ elif task_name == "sts-b":
71
+ return pearson_and_spearman(preds, labels)
72
+ elif task_name == "qqp":
73
+ return acc_and_f1(preds, labels)
74
+ elif task_name == "mnli":
75
+ return {"mnli/acc": simple_accuracy(preds, labels)}
76
+ elif task_name == "mnli-mm":
77
+ return {"mnli-mm/acc": simple_accuracy(preds, labels)}
78
+ elif task_name == "qnli":
79
+ return {"acc": simple_accuracy(preds, labels)}
80
+ elif task_name == "rte":
81
+ return {"acc": simple_accuracy(preds, labels)}
82
+ elif task_name == "wnli":
83
+ return {"acc": simple_accuracy(preds, labels)}
84
+ elif task_name == "hans":
85
+ return {"acc": simple_accuracy(preds, labels)}
86
+ else:
87
+ raise KeyError(task_name)
88
+
89
+
90
+ def xnli_compute_metrics(task_name, preds, labels):
91
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
92
+ requires_backends(xnli_compute_metrics, "sklearn")
93
+ if len(preds) != len(labels):
94
+ raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
95
+ if task_name == "xnli":
96
+ return {"acc": simple_accuracy(preds, labels)}
97
+ else:
98
+ raise KeyError(task_name)
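As a quick illustration of the deprecated helpers above (requires scikit-learn and scipy to be installed; the numbers are worked out by hand, and the calls emit a `FutureWarning`):

```python
import numpy as np

from transformers.data.metrics import glue_compute_metrics, simple_accuracy

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])

print(simple_accuracy(preds, labels))               # 0.75
print(glue_compute_metrics("mrpc", preds, labels))  # acc 0.75, f1 0.8, acc_and_f1 0.775
```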
env-llmeval/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc ADDED
Binary file (16.1 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py ADDED
@@ -0,0 +1,780 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Very heavily inspired by the official evaluation script for SQuAD version 2.0, which was modified by the XLNet authors to
16
+ update the `find_best_threshold` scripts for SQuAD v2.0.
17
+
18
+ In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an
19
+ additional na_prob.json file is provided. This file is expected to map question IDs to the model's predicted
20
+ probability that a question is unanswerable.
21
+ """
22
+
23
+
24
+ import collections
25
+ import json
26
+ import math
27
+ import re
28
+ import string
29
+
30
+ from ...models.bert import BasicTokenizer
31
+ from ...utils import logging
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ def normalize_answer(s):
38
+ """Lower text and remove punctuation, articles and extra whitespace."""
39
+
40
+ def remove_articles(text):
41
+ regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
42
+ return re.sub(regex, " ", text)
43
+
44
+ def white_space_fix(text):
45
+ return " ".join(text.split())
46
+
47
+ def remove_punc(text):
48
+ exclude = set(string.punctuation)
49
+ return "".join(ch for ch in text if ch not in exclude)
50
+
51
+ def lower(text):
52
+ return text.lower()
53
+
54
+ return white_space_fix(remove_articles(remove_punc(lower(s))))
55
+
56
+
57
+ def get_tokens(s):
58
+ if not s:
59
+ return []
60
+ return normalize_answer(s).split()
61
+
62
+
63
+ def compute_exact(a_gold, a_pred):
64
+ return int(normalize_answer(a_gold) == normalize_answer(a_pred))
65
+
66
+
67
+ def compute_f1(a_gold, a_pred):
68
+ gold_toks = get_tokens(a_gold)
69
+ pred_toks = get_tokens(a_pred)
70
+ common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
71
+ num_same = sum(common.values())
72
+ if len(gold_toks) == 0 or len(pred_toks) == 0:
73
+ # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
74
+ return int(gold_toks == pred_toks)
75
+ if num_same == 0:
76
+ return 0
77
+ precision = 1.0 * num_same / len(pred_toks)
78
+ recall = 1.0 * num_same / len(gold_toks)
79
+ f1 = (2 * precision * recall) / (precision + recall)
80
+ return f1
81
+
82
+
83
+ def get_raw_scores(examples, preds):
84
+ """
85
+ Computes the exact and f1 scores from the examples and the model predictions
86
+ """
87
+ exact_scores = {}
88
+ f1_scores = {}
89
+
90
+ for example in examples:
91
+ qas_id = example.qas_id
92
+ gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]
93
+
94
+ if not gold_answers:
95
+ # For unanswerable questions, only correct answer is empty string
96
+ gold_answers = [""]
97
+
98
+ if qas_id not in preds:
99
+ print(f"Missing prediction for {qas_id}")
100
+ continue
101
+
102
+ prediction = preds[qas_id]
103
+ exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
104
+ f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)
105
+
106
+ return exact_scores, f1_scores
107
+
108
+
109
+ def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
110
+ new_scores = {}
111
+ for qid, s in scores.items():
112
+ pred_na = na_probs[qid] > na_prob_thresh
113
+ if pred_na:
114
+ new_scores[qid] = float(not qid_to_has_ans[qid])
115
+ else:
116
+ new_scores[qid] = s
117
+ return new_scores
118
+
119
+
120
+ def make_eval_dict(exact_scores, f1_scores, qid_list=None):
121
+ if not qid_list:
122
+ total = len(exact_scores)
123
+ return collections.OrderedDict(
124
+ [
125
+ ("exact", 100.0 * sum(exact_scores.values()) / total),
126
+ ("f1", 100.0 * sum(f1_scores.values()) / total),
127
+ ("total", total),
128
+ ]
129
+ )
130
+ else:
131
+ total = len(qid_list)
132
+ return collections.OrderedDict(
133
+ [
134
+ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
135
+ ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
136
+ ("total", total),
137
+ ]
138
+ )
139
+
140
+
141
+ def merge_eval(main_eval, new_eval, prefix):
142
+ for k in new_eval:
143
+ main_eval[f"{prefix}_{k}"] = new_eval[k]
144
+
145
+
146
+ def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
147
+ num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
148
+ cur_score = num_no_ans
149
+ best_score = cur_score
150
+ best_thresh = 0.0
151
+ qid_list = sorted(na_probs, key=lambda k: na_probs[k])
152
+ for i, qid in enumerate(qid_list):
153
+ if qid not in scores:
154
+ continue
155
+ if qid_to_has_ans[qid]:
156
+ diff = scores[qid]
157
+ else:
158
+ if preds[qid]:
159
+ diff = -1
160
+ else:
161
+ diff = 0
162
+ cur_score += diff
163
+ if cur_score > best_score:
164
+ best_score = cur_score
165
+ best_thresh = na_probs[qid]
166
+
167
+ has_ans_score, has_ans_cnt = 0, 0
168
+ for qid in qid_list:
169
+ if not qid_to_has_ans[qid]:
170
+ continue
171
+ has_ans_cnt += 1
172
+
173
+ if qid not in scores:
174
+ continue
175
+ has_ans_score += scores[qid]
176
+
177
+ return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
178
+
179
+
180
+ def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
181
+ best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
182
+ best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
183
+ main_eval["best_exact"] = best_exact
184
+ main_eval["best_exact_thresh"] = exact_thresh
185
+ main_eval["best_f1"] = best_f1
186
+ main_eval["best_f1_thresh"] = f1_thresh
187
+ main_eval["has_ans_exact"] = has_ans_exact
188
+ main_eval["has_ans_f1"] = has_ans_f1
189
+
190
+
191
+ def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
192
+ num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
193
+ cur_score = num_no_ans
194
+ best_score = cur_score
195
+ best_thresh = 0.0
196
+ qid_list = sorted(na_probs, key=lambda k: na_probs[k])
197
+ for _, qid in enumerate(qid_list):
198
+ if qid not in scores:
199
+ continue
200
+ if qid_to_has_ans[qid]:
201
+ diff = scores[qid]
202
+ else:
203
+ if preds[qid]:
204
+ diff = -1
205
+ else:
206
+ diff = 0
207
+ cur_score += diff
208
+ if cur_score > best_score:
209
+ best_score = cur_score
210
+ best_thresh = na_probs[qid]
211
+ return 100.0 * best_score / len(scores), best_thresh
212
+
213
+
214
+ def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
215
+ best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
216
+ best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
217
+
218
+ main_eval["best_exact"] = best_exact
219
+ main_eval["best_exact_thresh"] = exact_thresh
220
+ main_eval["best_f1"] = best_f1
221
+ main_eval["best_f1_thresh"] = f1_thresh
222
+
223
+
224
+ def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
225
+ qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
226
+ has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
227
+ no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]
228
+
229
+ if no_answer_probs is None:
230
+ no_answer_probs = {k: 0.0 for k in preds}
231
+
232
+ exact, f1 = get_raw_scores(examples, preds)
233
+
234
+ exact_threshold = apply_no_ans_threshold(
235
+ exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
236
+ )
237
+ f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)
238
+
239
+ evaluation = make_eval_dict(exact_threshold, f1_threshold)
240
+
241
+ if has_answer_qids:
242
+ has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids)
243
+ merge_eval(evaluation, has_ans_eval, "HasAns")
244
+
245
+ if no_answer_qids:
246
+ no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids)
247
+ merge_eval(evaluation, no_ans_eval, "NoAns")
248
+
249
+ if no_answer_probs:
250
+ find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)
251
+
252
+ return evaluation
253
+
254
+
255
+ def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
256
+ """Project the tokenized prediction back to the original text."""
257
+
258
+ # When we created the data, we kept track of the alignment between original
259
+ # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
260
+ # now `orig_text` contains the span of our original text corresponding to the
261
+ # span that we predicted.
262
+ #
263
+ # However, `orig_text` may contain extra characters that we don't want in
264
+ # our prediction.
265
+ #
266
+ # For example, let's say:
267
+ # pred_text = steve smith
268
+ # orig_text = Steve Smith's
269
+ #
270
+ # We don't want to return `orig_text` because it contains the extra "'s".
271
+ #
272
+ # We don't want to return `pred_text` because it's already been normalized
273
+ # (the SQuAD eval script also does punctuation stripping/lower casing but
274
+ # our tokenizer does additional normalization like stripping accent
275
+ # characters).
276
+ #
277
+ # What we really want to return is "Steve Smith".
278
+ #
279
+ # Therefore, we have to apply a semi-complicated alignment heuristic between
280
+ # `pred_text` and `orig_text` to get a character-to-character alignment. This
281
+ # can fail in certain cases in which case we just return `orig_text`.
282
+
283
+ def _strip_spaces(text):
284
+ ns_chars = []
285
+ ns_to_s_map = collections.OrderedDict()
286
+ for i, c in enumerate(text):
287
+ if c == " ":
288
+ continue
289
+ ns_to_s_map[len(ns_chars)] = i
290
+ ns_chars.append(c)
291
+ ns_text = "".join(ns_chars)
292
+ return (ns_text, ns_to_s_map)
293
+
294
+ # We first tokenize `orig_text`, strip whitespace from the result
295
+ # and `pred_text`, and check if they are the same length. If they are
296
+ # NOT the same length, the heuristic has failed. If they are the same
297
+ # length, we assume the characters are one-to-one aligned.
298
+ tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
299
+
300
+ tok_text = " ".join(tokenizer.tokenize(orig_text))
301
+
302
+ start_position = tok_text.find(pred_text)
303
+ if start_position == -1:
304
+ if verbose_logging:
305
+ logger.info(f"Unable to find text: '{pred_text}' in '{orig_text}'")
306
+ return orig_text
307
+ end_position = start_position + len(pred_text) - 1
308
+
309
+ (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
310
+ (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
311
+
312
+ if len(orig_ns_text) != len(tok_ns_text):
313
+ if verbose_logging:
314
+ logger.info(f"Length not equal after stripping spaces: '{orig_ns_text}' vs '{tok_ns_text}'")
315
+ return orig_text
316
+
317
+ # We then project the characters in `pred_text` back to `orig_text` using
318
+ # the character-to-character alignment.
319
+ tok_s_to_ns_map = {}
320
+ for i, tok_index in tok_ns_to_s_map.items():
321
+ tok_s_to_ns_map[tok_index] = i
322
+
323
+ orig_start_position = None
324
+ if start_position in tok_s_to_ns_map:
325
+ ns_start_position = tok_s_to_ns_map[start_position]
326
+ if ns_start_position in orig_ns_to_s_map:
327
+ orig_start_position = orig_ns_to_s_map[ns_start_position]
328
+
329
+ if orig_start_position is None:
330
+ if verbose_logging:
331
+ logger.info("Couldn't map start position")
332
+ return orig_text
333
+
334
+ orig_end_position = None
335
+ if end_position in tok_s_to_ns_map:
336
+ ns_end_position = tok_s_to_ns_map[end_position]
337
+ if ns_end_position in orig_ns_to_s_map:
338
+ orig_end_position = orig_ns_to_s_map[ns_end_position]
339
+
340
+ if orig_end_position is None:
341
+ if verbose_logging:
342
+ logger.info("Couldn't map end position")
343
+ return orig_text
344
+
345
+ output_text = orig_text[orig_start_position : (orig_end_position + 1)]
346
+ return output_text
347
+
348
+
349
+ def _get_best_indexes(logits, n_best_size):
350
+ """Get the n-best logits from a list."""
351
+ index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
352
+
353
+ best_indexes = []
354
+ for i in range(len(index_and_score)):
355
+ if i >= n_best_size:
356
+ break
357
+ best_indexes.append(index_and_score[i][0])
358
+ return best_indexes
359
+
360
+
361
+ def _compute_softmax(scores):
362
+ """Compute softmax probability over raw logits."""
363
+ if not scores:
364
+ return []
365
+
366
+ max_score = None
367
+ for score in scores:
368
+ if max_score is None or score > max_score:
369
+ max_score = score
370
+
371
+ exp_scores = []
372
+ total_sum = 0.0
373
+ for score in scores:
374
+ x = math.exp(score - max_score)
375
+ exp_scores.append(x)
376
+ total_sum += x
377
+
378
+ probs = []
379
+ for score in exp_scores:
380
+ probs.append(score / total_sum)
381
+ return probs
382
+
383
+
384
+ def compute_predictions_logits(
385
+ all_examples,
386
+ all_features,
387
+ all_results,
388
+ n_best_size,
389
+ max_answer_length,
390
+ do_lower_case,
391
+ output_prediction_file,
392
+ output_nbest_file,
393
+ output_null_log_odds_file,
394
+ verbose_logging,
395
+ version_2_with_negative,
396
+ null_score_diff_threshold,
397
+ tokenizer,
398
+ ):
399
+ """Write final predictions to the json file and log-odds of null if needed."""
400
+ if output_prediction_file:
401
+ logger.info(f"Writing predictions to: {output_prediction_file}")
402
+ if output_nbest_file:
403
+ logger.info(f"Writing nbest to: {output_nbest_file}")
404
+ if output_null_log_odds_file and version_2_with_negative:
405
+ logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}")
406
+
407
+ example_index_to_features = collections.defaultdict(list)
408
+ for feature in all_features:
409
+ example_index_to_features[feature.example_index].append(feature)
410
+
411
+ unique_id_to_result = {}
412
+ for result in all_results:
413
+ unique_id_to_result[result.unique_id] = result
414
+
415
+ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
416
+ "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
417
+ )
418
+
419
+ all_predictions = collections.OrderedDict()
420
+ all_nbest_json = collections.OrderedDict()
421
+ scores_diff_json = collections.OrderedDict()
422
+
423
+ for example_index, example in enumerate(all_examples):
424
+ features = example_index_to_features[example_index]
425
+
426
+ prelim_predictions = []
427
+ # keep track of the minimum score of null start+end of position 0
428
+ score_null = 1000000 # large and positive
429
+ min_null_feature_index = 0 # the paragraph slice with min null score
430
+ null_start_logit = 0 # the start logit at the slice with min null score
431
+ null_end_logit = 0 # the end logit at the slice with min null score
432
+ for feature_index, feature in enumerate(features):
433
+ result = unique_id_to_result[feature.unique_id]
434
+ start_indexes = _get_best_indexes(result.start_logits, n_best_size)
435
+ end_indexes = _get_best_indexes(result.end_logits, n_best_size)
436
+ # if we could have irrelevant answers, get the min score of irrelevant
437
+ if version_2_with_negative:
438
+ feature_null_score = result.start_logits[0] + result.end_logits[0]
439
+ if feature_null_score < score_null:
440
+ score_null = feature_null_score
441
+ min_null_feature_index = feature_index
442
+ null_start_logit = result.start_logits[0]
443
+ null_end_logit = result.end_logits[0]
444
+ for start_index in start_indexes:
445
+ for end_index in end_indexes:
446
+ # We could hypothetically create invalid predictions, e.g., predict
447
+ # that the start of the span is in the question. We throw out all
448
+ # invalid predictions.
449
+ if start_index >= len(feature.tokens):
450
+ continue
451
+ if end_index >= len(feature.tokens):
452
+ continue
453
+ if start_index not in feature.token_to_orig_map:
454
+ continue
455
+ if end_index not in feature.token_to_orig_map:
456
+ continue
457
+ if not feature.token_is_max_context.get(start_index, False):
458
+ continue
459
+ if end_index < start_index:
460
+ continue
461
+ length = end_index - start_index + 1
462
+ if length > max_answer_length:
463
+ continue
464
+ prelim_predictions.append(
465
+ _PrelimPrediction(
466
+ feature_index=feature_index,
467
+ start_index=start_index,
468
+ end_index=end_index,
469
+ start_logit=result.start_logits[start_index],
470
+ end_logit=result.end_logits[end_index],
471
+ )
472
+ )
473
+ if version_2_with_negative:
474
+ prelim_predictions.append(
475
+ _PrelimPrediction(
476
+ feature_index=min_null_feature_index,
477
+ start_index=0,
478
+ end_index=0,
479
+ start_logit=null_start_logit,
480
+ end_logit=null_end_logit,
481
+ )
482
+ )
483
+ prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
484
+
485
+ _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
486
+ "NbestPrediction", ["text", "start_logit", "end_logit"]
487
+ )
488
+
489
+ seen_predictions = {}
490
+ nbest = []
491
+ for pred in prelim_predictions:
492
+ if len(nbest) >= n_best_size:
493
+ break
494
+ feature = features[pred.feature_index]
495
+ if pred.start_index > 0: # this is a non-null prediction
496
+ tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
497
+ orig_doc_start = feature.token_to_orig_map[pred.start_index]
498
+ orig_doc_end = feature.token_to_orig_map[pred.end_index]
499
+ orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
500
+
501
+ tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
502
+
503
+ # tok_text = " ".join(tok_tokens)
504
+ #
505
+ # # De-tokenize WordPieces that have been split off.
506
+ # tok_text = tok_text.replace(" ##", "")
507
+ # tok_text = tok_text.replace("##", "")
508
+
509
+ # Clean whitespace
510
+ tok_text = tok_text.strip()
511
+ tok_text = " ".join(tok_text.split())
512
+ orig_text = " ".join(orig_tokens)
513
+
514
+ final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
515
+ if final_text in seen_predictions:
516
+ continue
517
+
518
+ seen_predictions[final_text] = True
519
+ else:
520
+ final_text = ""
521
+ seen_predictions[final_text] = True
522
+
523
+ nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
524
+ # if we didn't include the empty option in the n-best, include it
525
+ if version_2_with_negative:
526
+ if "" not in seen_predictions:
527
+ nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))
528
+
529
+ # In very rare edge cases we could only have single null prediction.
530
+ # So we just create a nonce prediction in this case to avoid failure.
531
+ if len(nbest) == 1:
532
+ nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
533
+
534
+ # In very rare edge cases we could have no valid predictions. So we
535
+ # just create a nonce prediction in this case to avoid failure.
536
+ if not nbest:
537
+ nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
538
+
539
+ if len(nbest) < 1:
540
+ raise ValueError("No valid predictions")
541
+
542
+ total_scores = []
543
+ best_non_null_entry = None
544
+ for entry in nbest:
545
+ total_scores.append(entry.start_logit + entry.end_logit)
546
+ if not best_non_null_entry:
547
+ if entry.text:
548
+ best_non_null_entry = entry
549
+
550
+ probs = _compute_softmax(total_scores)
551
+
552
+ nbest_json = []
553
+ for i, entry in enumerate(nbest):
554
+ output = collections.OrderedDict()
555
+ output["text"] = entry.text
556
+ output["probability"] = probs[i]
557
+ output["start_logit"] = entry.start_logit
558
+ output["end_logit"] = entry.end_logit
559
+ nbest_json.append(output)
560
+
561
+ if len(nbest_json) < 1:
562
+ raise ValueError("No valid predictions")
563
+
564
+ if not version_2_with_negative:
565
+ all_predictions[example.qas_id] = nbest_json[0]["text"]
566
+ else:
567
+ # predict "" iff the null score - the score of best non-null > threshold
568
+ score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
569
+ scores_diff_json[example.qas_id] = score_diff
570
+ if score_diff > null_score_diff_threshold:
571
+ all_predictions[example.qas_id] = ""
572
+ else:
573
+ all_predictions[example.qas_id] = best_non_null_entry.text
574
+ all_nbest_json[example.qas_id] = nbest_json
575
+
576
+ if output_prediction_file:
577
+ with open(output_prediction_file, "w") as writer:
578
+ writer.write(json.dumps(all_predictions, indent=4) + "\n")
579
+
580
+ if output_nbest_file:
581
+ with open(output_nbest_file, "w") as writer:
582
+ writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
583
+
584
+ if output_null_log_odds_file and version_2_with_negative:
585
+ with open(output_null_log_odds_file, "w") as writer:
586
+ writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
587
+
588
+ return all_predictions
589
+
590
+
591
+ def compute_predictions_log_probs(
592
+ all_examples,
593
+ all_features,
594
+ all_results,
595
+ n_best_size,
596
+ max_answer_length,
597
+ output_prediction_file,
598
+ output_nbest_file,
599
+ output_null_log_odds_file,
600
+ start_n_top,
601
+ end_n_top,
602
+ version_2_with_negative,
603
+ tokenizer,
604
+ verbose_logging,
605
+ ):
606
+ """
607
+ XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of
608
+ null if needed.
609
+
610
+ Requires utils_squad_evaluate.py
611
+ """
612
+ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
613
+ "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]
614
+ )
615
+
616
+ _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
617
+ "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
618
+ )
619
+
620
+ logger.info(f"Writing predictions to: {output_prediction_file}")
621
+
622
+ example_index_to_features = collections.defaultdict(list)
623
+ for feature in all_features:
624
+ example_index_to_features[feature.example_index].append(feature)
625
+
626
+ unique_id_to_result = {}
627
+ for result in all_results:
628
+ unique_id_to_result[result.unique_id] = result
629
+
630
+ all_predictions = collections.OrderedDict()
631
+ all_nbest_json = collections.OrderedDict()
632
+ scores_diff_json = collections.OrderedDict()
633
+
634
+ for example_index, example in enumerate(all_examples):
635
+ features = example_index_to_features[example_index]
636
+
637
+ prelim_predictions = []
638
+ # keep track of the minimum score of null start+end of position 0
639
+ score_null = 1000000 # large and positive
640
+
641
+ for feature_index, feature in enumerate(features):
642
+ result = unique_id_to_result[feature.unique_id]
643
+
644
+ cur_null_score = result.cls_logits
645
+
646
+ # if we could have irrelevant answers, get the min score of irrelevant
647
+ score_null = min(score_null, cur_null_score)
648
+
649
+ for i in range(start_n_top):
650
+ for j in range(end_n_top):
651
+ start_log_prob = result.start_logits[i]
652
+ start_index = result.start_top_index[i]
653
+
654
+ j_index = i * end_n_top + j
655
+
656
+ end_log_prob = result.end_logits[j_index]
657
+ end_index = result.end_top_index[j_index]
658
+
659
+ # We could hypothetically create invalid predictions, e.g., predict
660
+ # that the start of the span is in the question. We throw out all
661
+ # invalid predictions.
662
+ if start_index >= feature.paragraph_len - 1:
663
+ continue
664
+ if end_index >= feature.paragraph_len - 1:
665
+ continue
666
+
667
+ if not feature.token_is_max_context.get(start_index, False):
668
+ continue
669
+ if end_index < start_index:
670
+ continue
671
+ length = end_index - start_index + 1
672
+ if length > max_answer_length:
673
+ continue
674
+
675
+ prelim_predictions.append(
676
+ _PrelimPrediction(
677
+ feature_index=feature_index,
678
+ start_index=start_index,
679
+ end_index=end_index,
680
+ start_log_prob=start_log_prob,
681
+ end_log_prob=end_log_prob,
682
+ )
683
+ )
684
+
685
+ prelim_predictions = sorted(
686
+ prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True
687
+ )
688
+
689
+ seen_predictions = {}
690
+ nbest = []
691
+ for pred in prelim_predictions:
692
+ if len(nbest) >= n_best_size:
693
+ break
694
+ feature = features[pred.feature_index]
695
+
696
+ # XLNet un-tokenizer
697
+ # Let's keep it simple for now and see if we need all this later.
698
+ #
699
+ # tok_start_to_orig_index = feature.tok_start_to_orig_index
700
+ # tok_end_to_orig_index = feature.tok_end_to_orig_index
701
+ # start_orig_pos = tok_start_to_orig_index[pred.start_index]
702
+ # end_orig_pos = tok_end_to_orig_index[pred.end_index]
703
+ # paragraph_text = example.paragraph_text
704
+ # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
705
+
706
+ # Previously used Bert untokenizer
707
+ tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
708
+ orig_doc_start = feature.token_to_orig_map[pred.start_index]
709
+ orig_doc_end = feature.token_to_orig_map[pred.end_index]
710
+ orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
711
+ tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
712
+
713
+ # Clean whitespace
714
+ tok_text = tok_text.strip()
715
+ tok_text = " ".join(tok_text.split())
716
+ orig_text = " ".join(orig_tokens)
717
+
718
+ if hasattr(tokenizer, "do_lower_case"):
719
+ do_lower_case = tokenizer.do_lower_case
720
+ else:
721
+ do_lower_case = tokenizer.do_lowercase_and_remove_accent
722
+
723
+ final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
724
+
725
+ if final_text in seen_predictions:
726
+ continue
727
+
728
+ seen_predictions[final_text] = True
729
+
730
+ nbest.append(
731
+ _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)
732
+ )
733
+
734
+ # In very rare edge cases we could have no valid predictions. So we
735
+ # just create a nonce prediction in this case to avoid failure.
736
+ if not nbest:
737
+ nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))
738
+
739
+ total_scores = []
740
+ best_non_null_entry = None
741
+ for entry in nbest:
742
+ total_scores.append(entry.start_log_prob + entry.end_log_prob)
743
+ if not best_non_null_entry:
744
+ best_non_null_entry = entry
745
+
746
+ probs = _compute_softmax(total_scores)
747
+
748
+ nbest_json = []
749
+ for i, entry in enumerate(nbest):
750
+ output = collections.OrderedDict()
751
+ output["text"] = entry.text
752
+ output["probability"] = probs[i]
753
+ output["start_log_prob"] = entry.start_log_prob
754
+ output["end_log_prob"] = entry.end_log_prob
755
+ nbest_json.append(output)
756
+
757
+ if len(nbest_json) < 1:
758
+ raise ValueError("No valid predictions")
759
+ if best_non_null_entry is None:
760
+ raise ValueError("No valid predictions")
761
+
762
+ score_diff = score_null
763
+ scores_diff_json[example.qas_id] = score_diff
764
+ # note(zhiliny): always predict best_non_null_entry
765
+ # and the evaluation script will search for the best threshold
766
+ all_predictions[example.qas_id] = best_non_null_entry.text
767
+
768
+ all_nbest_json[example.qas_id] = nbest_json
769
+
770
+ with open(output_prediction_file, "w") as writer:
771
+ writer.write(json.dumps(all_predictions, indent=4) + "\n")
772
+
773
+ with open(output_nbest_file, "w") as writer:
774
+ writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
775
+
776
+ if version_2_with_negative:
777
+ with open(output_null_log_odds_file, "w") as writer:
778
+ writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
779
+
780
+ return all_predictions
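For orientation, a small hand-checked sketch of the scoring helpers defined at the top of this file (the import path matches the module above):

```python
from transformers.data.metrics.squad_metrics import compute_exact, compute_f1, normalize_answer

gold, pred = "Steve Smith's", "steve smith"

print(normalize_answer(gold))     # "steve smiths" (lower-cased, punctuation stripped)
print(compute_exact(gold, pred))  # 0 -- normalized strings differ ("smiths" vs "smith")
print(compute_f1(gold, pred))     # 0.5 -- one shared token ("steve") out of two on each side
```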
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/audio_classification.py ADDED
@@ -0,0 +1,215 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import subprocess
15
+ from typing import Union
16
+
17
+ import numpy as np
18
+ import requests
19
+
20
+ from ..utils import add_end_docstrings, is_torch_available, is_torchaudio_available, logging
21
+ from .base import Pipeline, build_pipeline_init_args
22
+
23
+
24
+ if is_torch_available():
25
+ from ..models.auto.modeling_auto import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
31
+ """
32
+ Helper function to read an audio file through ffmpeg.
33
+ """
34
+ ar = f"{sampling_rate}"
35
+ ac = "1"
36
+ format_for_conversion = "f32le"
37
+ ffmpeg_command = [
38
+ "ffmpeg",
39
+ "-i",
40
+ "pipe:0",
41
+ "-ac",
42
+ ac,
43
+ "-ar",
44
+ ar,
45
+ "-f",
46
+ format_for_conversion,
47
+ "-hide_banner",
48
+ "-loglevel",
49
+ "quiet",
50
+ "pipe:1",
51
+ ]
52
+
53
+ try:
54
+ ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
55
+ except FileNotFoundError:
56
+ raise ValueError("ffmpeg was not found but is required to load audio files from filename")
57
+ output_stream = ffmpeg_process.communicate(bpayload)
58
+ out_bytes = output_stream[0]
59
+
60
+ audio = np.frombuffer(out_bytes, np.float32)
61
+ if audio.shape[0] == 0:
62
+ raise ValueError("Malformed soundfile")
63
+ return audio
64
+
65
+
66
+ @add_end_docstrings(build_pipeline_init_args(has_feature_extractor=True))
67
+ class AudioClassificationPipeline(Pipeline):
68
+ """
69
+ Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a
70
+ raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio
71
+ formats.
72
+
73
+ Example:
74
+
75
+ ```python
76
+ >>> from transformers import pipeline
77
+
78
+ >>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks")
79
+ >>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
80
+ [{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}]
81
+ ```
82
+
83
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
84
+
85
+
86
+ This pipeline can currently be loaded from [`pipeline`] using the following task identifier:
87
+ `"audio-classification"`.
88
+
89
+ See the list of available models on
90
+ [huggingface.co/models](https://huggingface.co/models?filter=audio-classification).
91
+ """
92
+
93
+ def __init__(self, *args, **kwargs):
94
+ # Default, might be overridden by the model.config.
95
+ kwargs["top_k"] = 5
96
+ super().__init__(*args, **kwargs)
97
+
98
+ if self.framework != "pt":
99
+ raise ValueError(f"The {self.__class__} is only available in PyTorch.")
100
+
101
+ self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES)
102
+
103
+ def __call__(
104
+ self,
105
+ inputs: Union[np.ndarray, bytes, str],
106
+ **kwargs,
107
+ ):
108
+ """
109
+ Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more
110
+ information.
111
+
112
+ Args:
113
+ inputs (`np.ndarray` or `bytes` or `str` or `dict`):
114
+ The inputs is either :
115
+ - `str` that is the filename of the audio file, the file will be read at the correct sampling rate
116
+ to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
117
+ - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
118
+ same way.
119
+ - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
120
+ Raw audio at the correct sampling rate (no further check will be done)
121
+ - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
122
+ pipeline do the resampling. The dict must be either be in the format `{"sampling_rate": int,
123
+ "raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or
124
+ `"array"` is used to denote the raw audio waveform.
125
+ top_k (`int`, *optional*, defaults to None):
126
+ The number of top labels that will be returned by the pipeline. If the provided number is `None` or
127
+ higher than the number of labels available in the model configuration, it will default to the number of
128
+ labels.
129
+
130
+ Return:
131
+ A list of `dict` with the following keys:
132
+
133
+ - **label** (`str`) -- The label predicted.
134
+ - **score** (`float`) -- The corresponding probability.
135
+ """
136
+ return super().__call__(inputs, **kwargs)
137
+
138
+ def _sanitize_parameters(self, top_k=None, **kwargs):
139
+ # No parameters on this pipeline right now
140
+ postprocess_params = {}
141
+ if top_k is not None:
142
+ if top_k > self.model.config.num_labels:
143
+ top_k = self.model.config.num_labels
144
+ postprocess_params["top_k"] = top_k
145
+ return {}, {}, postprocess_params
146
+
147
+ def preprocess(self, inputs):
148
+ if isinstance(inputs, str):
149
+ if inputs.startswith("http://") or inputs.startswith("https://"):
150
+ # We need to actually check for a real protocol, otherwise it's impossible to use a local file
151
+ # like http_huggingface_co.png
152
+ inputs = requests.get(inputs).content
153
+ else:
154
+ with open(inputs, "rb") as f:
155
+ inputs = f.read()
156
+
157
+ if isinstance(inputs, bytes):
158
+ inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)
159
+
160
+ if isinstance(inputs, dict):
161
+ # Accepting `"array"` which is the key defined in `datasets` for
162
+ # better integration
163
+ if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
164
+ raise ValueError(
165
+ "When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a "
166
+ '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, '
167
+ "containing the sampling_rate associated with that array"
168
+ )
169
+
170
+ _inputs = inputs.pop("raw", None)
171
+ if _inputs is None:
172
+ # Remove path which will not be used from `datasets`.
173
+ inputs.pop("path", None)
174
+ _inputs = inputs.pop("array", None)
175
+ in_sampling_rate = inputs.pop("sampling_rate")
176
+ inputs = _inputs
177
+ if in_sampling_rate != self.feature_extractor.sampling_rate:
178
+ import torch
179
+
180
+ if is_torchaudio_available():
181
+ from torchaudio import functional as F
182
+ else:
183
+ raise ImportError(
184
+ "torchaudio is required to resample audio samples in AudioClassificationPipeline. "
185
+ "The torchaudio package can be installed through: `pip install torchaudio`."
186
+ )
187
+
188
+ inputs = F.resample(
189
+ torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate
190
+ ).numpy()
191
+
192
+ if not isinstance(inputs, np.ndarray):
193
+ raise ValueError("We expect a numpy ndarray as input")
194
+ if len(inputs.shape) != 1:
195
+ raise ValueError("We expect a single channel audio input for AudioClassificationPipeline")
196
+
197
+ processed = self.feature_extractor(
198
+ inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
199
+ )
200
+ return processed
201
+
202
+ def _forward(self, model_inputs):
203
+ model_outputs = self.model(**model_inputs)
204
+ return model_outputs
205
+
206
+ def postprocess(self, model_outputs, top_k=5):
207
+ probs = model_outputs.logits[0].softmax(-1)
208
+ scores, ids = probs.topk(top_k)
209
+
210
+ scores = scores.tolist()
211
+ ids = ids.tolist()
212
+
213
+ labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
214
+
215
+ return labels
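A hedged sketch of the dict input path handled by `preprocess` above; the checkpoint is the same one used in the class docstring, and the silent waveform is only a placeholder:

```python
import numpy as np

from transformers import pipeline

classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")

# Raw mono waveform at 16 kHz, the rate this checkpoint's feature extractor expects,
# so the torchaudio resampling branch is not triggered.
waveform = np.zeros(8000, dtype=np.float32)  # 0.5 s of silence
print(classifier({"sampling_rate": 16000, "raw": waveform}, top_k=2))
# -> [{'score': ..., 'label': ...}, {'score': ..., 'label': ...}]
```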
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/document_question_answering.py ADDED
@@ -0,0 +1,502 @@
1
+ # Copyright 2022 The Impira Team and the HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+
20
+ from ..utils import (
21
+ ExplicitEnum,
22
+ add_end_docstrings,
23
+ is_pytesseract_available,
24
+ is_torch_available,
25
+ is_vision_available,
26
+ logging,
27
+ )
28
+ from .base import ChunkPipeline, build_pipeline_init_args
29
+ from .question_answering import select_starts_ends
30
+
31
+
32
+ if is_vision_available():
33
+ from PIL import Image
34
+
35
+ from ..image_utils import load_image
36
+
37
+ if is_torch_available():
38
+ import torch
39
+
40
+ from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES
41
+
42
+ TESSERACT_LOADED = False
43
+ if is_pytesseract_available():
44
+ TESSERACT_LOADED = True
45
+ import pytesseract
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ # normalize_box() and apply_tesseract() are derived from apply_tesseract in models/layoutlmv3/feature_extraction_layoutlmv3.py.
51
+ # However, because the pipeline may evolve from what layoutlmv3 currently does, it's copied (vs. imported) to avoid creating an
52
+ # unnecessary dependency.
53
+ def normalize_box(box, width, height):
54
+ return [
55
+ int(1000 * (box[0] / width)),
56
+ int(1000 * (box[1] / height)),
57
+ int(1000 * (box[2] / width)),
58
+ int(1000 * (box[3] / height)),
59
+ ]
60
+
61
+
62
+ def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]):
63
+ """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
64
+ # apply OCR
65
+ data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config)
66
+ words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
67
+
68
+ # filter empty words and corresponding coordinates
69
+ irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
70
+ words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
71
+ left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
72
+ top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
73
+ width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
74
+ height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
75
+
76
+ # turn coordinates into (left, top, left+width, top+height) format
77
+ actual_boxes = []
78
+ for x, y, w, h in zip(left, top, width, height):
79
+ actual_box = [x, y, x + w, y + h]
80
+ actual_boxes.append(actual_box)
81
+
82
+ image_width, image_height = image.size
83
+
84
+ # finally, normalize the bounding boxes
85
+ normalized_boxes = []
86
+ for box in actual_boxes:
87
+ normalized_boxes.append(normalize_box(box, image_width, image_height))
88
+
89
+ if len(words) != len(normalized_boxes):
90
+ raise ValueError("Not as many words as there are bounding boxes")
91
+
92
+ return words, normalized_boxes
93
+
94
+
95
+ class ModelType(ExplicitEnum):
96
+ LayoutLM = "layoutlm"
97
+ LayoutLMv2andv3 = "layoutlmv2andv3"
98
+ VisionEncoderDecoder = "vision_encoder_decoder"
99
+
100
+
101
+ @add_end_docstrings(build_pipeline_init_args(has_image_processor=True, has_tokenizer=True))
102
+ class DocumentQuestionAnsweringPipeline(ChunkPipeline):
103
+ # TODO: Update task_summary docs to include an example with document QA and then update the first sentence
104
+ """
105
+ Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are
106
+ similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd
107
+ words/boxes) as input instead of text context.
108
+
109
+ Example:
110
+
111
+ ```python
112
+ >>> from transformers import pipeline
113
+
114
+ >>> document_qa = pipeline(model="impira/layoutlm-document-qa")
115
+ >>> document_qa(
116
+ ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
117
+ ... question="What is the invoice number?",
118
+ ... )
119
+ [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}]
120
+ ```
121
+
122
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
123
+
124
+ This document question answering pipeline can currently be loaded from [`pipeline`] using the following task
125
+ identifier: `"document-question-answering"`.
126
+
127
+ The models that this pipeline can use are models that have been fine-tuned on a document question answering task.
128
+ See the up-to-date list of available models on
129
+ [huggingface.co/models](https://huggingface.co/models?filter=document-question-answering).
130
+ """
131
+
132
+ def __init__(self, *args, **kwargs):
133
+ super().__init__(*args, **kwargs)
134
+ if self.tokenizer is not None and not self.tokenizer.__class__.__name__.endswith("Fast"):
135
+ raise ValueError(
136
+ "`DocumentQuestionAnsweringPipeline` requires a fast tokenizer, but a slow tokenizer "
137
+ f"(`{self.tokenizer.__class__.__name__}`) is provided."
138
+ )
139
+
140
+ if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig":
141
+ self.model_type = ModelType.VisionEncoderDecoder
142
+ if self.model.config.encoder.model_type != "donut-swin":
143
+ raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut")
144
+ else:
145
+ self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES)
146
+ if self.model.config.__class__.__name__ == "LayoutLMConfig":
147
+ self.model_type = ModelType.LayoutLM
148
+ else:
149
+ self.model_type = ModelType.LayoutLMv2andv3
150
+
151
+ def _sanitize_parameters(
152
+ self,
153
+ padding=None,
154
+ doc_stride=None,
155
+ max_question_len=None,
156
+ lang: Optional[str] = None,
157
+ tesseract_config: Optional[str] = None,
158
+ max_answer_len=None,
159
+ max_seq_len=None,
160
+ top_k=None,
161
+ handle_impossible_answer=None,
162
+ timeout=None,
163
+ **kwargs,
164
+ ):
165
+ preprocess_params, postprocess_params = {}, {}
166
+ if padding is not None:
167
+ preprocess_params["padding"] = padding
168
+ if doc_stride is not None:
169
+ preprocess_params["doc_stride"] = doc_stride
170
+ if max_question_len is not None:
171
+ preprocess_params["max_question_len"] = max_question_len
172
+ if max_seq_len is not None:
173
+ preprocess_params["max_seq_len"] = max_seq_len
174
+ if lang is not None:
175
+ preprocess_params["lang"] = lang
176
+ if tesseract_config is not None:
177
+ preprocess_params["tesseract_config"] = tesseract_config
178
+ if timeout is not None:
179
+ preprocess_params["timeout"] = timeout
180
+
181
+ if top_k is not None:
182
+ if top_k < 1:
183
+ raise ValueError(f"top_k parameter should be >= 1 (got {top_k})")
184
+ postprocess_params["top_k"] = top_k
185
+ if max_answer_len is not None:
186
+ if max_answer_len < 1:
187
+ raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}")
188
+ postprocess_params["max_answer_len"] = max_answer_len
189
+ if handle_impossible_answer is not None:
190
+ postprocess_params["handle_impossible_answer"] = handle_impossible_answer
191
+
192
+ return preprocess_params, {}, postprocess_params
193
+
194
+ def __call__(
195
+ self,
196
+ image: Union["Image.Image", str],
197
+ question: Optional[str] = None,
198
+ word_boxes: Tuple[str, List[float]] = None,
199
+ **kwargs,
200
+ ):
201
+ """
202
+ Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an
203
+ optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not
204
+ provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for
205
+ LayoutLM-like models which require them as input. For Donut, no OCR is run.
206
+
207
+ You can invoke the pipeline several ways:
208
+
209
+ - `pipeline(image=image, question=question)`
210
+ - `pipeline(image=image, question=question, word_boxes=word_boxes)`
211
+ - `pipeline([{"image": image, "question": question}])`
212
+ - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])`
213
+
214
+ Args:
215
+ image (`str` or `PIL.Image`):
216
+ The pipeline handles three types of images:
217
+
218
+ - A string containing a http link pointing to an image
219
+ - A string containing a local path to an image
220
+ - An image loaded in PIL directly
221
+
222
+ The pipeline accepts either a single image or a batch of images. If given a single image, it can be
223
+ broadcasted to multiple questions.
224
+ question (`str`):
225
+ A question to ask of the document.
226
+ word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*):
227
+ A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the
228
+ pipeline will use these words and boxes instead of running OCR on the image to derive them for models
229
+ that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the
230
+ pipeline without having to re-run it each time.
231
+ top_k (`int`, *optional*, defaults to 1):
232
+ The number of answers to return (will be chosen by order of likelihood). Note that we return fewer than
233
+ top_k answers if there are not enough options available within the context.
234
+ doc_stride (`int`, *optional*, defaults to 128):
235
+ If the words in the document are too long to fit with the question for the model, it will be split in
236
+ several chunks with some overlap. This argument controls the size of that overlap.
237
+ max_answer_len (`int`, *optional*, defaults to 15):
238
+ The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
239
+ max_seq_len (`int`, *optional*, defaults to 384):
240
+ The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
241
+ model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
242
+ max_question_len (`int`, *optional*, defaults to 64):
243
+ The maximum length of the question after tokenization. It will be truncated if needed.
244
+ handle_impossible_answer (`bool`, *optional*, defaults to `False`):
245
+ Whether or not we accept impossible as an answer.
246
+ lang (`str`, *optional*):
247
+ Language to use while running OCR. Defaults to English.
248
+ tesseract_config (`str`, *optional*):
249
+ Additional flags to pass to tesseract while running OCR.
250
+ timeout (`float`, *optional*, defaults to None):
251
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
252
+ the call may block forever.
253
+
254
+ Return:
255
+ A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
256
+
257
+ - **score** (`float`) -- The probability associated with the answer.
258
+ - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided
259
+ `word_boxes`).
260
+ - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided
261
+ `word_boxes`).
262
+ - **answer** (`str`) -- The answer to the question.
263
+ - **words** (`list[int]`) -- The indices of the word/box pairs that are in the answer.
264
+ """
265
+ if isinstance(question, str):
266
+ inputs = {"question": question, "image": image}
267
+ if word_boxes is not None:
268
+ inputs["word_boxes"] = word_boxes
269
+ else:
270
+ inputs = image
271
+ return super().__call__(inputs, **kwargs)
272
+
273
+ def preprocess(
274
+ self,
275
+ input,
276
+ padding="do_not_pad",
277
+ doc_stride=None,
278
+ max_seq_len=None,
279
+ word_boxes: Tuple[str, List[float]] = None,
280
+ lang=None,
281
+ tesseract_config="",
282
+ timeout=None,
283
+ ):
284
+ # NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR
285
+ # to support documents with enough tokens that overflow the model's window
286
+ if max_seq_len is None:
287
+ max_seq_len = self.tokenizer.model_max_length
288
+
289
+ if doc_stride is None:
290
+ doc_stride = min(max_seq_len // 2, 256)
291
+
292
+ image = None
293
+ image_features = {}
294
+ if input.get("image", None) is not None:
295
+ image = load_image(input["image"], timeout=timeout)
296
+ if self.image_processor is not None:
297
+ image_features.update(self.image_processor(images=image, return_tensors=self.framework))
298
+ elif self.feature_extractor is not None:
299
+ image_features.update(self.feature_extractor(images=image, return_tensors=self.framework))
300
+ elif self.model_type == ModelType.VisionEncoderDecoder:
301
+ raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor")
302
+
303
+ words, boxes = None, None
304
+ if not self.model_type == ModelType.VisionEncoderDecoder:
305
+ if "word_boxes" in input:
306
+ words = [x[0] for x in input["word_boxes"]]
307
+ boxes = [x[1] for x in input["word_boxes"]]
308
+ elif "words" in image_features and "boxes" in image_features:
309
+ words = image_features.pop("words")[0]
310
+ boxes = image_features.pop("boxes")[0]
311
+ elif image is not None:
312
+ if not TESSERACT_LOADED:
313
+ raise ValueError(
314
+ "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract,"
315
+ " but pytesseract is not available"
316
+ )
317
+ if TESSERACT_LOADED:
318
+ words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config)
319
+ else:
320
+ raise ValueError(
321
+ "You must provide an image or word_boxes. If you provide an image, the pipeline will automatically"
322
+ " run OCR to derive words and boxes"
323
+ )
324
+
325
+ if self.tokenizer.padding_side != "right":
326
+ raise ValueError(
327
+ "Document question answering only supports tokenizers whose padding side is 'right', not"
328
+ f" {self.tokenizer.padding_side}"
329
+ )
330
+
331
+ if self.model_type == ModelType.VisionEncoderDecoder:
332
+ task_prompt = f'<s_docvqa><s_question>{input["question"]}</s_question><s_answer>'
333
+ # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py
334
+ encoding = {
335
+ "inputs": image_features["pixel_values"],
336
+ "decoder_input_ids": self.tokenizer(
337
+ task_prompt, add_special_tokens=False, return_tensors=self.framework
338
+ ).input_ids,
339
+ "return_dict_in_generate": True,
340
+ }
341
+ yield {
342
+ **encoding,
343
+ "p_mask": None,
344
+ "word_ids": None,
345
+ "words": None,
346
+ "output_attentions": True,
347
+ "is_last": True,
348
+ }
349
+ else:
350
+ tokenizer_kwargs = {}
351
+ if self.model_type == ModelType.LayoutLM:
352
+ tokenizer_kwargs["text"] = input["question"].split()
353
+ tokenizer_kwargs["text_pair"] = words
354
+ tokenizer_kwargs["is_split_into_words"] = True
355
+ else:
356
+ tokenizer_kwargs["text"] = [input["question"]]
357
+ tokenizer_kwargs["text_pair"] = [words]
358
+ tokenizer_kwargs["boxes"] = [boxes]
359
+
360
+ encoding = self.tokenizer(
361
+ padding=padding,
362
+ max_length=max_seq_len,
363
+ stride=doc_stride,
364
+ return_token_type_ids=True,
365
+ truncation="only_second",
366
+ return_overflowing_tokens=True,
367
+ **tokenizer_kwargs,
368
+ )
369
+ # TODO: check why slower `LayoutLMTokenizer` and `LayoutLMv2Tokenizer` don't have this key in outputs
370
+ # FIXME: ydshieh and/or Narsil
371
+ encoding.pop("overflow_to_sample_mapping", None) # We do not use this
372
+
373
+ num_spans = len(encoding["input_ids"])
374
+
375
+ # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
376
+ # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens)
377
+ # This logic mirrors the logic in the question_answering pipeline
378
+ p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)]
379
+ for span_idx in range(num_spans):
380
+ if self.framework == "pt":
381
+ span_encoding = {k: torch.tensor(v[span_idx : span_idx + 1]) for (k, v) in encoding.items()}
382
+ if "pixel_values" in image_features:
383
+ span_encoding["image"] = image_features["pixel_values"]
384
+ else:
385
+ raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline")
386
+
387
+ input_ids_span_idx = encoding["input_ids"][span_idx]
388
+ # keep the cls_token unmasked (some models use it to indicate unanswerable questions)
389
+ if self.tokenizer.cls_token_id is not None:
390
+ cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0]
391
+ for cls_index in cls_indices:
392
+ p_mask[span_idx][cls_index] = 0
393
+
394
+ # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000]
395
+ # for SEP tokens, and the word's bounding box for words in the original document.
396
+ if "boxes" not in tokenizer_kwargs:
397
+ bbox = []
398
+ for input_id, sequence_id, word_id in zip(
399
+ encoding.input_ids[span_idx],
400
+ encoding.sequence_ids(span_idx),
401
+ encoding.word_ids(span_idx),
402
+ ):
403
+ if sequence_id == 1:
404
+ bbox.append(boxes[word_id])
405
+ elif input_id == self.tokenizer.sep_token_id:
406
+ bbox.append([1000] * 4)
407
+ else:
408
+ bbox.append([0] * 4)
409
+
410
+ if self.framework == "pt":
411
+ span_encoding["bbox"] = torch.tensor(bbox).unsqueeze(0)
412
+ elif self.framework == "tf":
413
+ raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline")
414
+ yield {
415
+ **span_encoding,
416
+ "p_mask": p_mask[span_idx],
417
+ "word_ids": encoding.word_ids(span_idx),
418
+ "words": words,
419
+ "is_last": span_idx == num_spans - 1,
420
+ }
421
+
422
+ def _forward(self, model_inputs, **generate_kwargs):
423
+ p_mask = model_inputs.pop("p_mask", None)
424
+ word_ids = model_inputs.pop("word_ids", None)
425
+ words = model_inputs.pop("words", None)
426
+ is_last = model_inputs.pop("is_last", False)
427
+
428
+ if self.model_type == ModelType.VisionEncoderDecoder:
429
+ model_outputs = self.model.generate(**model_inputs, **generate_kwargs)
430
+ else:
431
+ model_outputs = self.model(**model_inputs)
432
+
433
+ model_outputs = dict(model_outputs.items())
434
+ model_outputs["p_mask"] = p_mask
435
+ model_outputs["word_ids"] = word_ids
436
+ model_outputs["words"] = words
437
+ model_outputs["attention_mask"] = model_inputs.get("attention_mask", None)
438
+ model_outputs["is_last"] = is_last
439
+ return model_outputs
440
+
441
+ def postprocess(self, model_outputs, top_k=1, **kwargs):
442
+ if self.model_type == ModelType.VisionEncoderDecoder:
443
+ answers = [self.postprocess_encoder_decoder_single(o) for o in model_outputs]
444
+ else:
445
+ answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs)
446
+
447
+ answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k]
448
+ return answers
449
+
450
+ def postprocess_encoder_decoder_single(self, model_outputs, **kwargs):
451
+ sequence = self.tokenizer.batch_decode(model_outputs["sequences"])[0]
452
+
453
+ # TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer
454
+ # (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context).
455
+ sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "")
456
+ sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token
457
+ ret = {
458
+ "answer": None,
459
+ }
460
+
461
+ answer = re.search(r"<s_answer>(.*)</s_answer>", sequence)
462
+ if answer is not None:
463
+ ret["answer"] = answer.group(1).strip()
464
+ return ret
465
+
466
+ def postprocess_extractive_qa(
467
+ self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs
468
+ ):
469
+ min_null_score = 1000000 # large and positive
470
+ answers = []
471
+ for output in model_outputs:
472
+ words = output["words"]
473
+
474
+ starts, ends, scores, min_null_score = select_starts_ends(
475
+ start=output["start_logits"],
476
+ end=output["end_logits"],
477
+ p_mask=output["p_mask"],
478
+ attention_mask=output["attention_mask"].numpy()
479
+ if output.get("attention_mask", None) is not None
480
+ else None,
481
+ min_null_score=min_null_score,
482
+ top_k=top_k,
483
+ handle_impossible_answer=handle_impossible_answer,
484
+ max_answer_len=max_answer_len,
485
+ )
486
+ word_ids = output["word_ids"]
487
+ for start, end, score in zip(starts, ends, scores):
488
+ word_start, word_end = word_ids[start], word_ids[end]
489
+ if word_start is not None and word_end is not None:
490
+ answers.append(
491
+ {
492
+ "score": float(score),
493
+ "answer": " ".join(words[word_start : word_end + 1]),
494
+ "start": word_start,
495
+ "end": word_end,
496
+ }
497
+ )
498
+
499
+ if handle_impossible_answer:
500
+ answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0})
501
+
502
+ return answers
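For orientation, here is a minimal usage sketch of the document-question-answering pipeline defined above. The model id, image path, and OCR word/box values are illustrative assumptions, not taken from this diff; the OCR path additionally requires `pytesseract` to be installed.

```python
from transformers import pipeline

# Hypothetical model id and file name, chosen only for illustration.
doc_qa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")

# 1) Let the pipeline run Tesseract OCR on the image (needs pytesseract installed).
print(doc_qa(image="invoice.png", question="What is the invoice number?"))

# 2) Reuse pre-computed OCR results instead: (word, box) pairs with boxes normalized to 0-1000.
word_boxes = [("Invoice", [50, 40, 180, 60]), ("#1234", [190, 40, 260, 60])]
print(doc_qa(image="invoice.png", question="What is the invoice number?", word_boxes=word_boxes))
```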
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_classification.py ADDED
@@ -0,0 +1,201 @@
1
+ from typing import List, Union
2
+
3
+ import numpy as np
4
+
5
+ from ..utils import (
6
+ ExplicitEnum,
7
+ add_end_docstrings,
8
+ is_tf_available,
9
+ is_torch_available,
10
+ is_vision_available,
11
+ logging,
12
+ requires_backends,
13
+ )
14
+ from .base import Pipeline, build_pipeline_init_args
15
+
16
+
17
+ if is_vision_available():
18
+ from PIL import Image
19
+
20
+ from ..image_utils import load_image
21
+
22
+ if is_tf_available():
23
+ from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
24
+
25
+ if is_torch_available():
26
+ from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ # Copied from transformers.pipelines.text_classification.sigmoid
32
+ def sigmoid(_outputs):
33
+ return 1.0 / (1.0 + np.exp(-_outputs))
34
+
35
+
36
+ # Copied from transformers.pipelines.text_classification.softmax
37
+ def softmax(_outputs):
38
+ maxes = np.max(_outputs, axis=-1, keepdims=True)
39
+ shifted_exp = np.exp(_outputs - maxes)
40
+ return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
41
+
42
+
43
+ # Copied from transformers.pipelines.text_classification.ClassificationFunction
44
+ class ClassificationFunction(ExplicitEnum):
45
+ SIGMOID = "sigmoid"
46
+ SOFTMAX = "softmax"
47
+ NONE = "none"
48
+
49
+
50
+ @add_end_docstrings(
51
+ build_pipeline_init_args(has_image_processor=True),
52
+ r"""
53
+ function_to_apply (`str`, *optional*, defaults to `"default"`):
54
+ The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
55
+
56
+ - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
57
+ has several labels, will apply the softmax function on the output.
58
+ - `"sigmoid"`: Applies the sigmoid function on the output.
59
+ - `"softmax"`: Applies the softmax function on the output.
60
+ - `"none"`: Does not apply any function on the output.""",
61
+ )
62
+ class ImageClassificationPipeline(Pipeline):
63
+ """
64
+ Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an
65
+ image.
66
+
67
+ Example:
68
+
69
+ ```python
70
+ >>> from transformers import pipeline
71
+
72
+ >>> classifier = pipeline(model="microsoft/beit-base-patch16-224-pt22k-ft22k")
73
+ >>> classifier("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
74
+ [{'score': 0.442, 'label': 'macaw'}, {'score': 0.088, 'label': 'popinjay'}, {'score': 0.075, 'label': 'parrot'}, {'score': 0.073, 'label': 'parodist, lampooner'}, {'score': 0.046, 'label': 'poll, poll_parrot'}]
75
+ ```
76
+
77
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
78
+
79
+ This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier:
80
+ `"image-classification"`.
81
+
82
+ See the list of available models on
83
+ [huggingface.co/models](https://huggingface.co/models?filter=image-classification).
84
+ """
85
+
86
+ function_to_apply: ClassificationFunction = ClassificationFunction.NONE
87
+
88
+ def __init__(self, *args, **kwargs):
89
+ super().__init__(*args, **kwargs)
90
+ requires_backends(self, "vision")
91
+ self.check_model_type(
92
+ TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
93
+ if self.framework == "tf"
94
+ else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
95
+ )
96
+
97
+ def _sanitize_parameters(self, top_k=None, function_to_apply=None, timeout=None):
98
+ preprocess_params = {}
99
+ if timeout is not None:
100
+ preprocess_params["timeout"] = timeout
101
+ postprocess_params = {}
102
+ if top_k is not None:
103
+ postprocess_params["top_k"] = top_k
104
+ if isinstance(function_to_apply, str):
105
+ function_to_apply = ClassificationFunction(function_to_apply.lower())
106
+ if function_to_apply is not None:
107
+ postprocess_params["function_to_apply"] = function_to_apply
108
+ return preprocess_params, {}, postprocess_params
109
+
110
+ def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
111
+ """
112
+ Assign labels to the image(s) passed as inputs.
113
+
114
+ Args:
115
+ images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
116
+ The pipeline handles three types of images:
117
+
118
+ - A string containing a http link pointing to an image
119
+ - A string containing a local path to an image
120
+ - An image loaded in PIL directly
121
+
122
+ The pipeline accepts either a single image or a batch of images.
123
+ Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL
124
+ images.
125
+ function_to_apply (`str`, *optional*, defaults to `"default"`):
126
+ The function to apply to the model outputs in order to retrieve the scores. Accepts four different
127
+ values:
128
+
129
+ If this argument is not specified, then it will apply the following functions according to the number
130
+ of labels:
131
+
132
+ - If the model has a single label, will apply the sigmoid function on the output.
133
+ - If the model has several labels, will apply the softmax function on the output.
134
+
135
+ Possible values are:
136
+
137
+ - `"sigmoid"`: Applies the sigmoid function on the output.
138
+ - `"softmax"`: Applies the softmax function on the output.
139
+ - `"none"`: Does not apply any function on the output.
140
+ top_k (`int`, *optional*, defaults to 5):
141
+ The number of top labels that will be returned by the pipeline. If the provided number is higher than
142
+ the number of labels available in the model configuration, it will default to the number of labels.
143
+ timeout (`float`, *optional*, defaults to None):
144
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
145
+ the call may block forever.
146
+
147
+ Return:
148
+ A dictionary or a list of dictionaries containing the result. If the input is a single image, it will return a
149
+ dictionary; if the input is a list of several images, it will return a list of dictionaries corresponding to
150
+ the images.
151
+
152
+ The dictionaries contain the following keys:
153
+
154
+ - **label** (`str`) -- The label identified by the model.
155
+ - **score** (`float`) -- The score attributed by the model for that label.
156
+ """
157
+ return super().__call__(images, **kwargs)
158
+
159
+ def preprocess(self, image, timeout=None):
160
+ image = load_image(image, timeout=timeout)
161
+ model_inputs = self.image_processor(images=image, return_tensors=self.framework)
162
+ return model_inputs
163
+
164
+ def _forward(self, model_inputs):
165
+ model_outputs = self.model(**model_inputs)
166
+ return model_outputs
167
+
168
+ def postprocess(self, model_outputs, function_to_apply=None, top_k=5):
169
+ if function_to_apply is None:
170
+ if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
171
+ function_to_apply = ClassificationFunction.SIGMOID
172
+ elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
173
+ function_to_apply = ClassificationFunction.SOFTMAX
174
+ elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
175
+ function_to_apply = self.model.config.function_to_apply
176
+ else:
177
+ function_to_apply = ClassificationFunction.NONE
178
+
179
+ if top_k > self.model.config.num_labels:
180
+ top_k = self.model.config.num_labels
181
+
182
+ outputs = model_outputs["logits"][0]
183
+ outputs = outputs.numpy()
184
+
185
+ if function_to_apply == ClassificationFunction.SIGMOID:
186
+ scores = sigmoid(outputs)
187
+ elif function_to_apply == ClassificationFunction.SOFTMAX:
188
+ scores = softmax(outputs)
189
+ elif function_to_apply == ClassificationFunction.NONE:
190
+ scores = outputs
191
+ else:
192
+ raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
193
+
194
+ dict_scores = [
195
+ {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
196
+ ]
197
+ dict_scores.sort(key=lambda x: x["score"], reverse=True)
198
+ if top_k is not None:
199
+ dict_scores = dict_scores[:top_k]
200
+
201
+ return dict_scores
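As a small worked example of the scoring logic in `postprocess` above (toy logits, not real model output):

```python
import numpy as np

# Toy logits standing in for model_outputs["logits"][0].numpy()
logits = np.array([2.0, 0.5, -1.0])

# "softmax" branch (single-label problems, num_labels > 1)
shifted = np.exp(logits - logits.max())
softmax_scores = shifted / shifted.sum()          # sums to 1.0

# "sigmoid" branch (multi-label problems or num_labels == 1)
sigmoid_scores = 1.0 / (1.0 + np.exp(-logits))    # each score independently in (0, 1)

# The pipeline then pairs each score with config.id2label, sorts by score, and keeps the top_k entries.
```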
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_segmentation.py ADDED
@@ -0,0 +1,211 @@
1
+ from typing import Any, Dict, List, Union
2
+
3
+ import numpy as np
4
+
5
+ from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
6
+ from .base import Pipeline, build_pipeline_init_args
7
+
8
+
9
+ if is_vision_available():
10
+ from PIL import Image
11
+
12
+ from ..image_utils import load_image
13
+
14
+ if is_torch_available():
15
+ from ..models.auto.modeling_auto import (
16
+ MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES,
17
+ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES,
18
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
19
+ MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES,
20
+ )
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ Prediction = Dict[str, Any]
27
+ Predictions = List[Prediction]
28
+
29
+
30
+ @add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
31
+ class ImageSegmentationPipeline(Pipeline):
32
+ """
33
+ Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and
34
+ their classes.
35
+
36
+ Example:
37
+
38
+ ```python
39
+ >>> from transformers import pipeline
40
+
41
+ >>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic")
42
+ >>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
43
+ >>> len(segments)
44
+ 2
45
+
46
+ >>> segments[0]["label"]
47
+ 'bird'
48
+
49
+ >>> segments[1]["label"]
50
+ 'bird'
51
+
52
+ >>> type(segments[0]["mask"]) # This is a black and white mask showing where the bird is on the original image.
53
+ <class 'PIL.Image.Image'>
54
+
55
+ >>> segments[0]["mask"].size
56
+ (768, 512)
57
+ ```
58
+
59
+
60
+ This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
61
+ `"image-segmentation"`.
62
+
63
+ See the list of available models on
64
+ [huggingface.co/models](https://huggingface.co/models?filter=image-segmentation).
65
+ """
66
+
67
+ def __init__(self, *args, **kwargs):
68
+ super().__init__(*args, **kwargs)
69
+
70
+ if self.framework == "tf":
71
+ raise ValueError(f"The {self.__class__} is only available in PyTorch.")
72
+
73
+ requires_backends(self, "vision")
74
+ mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES.copy()
75
+ mapping.update(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES)
76
+ mapping.update(MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES)
77
+ mapping.update(MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES)
78
+ self.check_model_type(mapping)
79
+
80
+ def _sanitize_parameters(self, **kwargs):
81
+ preprocess_kwargs = {}
82
+ postprocess_kwargs = {}
83
+ if "subtask" in kwargs:
84
+ postprocess_kwargs["subtask"] = kwargs["subtask"]
85
+ preprocess_kwargs["subtask"] = kwargs["subtask"]
86
+ if "threshold" in kwargs:
87
+ postprocess_kwargs["threshold"] = kwargs["threshold"]
88
+ if "mask_threshold" in kwargs:
89
+ postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"]
90
+ if "overlap_mask_area_threshold" in kwargs:
91
+ postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"]
92
+ if "timeout" in kwargs:
93
+ preprocess_kwargs["timeout"] = kwargs["timeout"]
94
+
95
+ return preprocess_kwargs, {}, postprocess_kwargs
96
+
97
+ def __call__(self, images, **kwargs) -> Union[Predictions, List[Prediction]]:
98
+ """
99
+ Perform segmentation (detect masks & classes) in the image(s) passed as inputs.
100
+
101
+ Args:
102
+ images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
103
+ The pipeline handles three types of images:
104
+
105
+ - A string containing an HTTP(S) link pointing to an image
106
+ - A string containing a local path to an image
107
+ - An image loaded in PIL directly
108
+
109
+ The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
110
+ same format: all as HTTP(S) links, all as local paths, or all as PIL images.
111
+ subtask (`str`, *optional*):
112
+ Segmentation task to be performed, choose [`semantic`, `instance` and `panoptic`] depending on model
113
+ capabilities. If not set, the pipeline will attempt to resolve in the following order:
114
+ `panoptic`, `instance`, `semantic`.
115
+ threshold (`float`, *optional*, defaults to 0.9):
116
+ Probability threshold to filter out predicted masks.
117
+ mask_threshold (`float`, *optional*, defaults to 0.5):
118
+ Threshold to use when turning the predicted masks into binary values.
119
+ overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5):
120
+ Mask overlap threshold to eliminate small, disconnected segments.
121
+ timeout (`float`, *optional*, defaults to None):
122
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
123
+ the call may block forever.
124
+
125
+ Return:
126
+ A dictionary or a list of dictionaries containing the result. If the input is a single image, it will return a
127
+ list of dictionaries; if the input is a list of several images, it will return a list of lists of dictionaries
128
+ corresponding to each image.
129
+
130
+ The dictionaries contain the mask, label and score (where applicable) of each detected object and contain
131
+ the following keys:
132
+
133
+ - **label** (`str`) -- The class label identified by the model.
134
+ - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape (width, height) of
135
+ the original image. Returns a mask filled with zeros if no object is found.
136
+ - **score** (*optional* `float`) -- The confidence score for the "object" described by the label and the mask,
137
+ returned when the model is able to estimate it.
138
+ """
139
+ return super().__call__(images, **kwargs)
140
+
141
+ def preprocess(self, image, subtask=None, timeout=None):
142
+ image = load_image(image, timeout=timeout)
143
+ target_size = [(image.height, image.width)]
144
+ if self.model.config.__class__.__name__ == "OneFormerConfig":
145
+ if subtask is None:
146
+ kwargs = {}
147
+ else:
148
+ kwargs = {"task_inputs": [subtask]}
149
+ inputs = self.image_processor(images=[image], return_tensors="pt", **kwargs)
150
+ inputs["task_inputs"] = self.tokenizer(
151
+ inputs["task_inputs"],
152
+ padding="max_length",
153
+ max_length=self.model.config.task_seq_len,
154
+ return_tensors=self.framework,
155
+ )["input_ids"]
156
+ else:
157
+ inputs = self.image_processor(images=[image], return_tensors="pt")
158
+ inputs["target_size"] = target_size
159
+ return inputs
160
+
161
+ def _forward(self, model_inputs):
162
+ target_size = model_inputs.pop("target_size")
163
+ model_outputs = self.model(**model_inputs)
164
+ model_outputs["target_size"] = target_size
165
+ return model_outputs
166
+
167
+ def postprocess(
168
+ self, model_outputs, subtask=None, threshold=0.9, mask_threshold=0.5, overlap_mask_area_threshold=0.5
169
+ ):
170
+ fn = None
171
+ if subtask in {"panoptic", None} and hasattr(self.image_processor, "post_process_panoptic_segmentation"):
172
+ fn = self.image_processor.post_process_panoptic_segmentation
173
+ elif subtask in {"instance", None} and hasattr(self.image_processor, "post_process_instance_segmentation"):
174
+ fn = self.image_processor.post_process_instance_segmentation
175
+
176
+ if fn is not None:
177
+ outputs = fn(
178
+ model_outputs,
179
+ threshold=threshold,
180
+ mask_threshold=mask_threshold,
181
+ overlap_mask_area_threshold=overlap_mask_area_threshold,
182
+ target_sizes=model_outputs["target_size"],
183
+ )[0]
184
+
185
+ annotation = []
186
+ segmentation = outputs["segmentation"]
187
+
188
+ for segment in outputs["segments_info"]:
189
+ mask = (segmentation == segment["id"]) * 255
190
+ mask = Image.fromarray(mask.numpy().astype(np.uint8), mode="L")
191
+ label = self.model.config.id2label[segment["label_id"]]
192
+ score = segment["score"]
193
+ annotation.append({"score": score, "label": label, "mask": mask})
194
+
195
+ elif subtask in {"semantic", None} and hasattr(self.image_processor, "post_process_semantic_segmentation"):
196
+ outputs = self.image_processor.post_process_semantic_segmentation(
197
+ model_outputs, target_sizes=model_outputs["target_size"]
198
+ )[0]
199
+
200
+ annotation = []
201
+ segmentation = outputs.numpy()
202
+ labels = np.unique(segmentation)
203
+
204
+ for label in labels:
205
+ mask = (segmentation == label) * 255
206
+ mask = Image.fromarray(mask.astype(np.uint8), mode="L")
207
+ label = self.model.config.id2label[label]
208
+ annotation.append({"score": None, "label": label, "mask": mask})
209
+ else:
210
+ raise ValueError(f"Subtask {subtask} is not supported for model {type(self.model)}")
211
+ return annotation
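A brief usage sketch for this pipeline, reusing the model and image from the class docstring above (the `subtask` and `threshold` values are illustrative):

```python
from transformers import pipeline

segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
results = segmenter(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    subtask="panoptic",
    threshold=0.9,
)
for result in results:
    # result["mask"] is a PIL.Image binary mask at the original image size
    print(result["label"], result["score"])
```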
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/image_to_image.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import List, Union
15
+
16
+ import numpy as np
17
+
18
+ from ..utils import (
19
+ add_end_docstrings,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ logging,
23
+ requires_backends,
24
+ )
25
+ from .base import Pipeline, build_pipeline_init_args
26
+
27
+
28
+ if is_vision_available():
29
+ from PIL import Image
30
+
31
+ from ..image_utils import load_image
32
+
33
+ if is_torch_available():
34
+ from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+
39
+ @add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
40
+ class ImageToImagePipeline(Pipeline):
41
+ """
42
+ Image to Image pipeline using any `AutoModelForImageToImage`. This pipeline generates an image based on a previous
43
+ image input.
44
+
45
+ Example:
46
+
47
+ ```python
48
+ >>> from PIL import Image
49
+ >>> import requests
50
+
51
+ >>> from transformers import pipeline
52
+
53
+ >>> upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
54
+ >>> img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
55
+ >>> img = img.resize((64, 64))
56
+ >>> upscaled_img = upscaler(img)
57
+ >>> img.size
58
+ (64, 64)
59
+
60
+ >>> upscaled_img.size
61
+ (144, 144)
62
+ ```
63
+
64
+ This image to image pipeline can currently be loaded from [`pipeline`] using the following task identifier:
65
+ `"image-to-image"`.
66
+
67
+ See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-to-image).
68
+ """
69
+
70
+ def __init__(self, *args, **kwargs):
71
+ super().__init__(*args, **kwargs)
72
+ requires_backends(self, "vision")
73
+ self.check_model_type(MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES)
74
+
75
+ def _sanitize_parameters(self, **kwargs):
76
+ preprocess_params = {}
77
+ postprocess_params = {}
78
+ forward_params = {}
79
+
80
+ if "timeout" in kwargs:
81
+ preprocess_params["timeout"] = kwargs["timeout"]
82
+ if "head_mask" in kwargs:
83
+ forward_params["head_mask"] = kwargs["head_mask"]
84
+
85
+ return preprocess_params, forward_params, postprocess_params
86
+
87
+ def __call__(
88
+ self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs
89
+ ) -> Union["Image.Image", List["Image.Image"]]:
90
+ """
91
+ Transform the image(s) passed as inputs.
92
+
93
+ Args:
94
+ images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
95
+ The pipeline handles three types of images:
96
+
97
+ - A string containing a http link pointing to an image
98
+ - A string containing a local path to an image
99
+ - An image loaded in PIL directly
100
+
101
+ The pipeline accepts either a single image or a batch of images, which must then be passed as a string.
102
+ Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL
103
+ images.
104
+ timeout (`float`, *optional*, defaults to None):
105
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
106
+ the call may block forever.
107
+
108
+ Return:
109
+ An image (Image.Image) or a list of images (List["Image.Image"]) containing result(s). If the input is a
110
+ single image, the return will be also a single image, if the input is a list of several images, it will
111
+ return a list of transformed images.
112
+ """
113
+ return super().__call__(images, **kwargs)
114
+
115
+ def _forward(self, model_inputs):
116
+ model_outputs = self.model(**model_inputs)
117
+ return model_outputs
118
+
119
+ def preprocess(self, image, timeout=None):
120
+ image = load_image(image, timeout=timeout)
121
+ inputs = self.image_processor(images=[image], return_tensors="pt")
122
+ return inputs
123
+
124
+ def postprocess(self, model_outputs):
125
+ images = []
126
+ if "reconstruction" in model_outputs.keys():
127
+ outputs = model_outputs.reconstruction
128
+ for output in outputs:
129
+ output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
130
+ output = np.moveaxis(output, source=0, destination=-1)
131
+ output = (output * 255.0).round().astype(np.uint8) # float32 to uint8
132
+ images.append(Image.fromarray(output))
133
+
134
+ return images if len(images) > 1 else images[0]
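A short usage sketch mirroring the class docstring above; the output file name is a placeholder:

```python
import requests
from PIL import Image
from transformers import pipeline

upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
upscaled = upscaler(img.resize((64, 64)))  # a single input returns a single PIL.Image
upscaled.save("upscaled.png")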
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/mask_generation.py ADDED
@@ -0,0 +1,285 @@
1
+ from collections import defaultdict
2
+ from typing import Optional
3
+
4
+ from ..image_utils import load_image
5
+ from ..utils import (
6
+ add_end_docstrings,
7
+ is_torch_available,
8
+ logging,
9
+ requires_backends,
10
+ )
11
+ from .base import ChunkPipeline, build_pipeline_init_args
12
+
13
+
14
+ if is_torch_available():
15
+ import torch
16
+
17
+ from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING_NAMES
18
+
19
+ logger = logging.get_logger(__name__)
20
+
21
+
22
+ @add_end_docstrings(
23
+ build_pipeline_init_args(has_image_processor=True),
24
+ r"""
25
+ points_per_batch (*optional*, int, default to 64):
26
+ Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU
27
+ memory.
28
+ output_bboxes_mask (`bool`, *optional*, default to `False`):
29
+ Whether or not to output the bounding box predictions.
30
+ output_rle_masks (`bool`, *optional*, default to `False`):
31
+ Whether or not to output the masks in `RLE` format""",
32
+ )
33
+ class MaskGenerationPipeline(ChunkPipeline):
34
+ """
35
+ Automatic mask generation for images using `SamForMaskGeneration`. This pipeline predicts binary masks for an
36
+ image, given an image. It is a `ChunkPipeline` because you can seperate the points in a mini-batch in order to
37
+ avoid OOM issues. Use the `points_per_batch` argument to control the number of points that will be processed at the
38
+ same time. Default is `64`.
39
+
40
+ The pipeline works in 3 steps:
41
+ 1. `preprocess`: A grid of 1024 points evenly separated is generated along with bounding boxes and point
42
+ labels.
43
+ For more details on how the points and bounding boxes are created, check the `_generate_crop_boxes`
44
+ function. The image is also preprocessed using the `image_processor`. This function `yields` a minibatch of
45
+ `points_per_batch`.
46
+
47
+ 2. `forward`: feeds the outputs of `preprocess` to the model. The image embedding is computed only once.
48
+ Calls both `self.model.get_image_embeddings` and makes sure that the gradients are not computed, and the
49
+ tensors and models are on the same device.
50
+
51
+ 3. `postprocess`: The most important part of the automatic mask generation happens here. Three steps
52
+ are induced:
53
+ - image_processor.postprocess_masks (run on each minibatch loop): takes in the raw output masks,
54
+ resizes them according
55
+ to the image size, and transforms there to binary masks.
56
+ - image_processor.filter_masks (on each minibatch loop): uses both `pred_iou_thresh` and
57
+ `stability_scores`. Also
58
+ applies a variety of filters based on non maximum suppression to remove bad masks.
59
+ - image_processor.postprocess_masks_for_amg applies the NSM on the mask to only keep relevant ones.
60
+
61
+ Example:
62
+
63
+ ```python
64
+ >>> from transformers import pipeline
65
+
66
+ >>> generator = pipeline(model="facebook/sam-vit-base", task="mask-generation")
67
+ >>> outputs = generator(
68
+ ... "http://images.cocodataset.org/val2017/000000039769.jpg",
69
+ ... )
70
+
71
+ >>> outputs = generator(
72
+ ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", points_per_batch=128
73
+ ... )
74
+ ```
75
+
76
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
77
+
78
+ This segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
79
+ `"mask-generation"`.
80
+
81
+ See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=mask-generation).
82
+ """
83
+
84
+ def __init__(self, **kwargs):
85
+ super().__init__(**kwargs)
86
+ requires_backends(self, "vision")
87
+ requires_backends(self, "torch")
88
+
89
+ if self.framework != "pt":
90
+ raise ValueError(f"The {self.__class__} is only available in PyTorch.")
91
+
92
+ self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING_NAMES)
93
+
94
+ def _sanitize_parameters(self, **kwargs):
95
+ preprocess_kwargs = {}
96
+ postprocess_kwargs = {}
97
+ forward_params = {}
98
+ # preprocess args
99
+ if "points_per_batch" in kwargs:
100
+ preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
101
+ if "points_per_crop" in kwargs:
102
+ preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
103
+ if "crops_n_layers" in kwargs:
104
+ preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
105
+ if "crop_overlap_ratio" in kwargs:
106
+ preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
107
+ if "crop_n_points_downscale_factor" in kwargs:
108
+ preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
109
+ if "timeout" in kwargs:
110
+ preprocess_kwargs["timeout"] = kwargs["timeout"]
111
+ # postprocess args
112
+ if "pred_iou_thresh" in kwargs:
113
+ forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
114
+ if "stability_score_offset" in kwargs:
115
+ forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
116
+ if "mask_threshold" in kwargs:
117
+ forward_params["mask_threshold"] = kwargs["mask_threshold"]
118
+ if "stability_score_thresh" in kwargs:
119
+ forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
120
+ if "crops_nms_thresh" in kwargs:
121
+ postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
122
+ if "output_rle_mask" in kwargs:
123
+ postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
124
+ if "output_bboxes_mask" in kwargs:
125
+ postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
126
+ return preprocess_kwargs, forward_params, postprocess_kwargs
127
+
128
+ def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
129
+ """
130
+ Generates binary segmentation masks
131
+
132
+ Args:
133
+ inputs (`np.ndarray` or `bytes` or `str` or `dict`):
134
+ Image or list of images.
135
+ mask_threshold (`float`, *optional*, defaults to 0.0):
136
+ Threshold to use when turning the predicted masks into binary values.
137
+ pred_iou_thresh (`float`, *optional*, defaults to 0.88):
138
+ A filtering threshold in `[0,1]` applied on the model's predicted mask quality.
139
+ stability_score_thresh (`float`, *optional*, defaults to 0.95):
140
+ A filtering threshold in `[0,1]`, using the stability of the mask under changes to the cutoff used to
141
+ binarize the model's mask predictions.
142
+ stability_score_offset (`int`, *optional*, defaults to 1):
143
+ The amount to shift the cutoff when calculated the stability score.
144
+ crops_nms_thresh (`float`, *optional*, defaults to 0.7):
145
+ The box IoU cutoff used by non-maximal suppression to filter duplicate masks.
146
+ crops_n_layers (`int`, *optional*, defaults to 0):
147
+ If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of
148
+ layers to run, where each layer has 2**i_layer number of image crops.
149
+ crop_overlap_ratio (`float`, *optional*, defaults to `512 / 1500`):
150
+ Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of
151
+ the image length. Later layers with more crops scale down this overlap.
152
+ crop_n_points_downscale_factor (`int`, *optional*, defaults to `1`):
153
+ The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
154
+ timeout (`float`, *optional*, defaults to None):
155
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
156
+ the call may block forever.
157
+
158
+ Return:
159
+ `Dict`: A dictionary with the following keys:
160
+ - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape `(width,
161
+ height)` of the original image. Returns a mask filled with zeros if no object is found.
162
+ - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of
163
+ the "object" described by the label and the mask.
164
+
165
+ """
166
+ return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
167
+
168
+ def preprocess(
169
+ self,
170
+ image,
171
+ points_per_batch=64,
172
+ crops_n_layers: int = 0,
173
+ crop_overlap_ratio: float = 512 / 1500,
174
+ points_per_crop: Optional[int] = 32,
175
+ crop_n_points_downscale_factor: Optional[int] = 1,
176
+ timeout: Optional[float] = None,
177
+ ):
178
+ image = load_image(image, timeout=timeout)
179
+ target_size = self.image_processor.size["longest_edge"]
180
+ crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
181
+ image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
182
+ )
183
+ model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
184
+
185
+ with self.device_placement():
186
+ if self.framework == "pt":
187
+ inference_context = self.get_inference_context()
188
+ with inference_context():
189
+ model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
190
+ image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
191
+ model_inputs["image_embeddings"] = image_embeddings
192
+
193
+ n_points = grid_points.shape[1]
194
+ points_per_batch = points_per_batch if points_per_batch is not None else n_points
195
+
196
+ if points_per_batch <= 0:
197
+ raise ValueError(
198
+ "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
199
+ "To return all points at once, set points_per_batch to None"
200
+ )
201
+
202
+ for i in range(0, n_points, points_per_batch):
203
+ batched_points = grid_points[:, i : i + points_per_batch, :, :]
204
+ labels = input_labels[:, i : i + points_per_batch]
205
+ is_last = i == n_points - points_per_batch
206
+ yield {
207
+ "input_points": batched_points,
208
+ "input_labels": labels,
209
+ "input_boxes": crop_boxes,
210
+ "is_last": is_last,
211
+ **model_inputs,
212
+ }
213
+
214
+ def _forward(
215
+ self,
216
+ model_inputs,
217
+ pred_iou_thresh=0.88,
218
+ stability_score_thresh=0.95,
219
+ mask_threshold=0,
220
+ stability_score_offset=1,
221
+ ):
222
+ input_boxes = model_inputs.pop("input_boxes")
223
+ is_last = model_inputs.pop("is_last")
224
+ original_sizes = model_inputs.pop("original_sizes").tolist()
225
+ reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
226
+
227
+ model_outputs = self.model(**model_inputs)
228
+
229
+ # post processing happens here in order to avoid CPU GPU copies of ALL the masks
230
+ low_resolution_masks = model_outputs["pred_masks"]
231
+ masks = self.image_processor.post_process_masks(
232
+ low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
233
+ )
234
+ iou_scores = model_outputs["iou_scores"]
235
+ masks, iou_scores, boxes = self.image_processor.filter_masks(
236
+ masks[0],
237
+ iou_scores[0],
238
+ original_sizes[0],
239
+ input_boxes[0],
240
+ pred_iou_thresh,
241
+ stability_score_thresh,
242
+ mask_threshold,
243
+ stability_score_offset,
244
+ )
245
+ return {
246
+ "masks": masks,
247
+ "is_last": is_last,
248
+ "boxes": boxes,
249
+ "iou_scores": iou_scores,
250
+ }
251
+
252
+ def postprocess(
253
+ self,
254
+ model_outputs,
255
+ output_rle_mask=False,
256
+ output_bboxes_mask=False,
257
+ crops_nms_thresh=0.7,
258
+ ):
259
+ all_scores = []
260
+ all_masks = []
261
+ all_boxes = []
262
+ for model_output in model_outputs:
263
+ all_scores.append(model_output.pop("iou_scores"))
264
+ all_masks.extend(model_output.pop("masks"))
265
+ all_boxes.append(model_output.pop("boxes"))
266
+
267
+ all_scores = torch.cat(all_scores)
268
+ all_boxes = torch.cat(all_boxes)
269
+ output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
270
+ all_masks, all_scores, all_boxes, crops_nms_thresh
271
+ )
272
+
273
+ extra = defaultdict(list)
274
+ for output in model_outputs:
275
+ for k, v in output.items():
276
+ extra[k].append(v)
277
+
278
+ optional = {}
279
+ if output_rle_mask:
280
+ optional["rle_mask"] = rle_mask
281
+
282
+ if output_bboxes_mask:
283
+ optional["bounding_boxes"] = bounding_boxes
284
+
285
+ return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
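A usage sketch for the mask-generation pipeline, based on the docstring example above (the threshold and batching values are illustrative):

```python
from transformers import pipeline

generator = pipeline(model="facebook/sam-vit-base", task="mask-generation")
outputs = generator(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    points_per_batch=64,
    pred_iou_thresh=0.9,
    output_rle_mask=True,
)
# "masks" and "scores" are always returned; "rle_mask" only because output_rle_mask=True
print(len(outputs["masks"]), outputs["scores"].shape)
```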
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/object_detection.py ADDED
@@ -0,0 +1,187 @@
1
+ from typing import Any, Dict, List, Union
2
+
3
+ from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
4
+ from .base import Pipeline, build_pipeline_init_args
5
+
6
+
7
+ if is_vision_available():
8
+ from ..image_utils import load_image
9
+
10
+
11
+ if is_torch_available():
12
+ import torch
13
+
14
+ from ..models.auto.modeling_auto import (
15
+ MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
16
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
17
+ )
18
+
19
+ logger = logging.get_logger(__name__)
20
+
21
+
22
+ Prediction = Dict[str, Any]
23
+ Predictions = List[Prediction]
24
+
25
+
26
+ @add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
27
+ class ObjectDetectionPipeline(Pipeline):
28
+ """
29
+ Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of objects
30
+ and their classes.
31
+
32
+ Example:
33
+
34
+ ```python
35
+ >>> from transformers import pipeline
36
+
37
+ >>> detector = pipeline(model="facebook/detr-resnet-50")
38
+ >>> detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
39
+ [{'score': 0.997, 'label': 'bird', 'box': {'xmin': 69, 'ymin': 171, 'xmax': 396, 'ymax': 507}}, {'score': 0.999, 'label': 'bird', 'box': {'xmin': 398, 'ymin': 105, 'xmax': 767, 'ymax': 507}}]
40
+
41
+ >>> # x, y are expressed relative to the top left hand corner.
42
+ ```
43
+
44
+ Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
45
+
46
+ This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier:
47
+ `"object-detection"`.
48
+
49
+ See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=object-detection).
50
+ """
51
+
52
+ def __init__(self, *args, **kwargs):
53
+ super().__init__(*args, **kwargs)
54
+
55
+ if self.framework == "tf":
56
+ raise ValueError(f"The {self.__class__} is only available in PyTorch.")
57
+
58
+ requires_backends(self, "vision")
59
+ mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES.copy()
60
+ mapping.update(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES)
61
+ self.check_model_type(mapping)
62
+
63
+ def _sanitize_parameters(self, **kwargs):
64
+ preprocess_params = {}
65
+ if "timeout" in kwargs:
66
+ preprocess_params["timeout"] = kwargs["timeout"]
67
+ postprocess_kwargs = {}
68
+ if "threshold" in kwargs:
69
+ postprocess_kwargs["threshold"] = kwargs["threshold"]
70
+ return preprocess_params, {}, postprocess_kwargs
71
+
72
+ def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
73
+ """
74
+ Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
75
+
76
+ Args:
77
+ images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
78
+ The pipeline handles three types of images:
79
+
80
+ - A string containing an HTTP(S) link pointing to an image
81
+ - A string containing a local path to an image
82
+ - An image loaded in PIL directly
83
+
84
+ The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
85
+ same format: all as HTTP(S) links, all as local paths, or all as PIL images.
86
+ threshold (`float`, *optional*, defaults to 0.9):
87
+ The probability necessary to make a prediction.
88
+ timeout (`float`, *optional*, defaults to None):
89
+ The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
90
+ the call may block forever.
91
+
92
+ Return:
93
+ A list of dictionaries or a list of list of dictionaries containing the result. If the input is a single
94
+ image, will return a list of dictionaries, if the input is a list of several images, will return a list of
95
+ list of dictionaries corresponding to each image.
96
+
97
+ The dictionaries contain the following keys:
98
+
99
+ - **label** (`str`) -- The class label identified by the model.
100
+ - **score** (`float`) -- The score attributed by the model for that label.
101
+ - **box** (`List[Dict[str, int]]`) -- The bounding box of detected object in image's original size.
102
+ """
103
+
104
+ return super().__call__(*args, **kwargs)
105
+
106
+ def preprocess(self, image, timeout=None):
107
+ image = load_image(image, timeout=timeout)
108
+ target_size = torch.IntTensor([[image.height, image.width]])
109
+ inputs = self.image_processor(images=[image], return_tensors="pt")
110
+ if self.tokenizer is not None:
111
+ inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
112
+ inputs["target_size"] = target_size
113
+ return inputs
114
+
115
+ def _forward(self, model_inputs):
116
+ target_size = model_inputs.pop("target_size")
117
+ outputs = self.model(**model_inputs)
118
+ model_outputs = outputs.__class__({"target_size": target_size, **outputs})
119
+ if self.tokenizer is not None:
120
+ model_outputs["bbox"] = model_inputs["bbox"]
121
+ return model_outputs
122
+
123
+ def postprocess(self, model_outputs, threshold=0.9):
124
+ target_size = model_outputs["target_size"]
125
+ if self.tokenizer is not None:
126
+ # This is a LayoutLMForTokenClassification variant.
127
+ # The OCR got the boxes and the model classified the words.
128
+ height, width = target_size[0].tolist()
129
+
130
+ def unnormalize(bbox):
131
+ return self._get_bounding_box(
132
+ torch.Tensor(
133
+ [
134
+ (width * bbox[0] / 1000),
135
+ (height * bbox[1] / 1000),
136
+ (width * bbox[2] / 1000),
137
+ (height * bbox[3] / 1000),
138
+ ]
139
+ )
140
+ )
141
+
142
+ scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
143
+ labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
144
+ boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
145
+ keys = ["score", "label", "box"]
146
+ annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
147
+ else:
148
+ # This is a regular ForObjectDetectionModel
149
+ raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
150
+ raw_annotation = raw_annotations[0]
151
+ scores = raw_annotation["scores"]
152
+ labels = raw_annotation["labels"]
153
+ boxes = raw_annotation["boxes"]
154
+
155
+ raw_annotation["scores"] = scores.tolist()
156
+ raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
157
+ raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
158
+
159
+ # {"scores": [...], ...} --> [{"score":x, ...}, ...]
160
+ keys = ["score", "label", "box"]
161
+ annotation = [
162
+ dict(zip(keys, vals))
163
+ for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
164
+ ]
165
+
166
+ return annotation
167
+
168
+ def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
169
+ """
170
+ Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... }
171
+
172
+ Args:
173
+ box (`torch.Tensor`): Tensor containing the coordinates in corners format.
174
+
175
+ Returns:
176
+ bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
177
+ """
178
+ if self.framework != "pt":
179
+ raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
180
+ xmin, ymin, xmax, ymax = box.int().tolist()
181
+ bbox = {
182
+ "xmin": xmin,
183
+ "ymin": ymin,
184
+ "xmax": xmax,
185
+ "ymax": ymax,
186
+ }
187
+ return bbox
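Finally, a usage sketch for the object-detection pipeline, reusing the model and image from the class docstring (the threshold is illustrative):

```python
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
detections = detector(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    threshold=0.9,
)
for det in detections:
    # det["box"] holds pixel coordinates: {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}
    print(det["label"], round(det["score"], 3), det["box"])
```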
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/text_to_audio.py ADDED
@@ -0,0 +1,207 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import List, Union
+
+ from ..utils import is_torch_available
+ from .base import Pipeline
+
+
+ if is_torch_available():
+     from ..models.auto.modeling_auto import MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING
+     from ..models.speecht5.modeling_speecht5 import SpeechT5HifiGan
+
+ DEFAULT_VOCODER_ID = "microsoft/speecht5_hifigan"
+
+
+ class TextToAudioPipeline(Pipeline):
+     """
+     Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`. This
+     pipeline generates an audio file from an input text and optional other conditional inputs.
+
+     Example:
+
+     ```python
+     >>> from transformers import pipeline
+
+     >>> pipe = pipeline(model="suno/bark-small")
+     >>> output = pipe("Hey it's HuggingFace on the phone!")
+
+     >>> audio = output["audio"]
+     >>> sampling_rate = output["sampling_rate"]
+     ```
+
+     Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+     <Tip>
+
+     You can specify parameters passed to the model by using [`TextToAudioPipeline.__call__.forward_params`] or
+     [`TextToAudioPipeline.__call__.generate_kwargs`].
+
+     Example:
+
+     ```python
+     >>> from transformers import pipeline
+
+     >>> music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt")
+
+     >>> # diversify the music generation by adding randomness with a high temperature and set a maximum music length
+     >>> generate_kwargs = {
+     ...     "do_sample": True,
+     ...     "temperature": 0.7,
+     ...     "max_new_tokens": 35,
+     ... }
+
+     >>> outputs = music_generator("Techno music with high melodic riffs", generate_kwargs=generate_kwargs)
+     ```
+
+     </Tip>
+
+     This pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"text-to-speech"` or
+     `"text-to-audio"`.
+
+     See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-to-speech).
+     """
+
+     def __init__(self, *args, vocoder=None, sampling_rate=None, **kwargs):
+         super().__init__(*args, **kwargs)
+
+         if self.framework == "tf":
+             raise ValueError("The TextToAudioPipeline is only available in PyTorch.")
+
+         self.vocoder = None
+         if self.model.__class__ in MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING.values():
+             self.vocoder = (
+                 SpeechT5HifiGan.from_pretrained(DEFAULT_VOCODER_ID).to(self.model.device)
+                 if vocoder is None
+                 else vocoder
+             )
+
+         self.sampling_rate = sampling_rate
+         if self.vocoder is not None:
+             self.sampling_rate = self.vocoder.config.sampling_rate
+
+         if self.sampling_rate is None:
+             # get sampling_rate from config and generation config
+
+             config = self.model.config
+             gen_config = self.model.__dict__.get("generation_config", None)
+             if gen_config is not None:
+                 config.update(gen_config.to_dict())
+
+             for sampling_rate_name in ["sample_rate", "sampling_rate"]:
+                 sampling_rate = getattr(config, sampling_rate_name, None)
+                 if sampling_rate is not None:
+                     self.sampling_rate = sampling_rate
+
+     def preprocess(self, text, **kwargs):
+         if isinstance(text, str):
+             text = [text]
+
+         if self.model.config.model_type == "bark":
+             # bark Tokenizer is called with BarkProcessor which uses those kwargs
+             new_kwargs = {
+                 "max_length": self.model.generation_config.semantic_config.get("max_input_semantic_length", 256),
+                 "add_special_tokens": False,
+                 "return_attention_mask": True,
+                 "return_token_type_ids": False,
+                 "padding": "max_length",
+             }
+
+             # priority is given to kwargs
+             new_kwargs.update(kwargs)
+
+             kwargs = new_kwargs
+
+         output = self.tokenizer(text, **kwargs, return_tensors="pt")
+
+         return output
+
+     def _forward(self, model_inputs, **kwargs):
+         # we expect some kwargs to be additional tensors which need to be on the right device
+         kwargs = self._ensure_tensor_on_device(kwargs, device=self.device)
+         forward_params = kwargs["forward_params"]
+         generate_kwargs = kwargs["generate_kwargs"]
+
+         if self.model.can_generate():
+             # we expect some kwargs to be additional tensors which need to be on the right device
+             generate_kwargs = self._ensure_tensor_on_device(generate_kwargs, device=self.device)
+
+             # generate_kwargs get priority over forward_params
+             forward_params.update(generate_kwargs)
+
+             output = self.model.generate(**model_inputs, **forward_params)
+         else:
+             if len(generate_kwargs):
+                 raise ValueError(
+                     f"""You're using the `TextToAudioPipeline` with a forward-only model, but `generate_kwargs` is non-empty.
+                     For forward-only TTA models, please use `forward_params` instead of
+                     `generate_kwargs`. For reference, here are the `generate_kwargs` used here:
+                     {generate_kwargs.keys()}"""
+                 )
+             output = self.model(**model_inputs, **forward_params)[0]
+
+         if self.vocoder is not None:
+             # in that case, the output is a spectrogram that needs to be converted into a waveform
+             output = self.vocoder(output)
+
+         return output
+
+     def __call__(self, text_inputs: Union[str, List[str]], **forward_params):
+         """
+         Generates speech/audio from the inputs. See the [`TextToAudioPipeline`] documentation for more information.
+
+         Args:
+             text_inputs (`str` or `List[str]`):
+                 The text(s) to generate.
+             forward_params (`dict`, *optional*):
+                 Parameters passed to the model generation/forward method. `forward_params` are always passed to the
+                 underlying model.
+             generate_kwargs (`dict`, *optional*):
+                 The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a
+                 complete overview of generate, check the [following
+                 guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). `generate_kwargs` are
+                 only passed to the underlying model if the latter is a generative model.
+
+         Return:
+             A `dict` or a list of `dict`: The dictionaries have two keys:
+
+             - **audio** (`np.ndarray` of shape `(nb_channels, audio_length)`) -- The generated audio waveform.
+             - **sampling_rate** (`int`) -- The sampling rate of the generated audio waveform.
+         """
+         return super().__call__(text_inputs, **forward_params)
+
+     def _sanitize_parameters(
+         self,
+         preprocess_params=None,
+         forward_params=None,
+         generate_kwargs=None,
+     ):
+         params = {
+             "forward_params": forward_params if forward_params else {},
+             "generate_kwargs": generate_kwargs if generate_kwargs else {},
+         }
+
+         if preprocess_params is None:
+             preprocess_params = {}
+         postprocess_params = {}
+
+         return preprocess_params, params, postprocess_params
+
+     def postprocess(self, waveform):
+         output_dict = {}
+
+         output_dict["audio"] = waveform.cpu().float().numpy()
+         output_dict["sampling_rate"] = self.sampling_rate
+
+         return output_dict
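Since `postprocess` above returns a dict with an `audio` NumPy array and its `sampling_rate`, the pipeline output can be written straight to a WAV file. A minimal sketch, assuming `scipy` is installed (it is not part of this diff) and reusing the `suno/bark-small` checkpoint from the docstring example; the output filename and the channel-squeeze are illustrative assumptions:

```python
# Illustrative only: run the text-to-audio pipeline and save the waveform to disk.
import scipy.io.wavfile as wavfile
from transformers import pipeline

pipe = pipeline("text-to-audio", model="suno/bark-small")
out = pipe("Hello from the text-to-audio pipeline!")

# `audio` has shape (nb_channels, audio_length); squeeze the mono channel
# dimension so scipy receives a 1-D array of samples.
wavfile.write("speech.wav", rate=out["sampling_rate"], data=out["audio"].squeeze())
```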
env-llmeval/lib/python3.10/site-packages/transformers/pipelines/visual_question_answering.py ADDED
@@ -0,0 +1,151 @@
+ from typing import Union
+
+ from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
+ from .base import Pipeline, build_pipeline_init_args
+
+
+ if is_vision_available():
+     from PIL import Image
+
+     from ..image_utils import load_image
+
+ if is_torch_available():
+     from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES
+
+ logger = logging.get_logger(__name__)
+
+
+ @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True))
+ class VisualQuestionAnsweringPipeline(Pipeline):
+     """
+     Visual Question Answering pipeline using an `AutoModelForVisualQuestionAnswering`. This pipeline is currently only
+     available in PyTorch.
+
+     Example:
+
+     ```python
+     >>> from transformers import pipeline
+
+     >>> oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
+     >>> image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"
+     >>> oracle(question="What is she wearing ?", image=image_url)
+     [{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}]
+
+     >>> oracle(question="What is she wearing ?", image=image_url, top_k=1)
+     [{'score': 0.948, 'answer': 'hat'}]
+
+     >>> oracle(question="Is this a person ?", image=image_url, top_k=1)
+     [{'score': 0.993, 'answer': 'yes'}]
+
+     >>> oracle(question="Is this a man ?", image=image_url, top_k=1)
+     [{'score': 0.996, 'answer': 'no'}]
+     ```
+
+     Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+     This visual question answering pipeline can currently be loaded from [`pipeline`] using the following task
+     identifiers: `"visual-question-answering", "vqa"`.
+
+     The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See
+     the up-to-date list of available models on
+     [huggingface.co/models](https://huggingface.co/models?filter=visual-question-answering).
+     """
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES)
+
+     def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, timeout=None, **kwargs):
+         preprocess_params, postprocess_params = {}, {}
+         if padding is not None:
+             preprocess_params["padding"] = padding
+         if truncation is not None:
+             preprocess_params["truncation"] = truncation
+         if timeout is not None:
+             preprocess_params["timeout"] = timeout
+         if top_k is not None:
+             postprocess_params["top_k"] = top_k
+         return preprocess_params, {}, postprocess_params
+
+     def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
+         r"""
+         Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed
+         below:
+
+         - `pipeline(image=image, question=question)`
+         - `pipeline({"image": image, "question": question})`
+         - `pipeline([{"image": image, "question": question}])`
+         - `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])`
+
+         Args:
+             image (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
+                 The pipeline handles three types of images:
+
+                 - A string containing an http link pointing to an image
+                 - A string containing a local path to an image
+                 - An image loaded in PIL directly
+
+                 The pipeline accepts either a single image or a batch of images. If given a single image, it can be
+                 broadcasted to multiple questions.
+             question (`str`, `List[str]`):
+                 The question(s) asked. If given a single question, it can be broadcasted to multiple images.
+             top_k (`int`, *optional*, defaults to 5):
+                 The number of top labels that will be returned by the pipeline. If the provided number is higher than
+                 the number of labels available in the model configuration, it will default to the number of labels.
+             timeout (`float`, *optional*, defaults to None):
+                 The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
+                 the call may block forever.
+         Return:
+             A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys:
+
+             - **label** (`str`) -- The label identified by the model.
+             - **score** (`float`) -- The score attributed by the model for that label.
+         """
+         if isinstance(image, (Image.Image, str)) and isinstance(question, str):
+             inputs = {"image": image, "question": question}
+         else:
+             """
+             Supports the following formats
+             - {"image": image, "question": question}
+             - [{"image": image, "question": question}]
+             - Generator and datasets
+             """
+             inputs = image
+         results = super().__call__(inputs, **kwargs)
+         return results
+
+     def preprocess(self, inputs, padding=False, truncation=False, timeout=None):
+         image = load_image(inputs["image"], timeout=timeout)
+         model_inputs = self.tokenizer(
+             inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
+         )
+         image_features = self.image_processor(images=image, return_tensors=self.framework)
+         model_inputs.update(image_features)
+         return model_inputs
+
+     def _forward(self, model_inputs, **generate_kwargs):
+         if self.model.can_generate():
+             model_outputs = self.model.generate(**model_inputs, **generate_kwargs)
+         else:
+             model_outputs = self.model(**model_inputs)
+         return model_outputs
+
+     def postprocess(self, model_outputs, top_k=5):
+         if self.model.can_generate():
+             return [
+                 {"answer": self.tokenizer.decode(output_ids, skip_special_tokens=True).strip()}
+                 for output_ids in model_outputs
+             ]
+         else:
+             if top_k > self.model.config.num_labels:
+                 top_k = self.model.config.num_labels
+
+             if self.framework == "pt":
+                 probs = model_outputs.logits.sigmoid()[0]
+                 scores, ids = probs.topk(top_k)
+             else:
+                 raise ValueError(f"Unsupported framework: {self.framework}")
+
+             scores = scores.tolist()
+             ids = ids.tolist()
+             return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
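As the `__call__` dispatch above shows, the pipeline accepts either `image=`/`question=` keyword arguments or (lists of) `{"image": ..., "question": ...}` dicts. A minimal sketch of the batched dict form, reusing the `dandelin/vilt-b32-finetuned-vqa` checkpoint and image URL from the docstring example; the questions and the `top_k` value are illustrative:

```python
# Illustrative only: batch several image/question pairs through the VQA pipeline.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"

batch = [
    {"image": image_url, "question": "What is she wearing?"},
    {"image": image_url, "question": "Is this a person?"},
]
results = vqa(batch, top_k=1)  # one list of {"score", "answer"} dicts per input pair
print(results)
```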