applied-ai-018 commited on
Commit
208647c
·
verified ·
1 Parent(s): f07e988

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/transformers/__init__.py +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__init__.py +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_tf.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_utils.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_tf.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_utils.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark.py +271 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_args.py +124 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_tf.py +136 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_utils.py +166 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_tf.py +303 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_utils.py +914 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/cache_utils.py +435 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/configuration_utils.py +1133 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/convert_graph_to_onnx.py +551 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py +88 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/deepspeed.py +40 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/dependency_versions_table.py +92 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/feature_extraction_sequence_utils.py +371 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/feature_extraction_utils.py +684 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/file_utils.py +133 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/image_utils.py +769 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/modelcard.py +904 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/modeling_attn_mask_utils.py +492 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/modeling_flax_outputs.py +700 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/modeling_flax_utils.py +1288 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/modeling_tf_outputs.py +991 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/modeling_tf_utils.py +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/processing_utils.py +524 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/pytorch_utils.py +296 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/safetensors_conversion.py +111 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/tokenization_utils.py +1040 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/trainer.py +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/trainer_callback.py +607 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/trainer_pt_utils.py +1361 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/trainer_seq2seq.py +367 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/training_args_tf.py +299 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/utils/__init__.py +258 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/backbone_utils.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_flax_objects.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_pt_objects.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_sentencepiece_and_tokenizers_objects.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_speech_objects.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tokenizers_objects.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/fx.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/peft_utils.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark.cpython-310.pyc ADDED
Binary file (7.44 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args.cpython-310.pyc ADDED
Binary file (3.45 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_tf.cpython-310.pyc ADDED
Binary file (3.95 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_utils.cpython-310.pyc ADDED
Binary file (5.61 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_tf.cpython-310.pyc ADDED
Binary file (9.52 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_utils.cpython-310.pyc ADDED
Binary file (30 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark.py ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Benchmarking the library on inference and training in PyTorch.
18
+ """
19
+
20
+
21
+ import timeit
22
+ from typing import Callable, Optional
23
+
24
+ from ..configuration_utils import PretrainedConfig
25
+ from ..models.auto.modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING
26
+ from ..utils import is_py3nvml_available, is_torch_available, logging
27
+ from .benchmark_utils import (
28
+ Benchmark,
29
+ Memory,
30
+ MemorySummary,
31
+ measure_peak_memory_cpu,
32
+ start_memory_tracing,
33
+ stop_memory_tracing,
34
+ )
35
+
36
+
37
+ if is_torch_available():
38
+ import torch
39
+
40
+ from .benchmark_args import PyTorchBenchmarkArguments
41
+
42
+
43
+ if is_py3nvml_available():
44
+ import py3nvml.py3nvml as nvml
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ class PyTorchBenchmark(Benchmark):
51
+ args: PyTorchBenchmarkArguments
52
+ configs: PretrainedConfig
53
+ framework: str = "PyTorch"
54
+
55
+ @property
56
+ def framework_version(self):
57
+ return torch.__version__
58
+
59
+ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
60
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
61
+ return self._measure_speed(_inference)
62
+
63
+ def _inference_memory(
64
+ self, model_name: str, batch_size: int, sequence_length: int
65
+ ) -> [Memory, Optional[MemorySummary]]:
66
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
67
+ return self._measure_memory(_inference)
68
+
69
+ def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
70
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
71
+ return self._measure_speed(_train)
72
+
73
+ def _train_memory(
74
+ self, model_name: str, batch_size: int, sequence_length: int
75
+ ) -> [Memory, Optional[MemorySummary]]:
76
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
77
+ return self._measure_memory(_train)
78
+
79
+ def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
80
+ config = self.config_dict[model_name]
81
+
82
+ if self.args.torchscript:
83
+ config.torchscript = True
84
+
85
+ has_model_class_in_config = (
86
+ hasattr(config, "architectures")
87
+ and isinstance(config.architectures, list)
88
+ and len(config.architectures) > 0
89
+ )
90
+ if not self.args.only_pretrain_model and has_model_class_in_config:
91
+ try:
92
+ model_class = config.architectures[0]
93
+ transformers_module = __import__("transformers", fromlist=[model_class])
94
+ model_cls = getattr(transformers_module, model_class)
95
+ model = model_cls(config)
96
+ except ImportError:
97
+ raise ImportError(
98
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
99
+ " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
100
+ )
101
+ else:
102
+ model = MODEL_MAPPING[config.__class__](config)
103
+
104
+ model.eval()
105
+ model.to(self.args.device)
106
+
107
+ # encoder-decoder has vocab size saved differently
108
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
109
+ input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
110
+
111
+ if self.args.fp16:
112
+ logger.info("Running training in Mixed Precision...")
113
+ if not self.args.is_gpu:
114
+ raise ValueError("Mixed precision is possible only for GPU.")
115
+ # amp seems to have memory leaks so that memory usage
116
+ # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
117
+ model.half()
118
+
119
+ if self.args.torchscript:
120
+ with torch.no_grad():
121
+ inference_model = torch.jit.trace(model, input_ids)
122
+ else:
123
+ inference_model = model
124
+
125
+ def encoder_decoder_forward():
126
+ with torch.no_grad():
127
+ outputs = inference_model(input_ids, decoder_input_ids=input_ids)
128
+ return outputs
129
+
130
+ def encoder_forward():
131
+ with torch.no_grad():
132
+ outputs = inference_model(input_ids)
133
+ return outputs
134
+
135
+ _forward = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
136
+ return _forward
137
+
138
+ def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
139
+ config = self.config_dict[model_name]
140
+
141
+ has_model_class_in_config = (
142
+ hasattr(config, "architectures")
143
+ and isinstance(config.architectures, list)
144
+ and len(config.architectures) > 0
145
+ )
146
+ if not self.args.only_pretrain_model and has_model_class_in_config:
147
+ try:
148
+ model_class = config.architectures[0]
149
+ transformers_module = __import__("transformers", fromlist=[model_class])
150
+ model_cls = getattr(transformers_module, model_class)
151
+ model = model_cls(config)
152
+ except ImportError:
153
+ raise ImportError(
154
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
155
+ " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
156
+ )
157
+ else:
158
+ model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
159
+
160
+ if self.args.torchscript:
161
+ raise NotImplementedError("Training for torchscript is currently not implemented")
162
+ else:
163
+ train_model = model
164
+
165
+ model.train()
166
+ model.to(self.args.device)
167
+
168
+ # encoder-decoder has vocab size saved differently
169
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
170
+ input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
171
+
172
+ if self.args.fp16:
173
+ logger.info("Running training in Mixed Precision...")
174
+ if not self.args.is_gpu:
175
+ raise ValueError("Mixed precision is possible only for GPU.")
176
+
177
+ # amp seems to have memory leaks so that memory usage
178
+ # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
179
+ model.half()
180
+
181
+ def compute_loss_and_backprob_encoder():
182
+ loss = train_model(input_ids, labels=input_ids)[0]
183
+ loss.backward()
184
+ return loss
185
+
186
+ def compute_loss_and_backprob_encoder_decoder():
187
+ loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0]
188
+ loss.backward()
189
+ return loss
190
+
191
+ _train = (
192
+ compute_loss_and_backprob_encoder_decoder
193
+ if config.is_encoder_decoder
194
+ else compute_loss_and_backprob_encoder
195
+ )
196
+ return _train
197
+
198
+ def _measure_speed(self, func) -> float:
199
+ try:
200
+ if self.args.is_tpu or self.args.torchscript:
201
+ # run additional 10 times to stabilize compilation for tpu and torchscript
202
+ logger.info("Do inference on TPU or torchscript. Running model 5 times to stabilize compilation")
203
+ timeit.repeat(
204
+ func,
205
+ repeat=1,
206
+ number=5,
207
+ )
208
+
209
+ # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
210
+ runtimes = timeit.repeat(
211
+ func,
212
+ repeat=self.args.repeat,
213
+ number=10,
214
+ )
215
+
216
+ if self.args.is_tpu and self.args.torch_xla_tpu_print_metrics:
217
+ import torch_xla.debug.metrics as met
218
+
219
+ self.print_fn(met.metrics_report())
220
+
221
+ return min(runtimes) / 10.0
222
+ except RuntimeError as e:
223
+ self.print_fn(f"Doesn't fit on GPU. {e}")
224
+ return "N/A"
225
+
226
+ def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
227
+ try:
228
+ if self.args.trace_memory_line_by_line:
229
+ trace = start_memory_tracing("transformers")
230
+
231
+ if self.args.is_tpu:
232
+ # tpu
233
+ raise NotImplementedError(
234
+ "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with"
235
+ " `--no-memory` or `args.memory=False`"
236
+ )
237
+ elif self.args.is_gpu:
238
+ if not is_py3nvml_available():
239
+ logger.warning(
240
+ "py3nvml not installed, we won't log GPU memory usage. "
241
+ "Install py3nvml (pip install py3nvml) to log information about GPU."
242
+ )
243
+ memory = "N/A"
244
+ else:
245
+ logger.info(
246
+ "Measuring total GPU usage on GPU device. Make sure to not have additional processes running"
247
+ " on the same GPU."
248
+ )
249
+ # init nvml
250
+ nvml.nvmlInit()
251
+ func()
252
+ handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
253
+ meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
254
+ max_bytes_in_use = meminfo.used
255
+ memory = Memory(max_bytes_in_use)
256
+ # shutdown nvml
257
+ nvml.nvmlShutdown()
258
+ else:
259
+ # cpu
260
+ memory_bytes = measure_peak_memory_cpu(func)
261
+ memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
262
+
263
+ if self.args.trace_memory_line_by_line:
264
+ summary = stop_memory_tracing(trace)
265
+ else:
266
+ summary = None
267
+
268
+ return memory, summary
269
+ except RuntimeError as e:
270
+ self.print_fn(f"Doesn't fit on GPU. {e}")
271
+ return "N/A", None
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_args.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Tuple
19
+
20
+ from ..utils import (
21
+ cached_property,
22
+ is_torch_available,
23
+ is_torch_xla_available,
24
+ is_torch_xpu_available,
25
+ logging,
26
+ requires_backends,
27
+ )
28
+ from .benchmark_args_utils import BenchmarkArguments
29
+
30
+
31
+ if is_torch_available():
32
+ import torch
33
+
34
+ if is_torch_xla_available():
35
+ import torch_xla.core.xla_model as xm
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ @dataclass
42
+ class PyTorchBenchmarkArguments(BenchmarkArguments):
43
+ deprecated_args = [
44
+ "no_inference",
45
+ "no_cuda",
46
+ "no_tpu",
47
+ "no_speed",
48
+ "no_memory",
49
+ "no_env_print",
50
+ "no_multi_process",
51
+ ]
52
+
53
+ def __init__(self, **kwargs):
54
+ """
55
+ This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
56
+ deleted
57
+ """
58
+ for deprecated_arg in self.deprecated_args:
59
+ if deprecated_arg in kwargs:
60
+ positive_arg = deprecated_arg[3:]
61
+ setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
62
+ logger.warning(
63
+ f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
64
+ f" {positive_arg}={kwargs[positive_arg]}"
65
+ )
66
+
67
+ self.torchscript = kwargs.pop("torchscript", self.torchscript)
68
+ self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
69
+ self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
70
+ super().__init__(**kwargs)
71
+
72
+ torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
73
+ torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
74
+ fp16_opt_level: str = field(
75
+ default="O1",
76
+ metadata={
77
+ "help": (
78
+ "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
79
+ "See details at https://nvidia.github.io/apex/amp.html"
80
+ )
81
+ },
82
+ )
83
+
84
+ @cached_property
85
+ def _setup_devices(self) -> Tuple["torch.device", int]:
86
+ requires_backends(self, ["torch"])
87
+ logger.info("PyTorch: setting up devices")
88
+ if not self.cuda:
89
+ device = torch.device("cpu")
90
+ n_gpu = 0
91
+ elif is_torch_xla_available():
92
+ device = xm.xla_device()
93
+ n_gpu = 0
94
+ elif is_torch_xpu_available():
95
+ device = torch.device("xpu")
96
+ n_gpu = torch.xpu.device_count()
97
+ else:
98
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
99
+ n_gpu = torch.cuda.device_count()
100
+ return device, n_gpu
101
+
102
+ @property
103
+ def is_tpu(self):
104
+ return is_torch_xla_available() and self.tpu
105
+
106
+ @property
107
+ def device_idx(self) -> int:
108
+ requires_backends(self, ["torch"])
109
+ # TODO(PVP): currently only single GPU is supported
110
+ return torch.cuda.current_device()
111
+
112
+ @property
113
+ def device(self) -> "torch.device":
114
+ requires_backends(self, ["torch"])
115
+ return self._setup_devices[0]
116
+
117
+ @property
118
+ def n_gpu(self):
119
+ requires_backends(self, ["torch"])
120
+ return self._setup_devices[1]
121
+
122
+ @property
123
+ def is_gpu(self):
124
+ return self.n_gpu > 0
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_tf.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Tuple
19
+
20
+ from ..utils import cached_property, is_tf_available, logging, requires_backends
21
+ from .benchmark_args_utils import BenchmarkArguments
22
+
23
+
24
+ if is_tf_available():
25
+ import tensorflow as tf
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ @dataclass
32
+ class TensorFlowBenchmarkArguments(BenchmarkArguments):
33
+ deprecated_args = [
34
+ "no_inference",
35
+ "no_cuda",
36
+ "no_tpu",
37
+ "no_speed",
38
+ "no_memory",
39
+ "no_env_print",
40
+ "no_multi_process",
41
+ ]
42
+
43
+ def __init__(self, **kwargs):
44
+ """
45
+ This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
46
+ deleted
47
+ """
48
+ for deprecated_arg in self.deprecated_args:
49
+ if deprecated_arg in kwargs:
50
+ positive_arg = deprecated_arg[3:]
51
+ kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
52
+ logger.warning(
53
+ f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
54
+ f" {positive_arg}={kwargs[positive_arg]}"
55
+ )
56
+ self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
57
+ self.device_idx = kwargs.pop("device_idx", self.device_idx)
58
+ self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
59
+ self.use_xla = kwargs.pop("use_xla", self.use_xla)
60
+ super().__init__(**kwargs)
61
+
62
+ tpu_name: str = field(
63
+ default=None,
64
+ metadata={"help": "Name of TPU"},
65
+ )
66
+ device_idx: int = field(
67
+ default=0,
68
+ metadata={"help": "CPU / GPU device index. Defaults to 0."},
69
+ )
70
+ eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
71
+ use_xla: bool = field(
72
+ default=False,
73
+ metadata={
74
+ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
75
+ },
76
+ )
77
+
78
+ @cached_property
79
+ def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
80
+ requires_backends(self, ["tf"])
81
+ tpu = None
82
+ if self.tpu:
83
+ try:
84
+ if self.tpu_name:
85
+ tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
86
+ else:
87
+ tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
88
+ except ValueError:
89
+ tpu = None
90
+ return tpu
91
+
92
+ @cached_property
93
+ def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
94
+ requires_backends(self, ["tf"])
95
+ if self.is_tpu:
96
+ tf.config.experimental_connect_to_cluster(self._setup_tpu)
97
+ tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
98
+
99
+ strategy = tf.distribute.TPUStrategy(self._setup_tpu)
100
+ else:
101
+ # currently no multi gpu is allowed
102
+ if self.is_gpu:
103
+ # TODO: Currently only single GPU is supported
104
+ tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
105
+ strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
106
+ else:
107
+ tf.config.set_visible_devices([], "GPU") # disable GPU
108
+ strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
109
+
110
+ return strategy
111
+
112
+ @property
113
+ def is_tpu(self) -> bool:
114
+ requires_backends(self, ["tf"])
115
+ return self._setup_tpu is not None
116
+
117
+ @property
118
+ def strategy(self) -> "tf.distribute.Strategy":
119
+ requires_backends(self, ["tf"])
120
+ return self._setup_strategy
121
+
122
+ @property
123
+ def gpu_list(self):
124
+ requires_backends(self, ["tf"])
125
+ return tf.config.list_physical_devices("GPU")
126
+
127
+ @property
128
+ def n_gpu(self) -> int:
129
+ requires_backends(self, ["tf"])
130
+ if self.cuda:
131
+ return len(self.gpu_list)
132
+ return 0
133
+
134
+ @property
135
+ def is_gpu(self) -> bool:
136
+ return self.n_gpu > 0
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_utils.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import dataclasses
18
+ import json
19
+ import warnings
20
+ from dataclasses import dataclass, field
21
+ from time import time
22
+ from typing import List
23
+
24
+ from ..utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ def list_field(default=None, metadata=None):
31
+ return field(default_factory=lambda: default, metadata=metadata)
32
+
33
+
34
+ @dataclass
35
+ class BenchmarkArguments:
36
+ """
37
+ BenchMarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.
38
+
39
+ Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
40
+ line.
41
+ """
42
+
43
+ models: List[str] = list_field(
44
+ default=[],
45
+ metadata={
46
+ "help": (
47
+ "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
48
+ " of all available models"
49
+ )
50
+ },
51
+ )
52
+
53
+ batch_sizes: List[int] = list_field(
54
+ default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
55
+ )
56
+
57
+ sequence_lengths: List[int] = list_field(
58
+ default=[8, 32, 128, 512],
59
+ metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
60
+ )
61
+
62
+ inference: bool = field(
63
+ default=True,
64
+ metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
65
+ )
66
+ cuda: bool = field(
67
+ default=True,
68
+ metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
69
+ )
70
+ tpu: bool = field(
71
+ default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
72
+ )
73
+ fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
74
+ training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
75
+ verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
76
+ speed: bool = field(
77
+ default=True,
78
+ metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
79
+ )
80
+ memory: bool = field(
81
+ default=True,
82
+ metadata={
83
+ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
84
+ },
85
+ )
86
+ trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
87
+ save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
88
+ log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
89
+ env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
90
+ multi_process: bool = field(
91
+ default=True,
92
+ metadata={
93
+ "help": (
94
+ "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
95
+ " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
96
+ " for debugging / testing and on TPU."
97
+ )
98
+ },
99
+ )
100
+ inference_time_csv_file: str = field(
101
+ default=f"inference_time_{round(time())}.csv",
102
+ metadata={"help": "CSV filename used if saving time results to csv."},
103
+ )
104
+ inference_memory_csv_file: str = field(
105
+ default=f"inference_memory_{round(time())}.csv",
106
+ metadata={"help": "CSV filename used if saving memory results to csv."},
107
+ )
108
+ train_time_csv_file: str = field(
109
+ default=f"train_time_{round(time())}.csv",
110
+ metadata={"help": "CSV filename used if saving time results to csv for training."},
111
+ )
112
+ train_memory_csv_file: str = field(
113
+ default=f"train_memory_{round(time())}.csv",
114
+ metadata={"help": "CSV filename used if saving memory results to csv for training."},
115
+ )
116
+ env_info_csv_file: str = field(
117
+ default=f"env_info_{round(time())}.csv",
118
+ metadata={"help": "CSV filename used if saving environment information."},
119
+ )
120
+ log_filename: str = field(
121
+ default=f"log_{round(time())}.csv",
122
+ metadata={"help": "Log filename used if print statements are saved in log."},
123
+ )
124
+ repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
125
+ only_pretrain_model: bool = field(
126
+ default=False,
127
+ metadata={
128
+ "help": (
129
+ "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
130
+ " model weights."
131
+ )
132
+ },
133
+ )
134
+
135
+ def __post_init__(self):
136
+ warnings.warn(
137
+ f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
138
+ " are deprecated in general and it is advised to use external Benchmarking libraries "
139
+ " to benchmark Transformer models.",
140
+ FutureWarning,
141
+ )
142
+
143
+ def to_json_string(self):
144
+ """
145
+ Serializes this instance to a JSON string.
146
+ """
147
+ return json.dumps(dataclasses.asdict(self), indent=2)
148
+
149
+ @property
150
+ def model_names(self) -> List[str]:
151
+ if len(self.models) <= 0:
152
+ raise ValueError(
153
+ "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
154
+ " google-bert/bert-base-cased` or `args.models = ['google-bert/bert-base-cased']."
155
+ )
156
+ return self.models
157
+
158
+ @property
159
+ def do_multi_processing(self):
160
+ if not self.multi_process:
161
+ return False
162
+ elif self.is_tpu:
163
+ logger.info("Multiprocessing is currently not possible on TPU.")
164
+ return False
165
+ else:
166
+ return True
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_tf.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Benchmarking the library on inference and training in PyTorch.
18
+ """
19
+
20
+
21
+ import random
22
+ import timeit
23
+ from functools import wraps
24
+ from typing import Callable, Optional
25
+
26
+ from ..configuration_utils import PretrainedConfig
27
+ from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
28
+ from ..utils import is_py3nvml_available, is_tf_available, logging
29
+ from .benchmark_utils import (
30
+ Benchmark,
31
+ Memory,
32
+ MemorySummary,
33
+ measure_peak_memory_cpu,
34
+ start_memory_tracing,
35
+ stop_memory_tracing,
36
+ )
37
+
38
+
39
+ if is_tf_available():
40
+ import tensorflow as tf
41
+ from tensorflow.python.framework.errors_impl import ResourceExhaustedError
42
+
43
+ from .benchmark_args_tf import TensorFlowBenchmarkArguments
44
+
45
+ if is_py3nvml_available():
46
+ import py3nvml.py3nvml as nvml
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
52
+ def run_func(func):
53
+ @wraps(func)
54
+ def run_in_eager_mode(*args, **kwargs):
55
+ return func(*args, **kwargs)
56
+
57
+ @wraps(func)
58
+ @tf.function(experimental_compile=use_xla)
59
+ def run_in_graph_mode(*args, **kwargs):
60
+ return func(*args, **kwargs)
61
+
62
+ if do_eager_mode is True:
63
+ if use_xla is not False:
64
+ raise ValueError(
65
+ "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
66
+ )
67
+ return run_in_eager_mode
68
+ else:
69
+ return run_in_graph_mode
70
+
71
+ return run_func
72
+
73
+
74
+ def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
75
+ rng = random.Random()
76
+ values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
77
+ return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
78
+
79
+
80
+ class TensorFlowBenchmark(Benchmark):
81
+ args: TensorFlowBenchmarkArguments
82
+ configs: PretrainedConfig
83
+ framework: str = "TensorFlow"
84
+
85
+ @property
86
+ def framework_version(self):
87
+ return tf.__version__
88
+
89
+ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
90
+ # initialize GPU on separate process
91
+ strategy = self.args.strategy
92
+ if strategy is None:
93
+ raise ValueError("A device strategy has to be initialized before using TensorFlow.")
94
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
95
+ return self._measure_speed(_inference)
96
+
97
+ def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
98
+ strategy = self.args.strategy
99
+ if strategy is None:
100
+ raise ValueError("A device strategy has to be initialized before using TensorFlow.")
101
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
102
+ return self._measure_speed(_train)
103
+
104
+ def _inference_memory(
105
+ self, model_name: str, batch_size: int, sequence_length: int
106
+ ) -> [Memory, Optional[MemorySummary]]:
107
+ # initialize GPU on separate process
108
+ if self.args.is_gpu:
109
+ tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
110
+ strategy = self.args.strategy
111
+ if strategy is None:
112
+ raise ValueError("A device strategy has to be initialized before using TensorFlow.")
113
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
114
+ return self._measure_memory(_inference)
115
+
116
+ def _train_memory(
117
+ self, model_name: str, batch_size: int, sequence_length: int
118
+ ) -> [Memory, Optional[MemorySummary]]:
119
+ if self.args.is_gpu:
120
+ tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
121
+ strategy = self.args.strategy
122
+ if strategy is None:
123
+ raise ValueError("A device strategy has to be initialized before using TensorFlow.")
124
+
125
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
126
+ return self._measure_memory(_train)
127
+
128
+ def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
129
+ config = self.config_dict[model_name]
130
+
131
+ if self.args.fp16:
132
+ raise NotImplementedError("Mixed precision is currently not supported.")
133
+
134
+ has_model_class_in_config = (
135
+ hasattr(config, "architectures")
136
+ and isinstance(config.architectures, list)
137
+ and len(config.architectures) > 0
138
+ )
139
+ if not self.args.only_pretrain_model and has_model_class_in_config:
140
+ try:
141
+ model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
142
+ transformers_module = __import__("transformers", fromlist=[model_class])
143
+ model_cls = getattr(transformers_module, model_class)
144
+ model = model_cls(config)
145
+ except ImportError:
146
+ raise ImportError(
147
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
148
+ " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
149
+ )
150
+ else:
151
+ model = TF_MODEL_MAPPING[config.__class__](config)
152
+
153
+ # encoder-decoder has vocab size saved differently
154
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
155
+ input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
156
+
157
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
158
+ def encoder_decoder_forward():
159
+ return model(input_ids, decoder_input_ids=input_ids, training=False)
160
+
161
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
162
+ def encoder_forward():
163
+ return model(input_ids, training=False)
164
+
165
+ _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
166
+
167
+ return _inference
168
+
169
+ def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
170
+ config = self.config_dict[model_name]
171
+
172
+ if self.args.eager_mode is not False:
173
+ raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
174
+
175
+ if self.args.fp16:
176
+ raise NotImplementedError("Mixed precision is currently not supported.")
177
+
178
+ has_model_class_in_config = (
179
+ hasattr(config, "architectures")
180
+ and isinstance(config.architectures, list)
181
+ and len(config.architectures) > 0
182
+ )
183
+ if not self.args.only_pretrain_model and has_model_class_in_config:
184
+ try:
185
+ model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
186
+ transformers_module = __import__("transformers", fromlist=[model_class])
187
+ model_cls = getattr(transformers_module, model_class)
188
+ model = model_cls(config)
189
+ except ImportError:
190
+ raise ImportError(
191
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
192
+ " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
193
+ )
194
+ else:
195
+ model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
196
+
197
+ # encoder-decoder has vocab size saved differently
198
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
199
+ input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
200
+
201
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
202
+ def encoder_decoder_train():
203
+ loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
204
+ gradients = tf.gradients(loss, model.trainable_variables)
205
+ return gradients
206
+
207
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
208
+ def encoder_train():
209
+ loss = model(input_ids, labels=input_ids, training=True)[0]
210
+ gradients = tf.gradients(loss, model.trainable_variables)
211
+ return gradients
212
+
213
+ _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
214
+
215
+ return _train
216
+
217
+ def _measure_speed(self, func) -> float:
218
+ with self.args.strategy.scope():
219
+ try:
220
+ if self.args.is_tpu or self.args.use_xla:
221
+ # run additional 10 times to stabilize compilation for tpu
222
+ logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
223
+ timeit.repeat(func, repeat=1, number=5)
224
+
225
+ # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
226
+ runtimes = timeit.repeat(
227
+ func,
228
+ repeat=self.args.repeat,
229
+ number=10,
230
+ )
231
+
232
+ return min(runtimes) / 10.0
233
+ except ResourceExhaustedError as e:
234
+ self.print_fn(f"Doesn't fit on GPU. {e}")
235
+
236
+ def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
237
+ logger.info(
238
+ "Note that TensorFlow allocates more memory than "
239
+ "it might need to speed up computation. "
240
+ "The memory reported here corresponds to the memory "
241
+ "reported by `nvidia-smi`, which can vary depending "
242
+ "on total available memory on the GPU that is used."
243
+ )
244
+ with self.args.strategy.scope():
245
+ try:
246
+ if self.args.trace_memory_line_by_line:
247
+ if not self.args.eager_mode:
248
+ raise ValueError(
249
+ "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
250
+ " consumption line by line."
251
+ )
252
+ trace = start_memory_tracing("transformers")
253
+
254
+ if self.args.is_tpu:
255
+ # tpu
256
+ raise NotImplementedError(
257
+ "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
258
+ " with `args.memory=False`"
259
+ )
260
+ elif self.args.is_gpu:
261
+ # gpu
262
+ if not is_py3nvml_available():
263
+ logger.warning(
264
+ "py3nvml not installed, we won't log GPU memory usage. "
265
+ "Install py3nvml (pip install py3nvml) to log information about GPU."
266
+ )
267
+ memory = "N/A"
268
+ else:
269
+ logger.info(
270
+ "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
271
+ " running on the same GPU."
272
+ )
273
+ # init nvml
274
+ nvml.nvmlInit()
275
+ func()
276
+ handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
277
+ meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
278
+ max_bytes_in_use = meminfo.used
279
+ memory = Memory(max_bytes_in_use)
280
+ # shutdown nvml
281
+ nvml.nvmlShutdown()
282
+ else:
283
+ # cpu
284
+ if self.args.trace_memory_line_by_line:
285
+ logger.info(
286
+ "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
287
+ " TensorFlow."
288
+ )
289
+ memory = None
290
+ else:
291
+ memory_bytes = measure_peak_memory_cpu(func)
292
+ memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
293
+ if self.args.trace_memory_line_by_line:
294
+ summary = stop_memory_tracing(trace)
295
+ if memory is None:
296
+ memory = summary.total
297
+ else:
298
+ summary = None
299
+
300
+ return memory, summary
301
+ except ResourceExhaustedError as e:
302
+ self.print_fn(f"Doesn't fit on GPU. {e}")
303
+ return "N/A", None
llmeval-env/lib/python3.10/site-packages/transformers/benchmark/benchmark_utils.py ADDED
@@ -0,0 +1,914 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
2
+
3
+ # Copyright 2020 The HuggingFace Team and the AllenNLP authors. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Utilities for working with the local dataset cache.
18
+ """
19
+
20
+ import copy
21
+ import csv
22
+ import linecache
23
+ import os
24
+ import platform
25
+ import sys
26
+ import warnings
27
+ from abc import ABC, abstractmethod
28
+ from collections import defaultdict, namedtuple
29
+ from datetime import datetime
30
+ from multiprocessing import Pipe, Process, Queue
31
+ from multiprocessing.connection import Connection
32
+ from typing import Callable, Iterable, List, NamedTuple, Optional, Union
33
+
34
+ from .. import AutoConfig, PretrainedConfig
35
+ from .. import __version__ as version
36
+ from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging
37
+ from .benchmark_args_utils import BenchmarkArguments
38
+
39
+
40
+ if is_torch_available():
41
+ from torch.cuda import empty_cache as torch_empty_cache
42
+
43
+ if is_tf_available():
44
+ from tensorflow.python.eager import context as tf_context
45
+
46
+ if is_psutil_available():
47
+ import psutil
48
+
49
+ if is_py3nvml_available():
50
+ import py3nvml.py3nvml as nvml
51
+
52
+ if platform.system() == "Windows":
53
+ from signal import CTRL_C_EVENT as SIGKILL
54
+ else:
55
+ from signal import SIGKILL
56
+
57
+
58
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
59
+
60
+
61
+ _is_memory_tracing_enabled = False
62
+
63
+ BenchmarkOutput = namedtuple(
64
+ "BenchmarkOutput",
65
+ [
66
+ "time_inference_result",
67
+ "memory_inference_result",
68
+ "time_train_result",
69
+ "memory_train_result",
70
+ "inference_summary",
71
+ "train_summary",
72
+ ],
73
+ )
74
+
75
+
76
+ def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]:
77
+ """
78
+ This function wraps another function into its own separated process. In order to ensure accurate memory
79
+ measurements it is important that the function is executed in a separate process
80
+
81
+ Args:
82
+ - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process
83
+ - `do_multi_processing`: (`bool`) Whether to run function on separate process or not
84
+ """
85
+
86
+ def multi_process_func(*args, **kwargs):
87
+ # run function in an individual
88
+ # process to get correct memory
89
+ def wrapper_func(queue: Queue, *args):
90
+ try:
91
+ result = func(*args)
92
+ except Exception as e:
93
+ logger.error(e)
94
+ print(e)
95
+ result = "N/A"
96
+ queue.put(result)
97
+
98
+ queue = Queue()
99
+ p = Process(target=wrapper_func, args=[queue] + list(args))
100
+ p.start()
101
+ result = queue.get()
102
+ p.join()
103
+ return result
104
+
105
+ if do_multi_processing:
106
+ logger.info(f"Function {func} is executed in its own process...")
107
+ return multi_process_func
108
+ else:
109
+ return func
110
+
111
+
112
+ def is_memory_tracing_enabled():
113
+ global _is_memory_tracing_enabled
114
+ return _is_memory_tracing_enabled
115
+
116
+
117
+ class Frame(NamedTuple):
118
+ """
119
+ `Frame` is a NamedTuple used to gather the current frame state. `Frame` has the following fields:
120
+
121
+ - 'filename' (string): Name of the file currently executed
122
+ - 'module' (string): Name of the module currently executed
123
+ - 'line_number' (int): Number of the line currently executed
124
+ - 'event' (string): Event that triggered the tracing (default will be "line")
125
+ - 'line_text' (string): Text of the line in the python script
126
+ """
127
+
128
+ filename: str
129
+ module: str
130
+ line_number: int
131
+ event: str
132
+ line_text: str
133
+
134
+
135
+ class UsedMemoryState(NamedTuple):
136
+ """
137
+ `UsedMemoryState` are named tuples with the following fields:
138
+
139
+ - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file,
140
+ location in current file)
141
+ - 'cpu_memory': CPU RSS memory state *before* executing the line
142
+ - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if
143
+ provided)
144
+ """
145
+
146
+ frame: Frame
147
+ cpu_memory: int
148
+ gpu_memory: int
149
+
150
+
151
+ class Memory(NamedTuple):
152
+ """
153
+ `Memory` NamedTuple have a single field `bytes` and you can get a human readable str of the number of mega bytes by
154
+ calling `__repr__`
155
+
156
+ - `byte` (integer): number of bytes,
157
+ """
158
+
159
+ bytes: int
160
+
161
+ def __repr__(self) -> str:
162
+ return str(bytes_to_mega_bytes(self.bytes))
163
+
164
+
165
+ class MemoryState(NamedTuple):
166
+ """
167
+ `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
168
+
169
+ - `frame` (`Frame`): the current frame (see above)
170
+ - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple
171
+ - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple
172
+ - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple
173
+ """
174
+
175
+ frame: Frame
176
+ cpu: Memory
177
+ gpu: Memory
178
+ cpu_gpu: Memory
179
+
180
+
181
+ class MemorySummary(NamedTuple):
182
+ """
183
+ `MemorySummary` namedtuple otherwise with the fields:
184
+
185
+ - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by
186
+ subtracting the memory after executing each line from the memory before executing said line.
187
+ - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line
188
+ obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted
189
+ from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory
190
+ is released)
191
+ - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with
192
+ memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
193
+ """
194
+
195
+ sequential: List[MemoryState]
196
+ cumulative: List[MemoryState]
197
+ current: List[MemoryState]
198
+ total: Memory
199
+
200
+
201
+ MemoryTrace = List[UsedMemoryState]
202
+
203
+
204
+ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int:
205
+ """
206
+ measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and
207
+ at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package
208
+ `memory_profiler`:
209
+ https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239
210
+
211
+ Args:
212
+ - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure
213
+ the peak memory
214
+
215
+ - `interval`: (`float`, `optional`, defaults to `0.5`) interval in second for which to measure the memory usage
216
+
217
+ - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure gpu usage
218
+
219
+ Returns:
220
+
221
+ - `max_memory`: (`int`) consumed memory peak in Bytes
222
+ """
223
+
224
+ def get_cpu_memory(process_id: int) -> int:
225
+ """
226
+ measures current cpu memory usage of a given `process_id`
227
+
228
+ Args:
229
+ - `process_id`: (`int`) process_id for which to measure memory
230
+
231
+ Returns
232
+
233
+ - `memory`: (`int`) consumed memory in Bytes
234
+ """
235
+ process = psutil.Process(process_id)
236
+ try:
237
+ meminfo_attr = "memory_info" if hasattr(process, "memory_info") else "get_memory_info"
238
+ memory = getattr(process, meminfo_attr)()[0]
239
+ except psutil.AccessDenied:
240
+ raise ValueError("Error with Psutil.")
241
+ return memory
242
+
243
+ if not is_psutil_available():
244
+ logger.warning(
245
+ "Psutil not installed, we won't log CPU memory usage. "
246
+ "Install Psutil (pip install psutil) to use CPU memory tracing."
247
+ )
248
+ max_memory = "N/A"
249
+ else:
250
+
251
+ class MemoryMeasureProcess(Process):
252
+
253
+ """
254
+ `MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. Used to measure the
255
+ memory usage of a process
256
+ """
257
+
258
+ def __init__(self, process_id: int, child_connection: Connection, interval: float):
259
+ super().__init__()
260
+ self.process_id = process_id
261
+ self.interval = interval
262
+ self.connection = child_connection
263
+ self.num_measurements = 1
264
+ self.mem_usage = get_cpu_memory(self.process_id)
265
+
266
+ def run(self):
267
+ self.connection.send(0)
268
+ stop = False
269
+ while True:
270
+ self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id))
271
+ self.num_measurements += 1
272
+
273
+ if stop:
274
+ break
275
+
276
+ stop = self.connection.poll(self.interval)
277
+
278
+ # send results to parent pipe
279
+ self.connection.send(self.mem_usage)
280
+ self.connection.send(self.num_measurements)
281
+
282
+ while True:
283
+ # create child, parent connection
284
+ child_connection, parent_connection = Pipe()
285
+
286
+ # instantiate process
287
+ mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval)
288
+ mem_process.start()
289
+
290
+ # wait until we get memory
291
+ parent_connection.recv()
292
+
293
+ try:
294
+ # execute function
295
+ function()
296
+
297
+ # signal the child process that the function has finished so it stops measuring
298
+ parent_connection.send(0)
299
+
300
+ # receive memory and num measurements
301
+ max_memory = parent_connection.recv()
302
+ num_measurements = parent_connection.recv()
303
+ except Exception:
304
+ # kill process in a clean way
305
+ parent = psutil.Process(os.getpid())
306
+ for child in parent.children(recursive=True):
307
+ os.kill(child.pid, SIGKILL)
308
+ mem_process.join(0)
309
+ raise RuntimeError("Process killed. Error in Process")
310
+
311
+ # wait for the child process at most 20 * interval seconds or until it finishes
312
+ mem_process.join(20 * interval)
313
+
314
+ if (num_measurements > 4) or (interval < 1e-6):
315
+ break
316
+
317
+ # reduce interval
318
+ interval /= 10
319
+
320
+ return max_memory
321
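
A minimal usage sketch of the function above, assuming psutil is installed and a fork-based multiprocessing start method (e.g. Linux); `allocate_buffers` is a hypothetical zero-argument workload:

# Minimal sketch: measure the peak CPU memory of a throwaway workload.
from transformers.benchmark.benchmark_utils import measure_peak_memory_cpu

def allocate_buffers():
    # hypothetical workload: hold roughly 50 MB in memory for a moment
    buffers = [bytearray(10 * 1024 * 1024) for _ in range(5)]
    return len(buffers)

peak_bytes = measure_peak_memory_cpu(allocate_buffers, interval=0.5)
print(f"peak CPU memory: {peak_bytes} bytes")  # "N/A" when psutil is missing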
+
322
+
323
+ def start_memory_tracing(
324
+ modules_to_trace: Optional[Union[str, Iterable[str]]] = None,
325
+ modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None,
326
+ events_to_trace: str = "line",
327
+ gpus_to_trace: Optional[List[int]] = None,
328
+ ) -> MemoryTrace:
329
+ """
330
+ Set up line-by-line tracing to record RSS memory (RAM) at each line of a module or sub-module. See `./benchmark.py` for
331
+ usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory "Resident
332
+ Set Size" (the non-swapped physical memory the process is using). See
333
+ https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
334
+
335
+ Args:
336
+ - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded; if string or list
337
+ of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or
338
+ 'transformers.models.gpt2.modeling_gpt2')
339
+ - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided; if string or list
340
+ of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch')
341
+ - `events_to_trace`: string or list of string of events to be recorded (see official python doc for
342
+ `sys.settrace` for the list of events), defaults to `line`
343
+ - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Defaults to tracing all GPUs
344
+
345
+ Return:
346
+
347
+ - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script).
348
+
349
+ - `UsedMemoryState` are named tuples with the following fields:
350
+
351
+ - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current
352
+ file, location in current file)
353
+ - 'cpu_memory': CPU RSS memory state *before* executing the line
354
+ - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only
355
+ `gpus_to_trace` if provided)
356
+
357
+ `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state. `Frame` has the following
+ fields:
+
+ - 'filename' (string): Name of the file currently executed
+ - 'module' (string): Name of the module currently executed
+ - 'line_number' (int): Number of the line currently executed
+ - 'event' (string): Event that triggered the tracing (default will be "line")
+ - 'line_text' (string): Text of the line in the python script
361
+
362
+ """
363
+ if is_psutil_available():
364
+ process = psutil.Process(os.getpid())
365
+ else:
366
+ logger.warning(
367
+ "Psutil not installed, we won't log CPU memory usage. "
368
+ "Install psutil (pip install psutil) to use CPU memory tracing."
369
+ )
370
+ process = None
371
+
372
+ if is_py3nvml_available():
373
+ try:
374
+ nvml.nvmlInit()
375
+ devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace
376
+ nvml.nvmlShutdown()
377
+ except (OSError, nvml.NVMLError):
378
+ logger.warning("Error while initializing communication with GPU. We won't perform GPU memory tracing.")
379
+ log_gpu = False
380
+ else:
381
+ log_gpu = is_torch_available() or is_tf_available()
382
+ else:
383
+ logger.warning(
384
+ "py3nvml not installed, we won't log GPU memory usage. "
385
+ "Install py3nvml (pip install py3nvml) to use GPU memory tracing."
386
+ )
387
+ log_gpu = False
388
+
389
+ memory_trace = []
390
+
391
+ def traceit(frame, event, args):
392
+ """
393
+ Tracing method executed before running each line in a module or sub-module Record memory allocated in a list
394
+ with debugging information
395
+ """
396
+ global _is_memory_tracing_enabled
397
+
398
+ if not _is_memory_tracing_enabled:
399
+ return traceit
400
+
401
+ # Filter events
402
+ if events_to_trace is not None:
403
+ if isinstance(events_to_trace, str) and event != events_to_trace:
404
+ return traceit
405
+ elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace:
406
+ return traceit
407
+
408
+ if "__name__" not in frame.f_globals:
409
+ return traceit
410
+
411
+ # Filter modules
412
+ name = frame.f_globals["__name__"]
413
+ if not isinstance(name, str):
414
+ return traceit
415
+ else:
416
+ # Filter whitelist of modules to trace
417
+ if modules_to_trace is not None:
418
+ if isinstance(modules_to_trace, str) and modules_to_trace not in name:
419
+ return traceit
420
+ elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace):
421
+ return traceit
422
+
423
+ # Filter blacklist of modules not to trace
424
+ if modules_not_to_trace is not None:
425
+ if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name:
426
+ return traceit
427
+ elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace):
428
+ return traceit
429
+
430
+ # Record current tracing state (file, location in file...)
431
+ lineno = frame.f_lineno
432
+ filename = frame.f_globals["__file__"]
433
+ if filename.endswith(".pyc") or filename.endswith(".pyo"):
434
+ filename = filename[:-1]
435
+ line = linecache.getline(filename, lineno).rstrip()
436
+ traced_state = Frame(filename, name, lineno, event, line)
437
+
438
+ # Record current memory state (rss memory) and compute difference with previous memory state
439
+ cpu_mem = 0
440
+ if process is not None:
441
+ mem = process.memory_info()
442
+ cpu_mem = mem.rss
443
+
444
+ gpu_mem = 0
445
+ if log_gpu:
446
+ # Clear GPU caches
447
+ if is_torch_available():
448
+ torch_empty_cache()
449
+ if is_tf_available():
450
+ tf_context.context()._clear_caches() # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802
451
+
452
+ # Sum used memory for all GPUs
453
+ nvml.nvmlInit()
454
+
455
+ for i in devices:
456
+ handle = nvml.nvmlDeviceGetHandleByIndex(i)
457
+ meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
458
+ gpu_mem += meminfo.used
459
+
460
+ nvml.nvmlShutdown()
461
+
462
+ mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
463
+ memory_trace.append(mem_state)
464
+
465
+ return traceit
466
+
467
+ sys.settrace(traceit)
468
+
469
+ global _is_memory_tracing_enabled
470
+ _is_memory_tracing_enabled = True
471
+
472
+ return memory_trace
473
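
A short sketch of scoping the trace, reusing the module names from the docstring above; the traced workload itself is elided:

# Restrict line-by-line tracing to the GPT-2 modeling module, skip torch internals.
from transformers.benchmark.benchmark_utils import start_memory_tracing

trace = start_memory_tracing(
    modules_to_trace="transformers.models.gpt2.modeling_gpt2",  # whitelist
    modules_not_to_trace="torch",                               # blacklist
    events_to_trace="line",
)
# ... run the model code to be profiled here ...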
+
474
+
475
+ def stop_memory_tracing(
476
+ memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True
477
+ ) -> Optional[MemorySummary]:
478
+ """
479
+ Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
480
+
481
+ Args:
482
+ `memory_trace` (optional output of start_memory_tracing, default: None):
483
+ memory trace to convert in summary
484
+ `ignore_released_memory` (boolean, default: True):
485
+ if True, only sum memory increases to compute the total memory
486
+
487
+ Return:
488
+
489
+ - None if `memory_trace` is None
490
+ - `MemorySummary` namedtuple otherwise with the fields:
491
+
492
+ - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by
493
+ subtracting the memory before executing each line from the memory after executing said line.
494
+ - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each
495
+ line obtained by summing repeated memory increase for a line if it's executed several times. The list is
496
+ sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative
497
+ if memory is released)
498
+ - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Lines with
499
+ memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
+ - `current`: a list of `MemoryState` namedtuple (see below) with the memory state recorded at each line,
+ sorted from the largest to the smallest CPU + GPU memory consumption
500
+
501
+ `Memory` named tuples have the following fields:
502
+
503
+ - `bytes` (integer): number of bytes,
504
+ - `string` (string): human readable string of the same amount (e.g. "3.5MB")
505
+
506
+ `Frame` are namedtuple used to list the current frame state and have the following fields:
507
+
508
+ - 'filename' (string): Name of the file currently executed
509
+ - 'module' (string): Name of the module currently executed
510
+ - 'line_number' (int): Number of the line currently executed
511
+ - 'event' (string): Event that triggered the tracing (default will be "line")
512
+ - 'line_text' (string): Text of the line in the python script
513
+
514
+ `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
515
+
516
+ - `frame` (`Frame`): the current frame (see above)
517
+ - `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
518
+ - `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
519
+ - `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
520
+ """
521
+ global _is_memory_tracing_enabled
522
+ _is_memory_tracing_enabled = False
523
+
524
+ if memory_trace is not None and len(memory_trace) > 1:
525
+ memory_diff_trace = []
526
+ memory_curr_trace = []
527
+
528
+ cumulative_memory_dict = defaultdict(lambda: [0, 0, 0])
529
+
530
+ for (
531
+ (frame, cpu_mem, gpu_mem),
532
+ (next_frame, next_cpu_mem, next_gpu_mem),
533
+ ) in zip(memory_trace[:-1], memory_trace[1:]):
534
+ cpu_mem_inc = next_cpu_mem - cpu_mem
535
+ gpu_mem_inc = next_gpu_mem - gpu_mem
536
+ cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc
537
+ memory_diff_trace.append(
538
+ MemoryState(
539
+ frame=frame,
540
+ cpu=Memory(cpu_mem_inc),
541
+ gpu=Memory(gpu_mem_inc),
542
+ cpu_gpu=Memory(cpu_gpu_mem_inc),
543
+ )
544
+ )
545
+
546
+ memory_curr_trace.append(
547
+ MemoryState(
548
+ frame=frame,
549
+ cpu=Memory(next_cpu_mem),
550
+ gpu=Memory(next_gpu_mem),
551
+ cpu_gpu=Memory(next_gpu_mem + next_cpu_mem),
552
+ )
553
+ )
554
+
555
+ cumulative_memory_dict[frame][0] += cpu_mem_inc
556
+ cumulative_memory_dict[frame][1] += gpu_mem_inc
557
+ cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc
558
+
559
+ cumulative_memory = sorted(
560
+ cumulative_memory_dict.items(), key=lambda x: x[1][2], reverse=True
561
+ ) # order by the total CPU + GPU memory increase
562
+ cumulative_memory = [
563
+ MemoryState(
564
+ frame=frame,
565
+ cpu=Memory(cpu_mem_inc),
566
+ gpu=Memory(gpu_mem_inc),
567
+ cpu_gpu=Memory(cpu_gpu_mem_inc),
568
+ )
569
+ for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory
570
+ ]
571
+
572
+ memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True)
573
+
574
+ if ignore_released_memory:
575
+ total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)
576
+ else:
577
+ total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)
578
+
579
+ total_memory = Memory(total_memory)
580
+
581
+ return MemorySummary(
582
+ sequential=memory_diff_trace,
583
+ cumulative=cumulative_memory,
584
+ current=memory_curr_trace,
585
+ total=total_memory,
586
+ )
587
+
588
+ return None
589
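
Continuing the tracing sketch, the summary fields described above can be inspected as follows (the values depend entirely on the traced workload):

from transformers.benchmark.benchmark_utils import stop_memory_tracing

summary = stop_memory_tracing(trace)  # `trace` returned by start_memory_tracing(...)
if summary is not None:
    for state in summary.cumulative[:3]:           # three largest cumulative increases
        print(state.frame.filename, state.frame.line_number, state.cpu_gpu.string)
    print("total increase:", summary.total.string)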
+
590
+
591
+ def bytes_to_mega_bytes(memory_amount: int) -> int:
592
+ """Utility to convert a number of bytes (int) into a number of megabytes (int)"""
593
+ return memory_amount >> 20
594
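
For illustration, the right shift by 20 is an integer floor division by 2**20, so fractional megabytes are dropped:

from transformers.benchmark.benchmark_utils import bytes_to_mega_bytes

assert bytes_to_mega_bytes(5 * 2**20) == 5    # exactly 5 MiB
assert bytes_to_mega_bytes(1536 * 1024) == 1  # 1.5 MiB floors to 1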
+
595
+
596
+ class Benchmark(ABC):
597
+ """
598
+ Benchmark is a simple but feature-complete benchmarking class used to compare the memory and time performance of models in
599
+ Transformers.
600
+ """
601
+
602
+ args: BenchmarkArguments
603
+ configs: PretrainedConfig
604
+ framework: str
605
+
606
+ def __init__(self, args: BenchmarkArguments = None, configs: PretrainedConfig = None):
607
+ self.args = args
608
+ if configs is None:
609
+ self.config_dict = {
610
+ model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names
611
+ }
612
+ else:
613
+ self.config_dict = dict(zip(self.args.model_names, configs))
614
+
615
+ warnings.warn(
616
+ f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
617
+ " are deprecated in general and it is advised to use external Benchmarking libraries "
618
+ " to benchmark Transformer models.",
619
+ FutureWarning,
620
+ )
621
+
622
+ if self.args.memory and os.getenv("TRANSFORMERS_USE_MULTIPROCESSING") == 0:
623
+ logger.warning(
624
+ "Memory consumption will not be measured accurately if `args.multi_process` is set to `False`. The"
625
+ " flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing."
626
+ )
627
+
628
+ self._print_fn = None
629
+ self._framework_version = None
630
+ self._environment_info = None
631
+
632
+ @property
633
+ def print_fn(self):
634
+ if self._print_fn is None:
635
+ if self.args.log_print:
636
+
637
+ def print_and_log(*args):
638
+ with open(self.args.log_filename, "a") as log_file:
639
+ log_file.write("".join(args) + "\n")
640
+ print(*args)
641
+
642
+ self._print_fn = print_and_log
643
+ else:
644
+ self._print_fn = print
645
+ return self._print_fn
646
+
647
+ @property
648
+ @abstractmethod
649
+ def framework_version(self):
650
+ pass
651
+
652
+ @abstractmethod
653
+ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
654
+ pass
655
+
656
+ @abstractmethod
657
+ def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
658
+ pass
659
+
660
+ @abstractmethod
661
+ def _inference_memory(
662
+ self, model_name: str, batch_size: int, sequence_length: int
663
+ ) -> [Memory, Optional[MemorySummary]]:
664
+ pass
665
+
666
+ @abstractmethod
667
+ def _train_memory(
668
+ self, model_name: str, batch_size: int, sequence_length: int
669
+ ) -> [Memory, Optional[MemorySummary]]:
670
+ pass
671
+
672
+ def inference_speed(self, *args, **kwargs) -> float:
673
+ return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs)
674
+
675
+ def train_speed(self, *args, **kwargs) -> float:
676
+ return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs)
677
+
678
+ def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
679
+ return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs)
680
+
681
+ def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
682
+ return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs)
683
+
684
+ def run(self):
685
+ result_dict = {model_name: {} for model_name in self.args.model_names}
686
+ inference_result_time = copy.deepcopy(result_dict)
687
+ inference_result_memory = copy.deepcopy(result_dict)
688
+ train_result_time = copy.deepcopy(result_dict)
689
+ train_result_memory = copy.deepcopy(result_dict)
690
+
691
+ for c, model_name in enumerate(self.args.model_names):
692
+ self.print_fn(f"{c + 1} / {len(self.args.model_names)}")
693
+
694
+ model_dict = {
695
+ "bs": self.args.batch_sizes,
696
+ "ss": self.args.sequence_lengths,
697
+ "result": {i: {} for i in self.args.batch_sizes},
698
+ }
699
+ inference_result_time[model_name] = copy.deepcopy(model_dict)
700
+ inference_result_memory[model_name] = copy.deepcopy(model_dict)
701
+ train_result_time[model_name] = copy.deepcopy(model_dict)
702
+ train_result_memory[model_name] = copy.deepcopy(model_dict)
703
+
704
+ inference_summary = train_summary = None
705
+
706
+ for batch_size in self.args.batch_sizes:
707
+ for sequence_length in self.args.sequence_lengths:
708
+ if self.args.inference:
709
+ if self.args.memory:
710
+ memory, inference_summary = self.inference_memory(model_name, batch_size, sequence_length)
711
+ inference_result_memory[model_name]["result"][batch_size][sequence_length] = memory
712
+ if self.args.speed:
713
+ time = self.inference_speed(model_name, batch_size, sequence_length)
714
+ inference_result_time[model_name]["result"][batch_size][sequence_length] = time
715
+
716
+ if self.args.training:
717
+ if self.args.memory:
718
+ memory, train_summary = self.train_memory(model_name, batch_size, sequence_length)
719
+ train_result_memory[model_name]["result"][batch_size][sequence_length] = memory
720
+ if self.args.speed:
721
+ time = self.train_speed(model_name, batch_size, sequence_length)
722
+ train_result_time[model_name]["result"][batch_size][sequence_length] = time
723
+
724
+ if self.args.inference:
725
+ if self.args.speed:
726
+ self.print_fn("\n" + 20 * "=" + ("INFERENCE - SPEED - RESULT").center(40) + 20 * "=")
727
+ self.print_results(inference_result_time, type_label="Time in s")
728
+ self.save_to_csv(inference_result_time, self.args.inference_time_csv_file)
729
+ if self.args.is_tpu:
730
+ self.print_fn(
731
+ "TPU was used for inference. Note that the time after compilation stabilized (after ~10"
732
+ " inferences model.forward(..) calls) was measured."
733
+ )
734
+
735
+ if self.args.memory:
736
+ self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - RESULT").center(40) + 20 * "=")
737
+ self.print_results(inference_result_memory, type_label="Memory in MB")
738
+ self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file)
739
+
740
+ if self.args.trace_memory_line_by_line:
741
+ self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
742
+ self.print_memory_trace_statistics(inference_summary)
743
+
744
+ if self.args.training:
745
+ if self.args.speed:
746
+ self.print_fn("\n" + 20 * "=" + ("TRAIN - SPEED - RESULTS").center(40) + 20 * "=")
747
+ self.print_results(train_result_time, "Time in s")
748
+ self.save_to_csv(train_result_time, self.args.train_time_csv_file)
749
+ if self.args.is_tpu:
750
+ self.print_fn(
751
+ "TPU was used for training. Note that the time after compilation stabilized (after ~10 train"
752
+ " loss=model.forward(...) + loss.backward() calls) was measured."
753
+ )
754
+
755
+ if self.args.memory:
756
+ self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - RESULTS").center(40) + 20 * "=")
757
+ self.print_results(train_result_memory, type_label="Memory in MB")
758
+ self.save_to_csv(train_result_memory, self.args.train_memory_csv_file)
759
+
760
+ if self.args.trace_memory_line_by_line:
761
+ self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
762
+ self.print_memory_trace_statistics(train_summary)
763
+
764
+ if self.args.env_print:
765
+ self.print_fn("\n" + 20 * "=" + ("ENVIRONMENT INFORMATION").center(40) + 20 * "=")
766
+ self.print_fn("\n".join([f"- {prop}: {val}" for prop, val in self.environment_info.items()]) + "\n")
767
+
768
+ if self.args.save_to_csv:
769
+ with open(self.args.env_info_csv_file, mode="w", newline="") as csv_file:
770
+ writer = csv.writer(csv_file)
771
+ for key, value in self.environment_info.items():
772
+ writer.writerow([key, value])
773
+
774
+ return BenchmarkOutput(
775
+ inference_result_time,
776
+ inference_result_memory,
777
+ train_result_time,
778
+ train_result_memory,
779
+ inference_summary,
780
+ train_summary,
781
+ )
782
+
783
+ @property
784
+ def environment_info(self):
785
+ if self._environment_info is None:
786
+ info = {}
787
+ info["transformers_version"] = version
788
+ info["framework"] = self.framework
789
+ if self.framework == "PyTorch":
790
+ info["use_torchscript"] = self.args.torchscript
791
+ if self.framework == "TensorFlow":
792
+ info["eager_mode"] = self.args.eager_mode
793
+ info["use_xla"] = self.args.use_xla
794
+ info["framework_version"] = self.framework_version
795
+ info["python_version"] = platform.python_version()
796
+ info["system"] = platform.system()
797
+ info["cpu"] = platform.processor()
798
+ info["architecture"] = platform.architecture()[0]
799
+ info["date"] = datetime.date(datetime.now())
800
+ info["time"] = datetime.time(datetime.now())
801
+ info["fp16"] = self.args.fp16
802
+ info["use_multiprocessing"] = self.args.do_multi_processing
803
+ info["only_pretrain_model"] = self.args.only_pretrain_model
804
+
805
+ if is_psutil_available():
806
+ info["cpu_ram_mb"] = bytes_to_mega_bytes(psutil.virtual_memory().total)
807
+ else:
808
+ logger.warning(
809
+ "Psutil not installed, we won't log available CPU memory. "
810
+ "Install psutil (pip install psutil) to log available CPU memory."
811
+ )
812
+ info["cpu_ram_mb"] = "N/A"
813
+
814
+ info["use_gpu"] = self.args.is_gpu
815
+ if self.args.is_gpu:
816
+ info["num_gpus"] = 1 # TODO(PVP) Currently only single GPU is supported
817
+ if is_py3nvml_available():
818
+ nvml.nvmlInit()
819
+ handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
820
+ info["gpu"] = nvml.nvmlDeviceGetName(handle)
821
+ info["gpu_ram_mb"] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total)
822
+ info["gpu_power_watts"] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000
823
+ info["gpu_performance_state"] = nvml.nvmlDeviceGetPerformanceState(handle)
824
+ nvml.nvmlShutdown()
825
+ else:
826
+ logger.warning(
827
+ "py3nvml not installed, we won't log GPU memory usage. "
828
+ "Install py3nvml (pip install py3nvml) to log information about GPU."
829
+ )
830
+ info["gpu"] = "N/A"
831
+ info["gpu_ram_mb"] = "N/A"
832
+ info["gpu_power_watts"] = "N/A"
833
+ info["gpu_performance_state"] = "N/A"
834
+
835
+ info["use_tpu"] = self.args.is_tpu
836
+ # TODO(PVP): See if we can add more information about TPU
837
+ # see: https://github.com/pytorch/xla/issues/2180
838
+
839
+ self._environment_info = info
840
+ return self._environment_info
841
+
842
+ def print_results(self, result_dict, type_label):
843
+ self.print_fn(80 * "-")
844
+ self.print_fn(
845
+ "Model Name".center(30) + "Batch Size".center(15) + "Seq Length".center(15) + type_label.center(15)
846
+ )
847
+ self.print_fn(80 * "-")
848
+ for model_name in self.args.model_names:
849
+ for batch_size in result_dict[model_name]["bs"]:
850
+ for sequence_length in result_dict[model_name]["ss"]:
851
+ result = result_dict[model_name]["result"][batch_size][sequence_length]
852
+ if isinstance(result, float):
853
+ result = round(1000 * result) / 1000
854
+ result = "< 0.001" if result == 0.0 else str(result)
855
+ else:
856
+ result = str(result)
857
+ self.print_fn(
858
+ model_name[:30].center(30) + str(batch_size).center(15),
859
+ str(sequence_length).center(15),
860
+ result.center(15),
861
+ )
862
+ self.print_fn(80 * "-")
863
+
864
+ def print_memory_trace_statistics(self, summary: MemorySummary):
865
+ self.print_fn(
866
+ "\nLine by line memory consumption:\n"
867
+ + "\n".join(
868
+ f"{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
869
+ for state in summary.sequential
870
+ )
871
+ )
872
+ self.print_fn(
873
+ "\nLines with top memory consumption:\n"
874
+ + "\n".join(
875
+ f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
876
+ for state in summary.cumulative[:6]
877
+ )
878
+ )
879
+ self.print_fn(
880
+ "\nLines with lowest memory consumption:\n"
881
+ + "\n".join(
882
+ f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
883
+ for state in summary.cumulative[-6:]
884
+ )
885
+ )
886
+ self.print_fn(f"\nTotal memory increase: {summary.total}")
887
+
888
+ def save_to_csv(self, result_dict, filename):
889
+ if not self.args.save_to_csv:
890
+ return
891
+ self.print_fn("Saving results to csv.")
892
+ with open(filename, mode="w") as csv_file:
893
+ if len(self.args.model_names) <= 0:
894
+ raise ValueError(f"At least 1 model should be defined, but got {self.args.model_names}")
895
+
896
+ fieldnames = ["model", "batch_size", "sequence_length"]
897
+ writer = csv.DictWriter(csv_file, fieldnames=fieldnames + ["result"])
898
+ writer.writeheader()
899
+
900
+ for model_name in self.args.model_names:
901
+ result_dict_model = result_dict[model_name]["result"]
902
+ for bs in result_dict_model:
903
+ for ss in result_dict_model[bs]:
904
+ result_model = result_dict_model[bs][ss]
905
+ writer.writerow(
906
+ {
907
+ "model": model_name,
908
+ "batch_size": bs,
909
+ "sequence_length": ss,
910
+ "result": ("{}" if not isinstance(result_model, float) else "{:.4f}").format(
911
+ result_model
912
+ ),
913
+ }
914
+ )
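
The abstract class above is driven through its framework-specific subclasses; a hedged sketch using the PyTorch pair defined in `benchmark.py` of this same package (argument names assumed from `benchmark_args_utils.py`; these utilities are deprecated, so this is illustrative only):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["gpt2"],
    batch_sizes=[1],
    sequence_lengths=[128],
    inference=True,
    training=False,
    memory=True,
    speed=True,
)
results = PyTorchBenchmark(args).run()  # returns a BenchmarkOutput namedtuple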
llmeval-env/lib/python3.10/site-packages/transformers/cache_utils.py ADDED
@@ -0,0 +1,435 @@
1
+ from dataclasses import dataclass
2
+ from typing import Any, Dict, List, Optional, Tuple
3
+
4
+ import torch
5
+
6
+ from .configuration_utils import PretrainedConfig
7
+ from .utils import logging
8
+
9
+
10
+ logger = logging.get_logger(__name__)
11
+
12
+
13
+ @dataclass
14
+ class Cache:
15
+ """
16
+ Base, abstract class for all caches. The actual data structure is specific to each subclass.
17
+ """
18
+
19
+ def update(
20
+ self,
21
+ key_states: torch.Tensor,
22
+ value_states: torch.Tensor,
23
+ layer_idx: int,
24
+ cache_kwargs: Optional[Dict[str, Any]] = None,
25
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
26
+ """
27
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
28
+
29
+ Parameters:
30
+ key_states (`torch.Tensor`):
31
+ The new key states to cache.
32
+ value_states (`torch.Tensor`):
33
+ The new value states to cache.
34
+ layer_idx (`int`):
35
+ The index of the layer to cache the states for.
36
+ cache_kwargs (`Dict[str, Any]`, `optional`):
37
+ Additional arguments for the cache subclass. These are specific to each subclass and allow new types of
38
+ cache to be created.
39
+
40
+ Return:
41
+ A tuple containing the updated key and value states.
42
+ """
43
+ raise NotImplementedError("Make sure to implement `update` in a subclass.")
44
+
45
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
46
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
47
+ raise NotImplementedError("Make sure to implement `get_seq_length` in a subclass.")
48
+
49
+ def get_max_length(self) -> Optional[int]:
50
+ """Returns the maximum sequence length of the cached states, if there is any."""
51
+ raise NotImplementedError("Make sure to implement `get_max_length` in a subclass.")
52
+
53
+ def get_usable_length(self, new_seq_length: int, layer_idx: Optional[int] = 0) -> int:
54
+ """Given the sequence length of the new inputs, returns the usable length of the cache."""
55
+ # Cache without size limit -> all cache is usable
56
+ # Cache with size limit -> if the cache length plus the length of the new inputs is larger than the maximum cache
57
+ # length, we will need to evict part of the cache (and thus not all cache is usable)
58
+ max_length = self.get_max_length()
59
+ previous_seq_length = self.get_seq_length(layer_idx)
60
+ if max_length is not None and previous_seq_length + new_seq_length > max_length:
61
+ return max_length - new_seq_length
62
+ return previous_seq_length
63
+
64
+ @property
65
+ def seen_tokens(self):
66
+ logger.warning_once(
67
+ "The `seen_tokens` attribute is deprecated and will be removed in v4.41. Use the `cache_position` "
68
+ "model input instead."
69
+ )
70
+ if hasattr(self, "_seen_tokens"):
71
+ return self._seen_tokens
72
+ else:
73
+ return None
74
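
A worked illustration of the eviction arithmetic in `get_usable_length` above, with hypothetical numbers:

# Hypothetical numbers: a bounded cache of 8 positions already holding 6 tokens and
# receiving 4 new tokens -> only max_length - new_seq_length = 4 cached positions stay usable.
max_length, previous_seq_length, new_seq_length = 8, 6, 4
if max_length is not None and previous_seq_length + new_seq_length > max_length:
    usable_length = max_length - new_seq_length
else:
    usable_length = previous_seq_length
assert usable_length == 4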
+
75
+
76
+ class DynamicCache(Cache):
77
+ """
78
+ A cache that grows dynamically as more tokens are generated. This is the default for generative models.
79
+
80
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
81
+ `[batch_size, num_heads, seq_len, head_dim]`.
82
+ """
83
+
84
+ def __init__(self) -> None:
85
+ self.key_cache: List[torch.Tensor] = []
86
+ self.value_cache: List[torch.Tensor] = []
87
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
88
+
89
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
90
+ """
91
+ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
92
+ sequence length.
93
+ """
94
+ if layer_idx < len(self):
95
+ return (self.key_cache[layer_idx], self.value_cache[layer_idx])
96
+ else:
97
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
98
+
99
+ def __iter__(self):
100
+ """
101
+ Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over
102
+ keys and values
103
+ """
104
+ for layer_idx in range(len(self)):
105
+ yield (self.key_cache[layer_idx], self.value_cache[layer_idx])
106
+
107
+ def __len__(self):
108
+ """
109
+ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds
110
+ to the number of layers in the model.
111
+ """
112
+ return len(self.key_cache)
113
+
114
+ def update(
115
+ self,
116
+ key_states: torch.Tensor,
117
+ value_states: torch.Tensor,
118
+ layer_idx: int,
119
+ cache_kwargs: Optional[Dict[str, Any]] = None,
120
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
121
+ """
122
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
123
+
124
+ Parameters:
125
+ key_states (`torch.Tensor`):
126
+ The new key states to cache.
127
+ value_states (`torch.Tensor`):
128
+ The new value states to cache.
129
+ layer_idx (`int`):
130
+ The index of the layer to cache the states for.
131
+ cache_kwargs (`Dict[str, Any]`, `optional`):
132
+ Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
133
+
134
+ Return:
135
+ A tuple containing the updated key and value states.
136
+ """
137
+ # Update the number of seen tokens
138
+ if layer_idx == 0:
139
+ self._seen_tokens += key_states.shape[-2]
140
+
141
+ # Update the cache
142
+ if len(self.key_cache) <= layer_idx:
143
+ self.key_cache.append(key_states)
144
+ self.value_cache.append(value_states)
145
+ else:
146
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
147
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
148
+
149
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
150
+
151
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
152
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
153
+ if len(self.key_cache) <= layer_idx:
154
+ return 0
155
+ return self.key_cache[layer_idx].shape[-2]
156
+
157
+ def get_max_length(self) -> Optional[int]:
158
+ """Returns the maximum sequence length of the cached states. DynamicCache does not have a maximum length."""
159
+ return None
160
+
161
+ def reorder_cache(self, beam_idx: torch.LongTensor):
162
+ """Reorders the cache for beam search, given the selected beam indices."""
163
+ for layer_idx in range(len(self.key_cache)):
164
+ device = self.key_cache[layer_idx].device
165
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
166
+ device = self.value_cache[layer_idx].device
167
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
168
+
169
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
170
+ """Converts the `DynamicCache` instance into its equivalent in the legacy cache format."""
171
+ legacy_cache = ()
172
+ for layer_idx in range(len(self)):
173
+ legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),)
174
+ return legacy_cache
175
+
176
+ @classmethod
177
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
178
+ """Converts a cache in the legacy cache format into an equivalent `DynamicCache`."""
179
+ cache = cls()
180
+ if past_key_values is not None:
181
+ for layer_idx in range(len(past_key_values)):
182
+ key_states, value_states = past_key_values[layer_idx]
183
+ cache.update(key_states, value_states, layer_idx)
184
+ return cache
185
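
A small sketch of driving the cache manually (in practice `generate` manages it); shapes follow the `[batch_size, num_heads, seq_len, head_dim]` convention stated above:

import torch
from transformers.cache_utils import DynamicCache

cache = DynamicCache()
k = torch.randn(1, 4, 3, 16)  # 3 "prompt" tokens
v = torch.randn(1, 4, 3, 16)
cache.update(k, v, layer_idx=0)
cache.update(torch.randn(1, 4, 1, 16), torch.randn(1, 4, 1, 16), layer_idx=0)  # 1 new token
assert cache.get_seq_length(0) == 4
legacy = cache.to_legacy_cache()                 # tuple-of-tuples format
cache2 = DynamicCache.from_legacy_cache(legacy)  # round-trip back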
+
186
+
187
+ class SinkCache(Cache):
188
+ """
189
+ A cache, as described in the [Attention Sinks paper](https://arxiv.org/abs/2309.17453), that allows the model to
190
+ generate beyond the length of its context window, without losing fluency in the conversation. As it discards past
191
+ tokens, the model will lose the ability to generate tokens that depend on the context that was discarded.
192
+
193
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
194
+ `[batch_size, num_heads, seq_len, head_dim]`.
195
+
196
+ Parameters:
197
+ window_length (`int`):
198
+ The length of the context window.
199
+ num_sink_tokens (`int`):
200
+ The number of sink tokens. See the original paper for more information.
201
+ """
202
+
203
+ def __init__(self, window_length: int, num_sink_tokens: int) -> None:
204
+ self.key_cache: List[torch.Tensor] = []
205
+ self.value_cache: List[torch.Tensor] = []
206
+ self.window_length = window_length
207
+ self.num_sink_tokens = num_sink_tokens
208
+ self.cos_sin_cache = {}
209
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
210
+
211
+ @staticmethod
212
+ def _rotate_half(x):
213
+ x1 = x[..., : x.shape[-1] // 2]
214
+ x2 = x[..., x.shape[-1] // 2 :]
215
+ return torch.cat((-x2, x1), dim=-1)
216
+
217
+ def _apply_key_rotary_pos_emb(
218
+ self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
219
+ ) -> torch.Tensor:
220
+ rotated_key_states = (key_states * cos) + (self._rotate_half(key_states) * sin)
221
+ return rotated_key_states
222
+
223
+ def _get_rerotation_cos_sin(
224
+ self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
225
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
226
+ if key_states.shape[-2] not in self.cos_sin_cache:
227
+ # Upcast to float32 temporarily for better accuracy
228
+ cos = cos.to(torch.float32)
229
+ sin = sin.to(torch.float32)
230
+
231
+ # Compute the cos and sin required for back- and forward-rotating to one position earlier in the sequence
232
+ original_cos = cos[self.num_sink_tokens + key_states.shape[-2] :]
233
+ shifted_cos = cos[self.num_sink_tokens : -key_states.shape[-2]]
234
+ original_sin = sin[self.num_sink_tokens + key_states.shape[-2] :]
235
+ shifted_sin = sin[self.num_sink_tokens : -key_states.shape[-2]]
236
+ rerotation_cos = original_cos * shifted_cos + original_sin * shifted_sin
237
+ rerotation_sin = -original_sin * shifted_cos + original_cos * shifted_sin
238
+
239
+ self.cos_sin_cache[key_states.shape[-2]] = (
240
+ rerotation_cos.to(key_states.dtype).unsqueeze(0),
241
+ rerotation_sin.to(key_states.dtype).unsqueeze(0),
242
+ )
243
+ return self.cos_sin_cache[key_states.shape[-2]]
244
+
245
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
246
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
247
+ # Workaround to make 'key_states.shape[-2] + past_key_value.get_seq_length(self.layer_idx)' <= window_length
248
+ if len(self.key_cache) <= layer_idx:
249
+ return 0
250
+ return self.key_cache[layer_idx].shape[-2]
251
+
252
+ def get_max_length(self) -> Optional[int]:
253
+ """Returns the maximum sequence length of the cached states."""
254
+ return self.window_length
255
+
256
+ def update(
257
+ self,
258
+ key_states: torch.Tensor,
259
+ value_states: torch.Tensor,
260
+ layer_idx: int,
261
+ cache_kwargs: Optional[Dict[str, Any]] = None,
262
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
263
+ """
264
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
265
+
266
+ Parameters:
267
+ key_states (`torch.Tensor`):
268
+ The new key states to cache.
269
+ value_states (`torch.Tensor`):
270
+ The new value states to cache.
271
+ layer_idx (`int`):
272
+ The index of the layer to cache the states for.
273
+ cache_kwargs (`Dict[str, Any]`, `optional`):
274
+ Additional arguments for the cache subclass. The following arguments can be used in `SinkCache`: `sin`,
275
+ `cos` and `partial_rotation_size`. These arguments are used with models using RoPE, to recompute the
276
+ rotation as the tokens are shifted.
277
+
278
+ Return:
279
+ A tuple containing the updated key and value states.
280
+ """
281
+ # Optional kwargs for `SinkCache` -- needed on models using RoPE. `partial_rotation_size` is used on models
282
+ # with partially rotated position embeddings, like Phi or Persimmon.
283
+ sin = cache_kwargs.get("sin")
284
+ cos = cache_kwargs.get("cos")
285
+ partial_rotation_size = cache_kwargs.get("partial_rotation_size")
286
+ using_rope = cos is not None and sin is not None
287
+
288
+ # Update the number of seen tokens
289
+ if layer_idx == 0:
290
+ self._seen_tokens += key_states.shape[-2]
291
+
292
+ # [bsz, num_heads, seq_len, head_dim]
293
+ if len(self.key_cache) <= layer_idx:
294
+ # Empty cache
295
+ self.key_cache.append(key_states)
296
+ self.value_cache.append(value_states)
297
+
298
+ elif key_states.shape[-2] + self.get_seq_length(layer_idx) < self.window_length:
299
+ # Growing cache
300
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
301
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
302
+
303
+ else:
304
+ # Shifting cache
305
+ keys_to_keep = self.key_cache[layer_idx][
306
+ :, :, -self.window_length + self.num_sink_tokens + key_states.shape[-2] :
307
+ ]
308
+
309
+ # On RoPE models, we need to recompute the Key rotation as the tokens are shifted
310
+ if using_rope:
311
+ rerotation_cos, rerotation_sin = self._get_rerotation_cos_sin(
312
+ key_states, cos[: self.window_length], sin[: self.window_length]
313
+ )
314
+ if partial_rotation_size is not None:
315
+ keys_to_keep, keys_pass = (
316
+ keys_to_keep[..., :partial_rotation_size],
317
+ keys_to_keep[..., partial_rotation_size:],
318
+ )
319
+ keys_to_keep = self._apply_key_rotary_pos_emb(keys_to_keep, rerotation_cos, rerotation_sin)
320
+ if partial_rotation_size is not None:
321
+ keys_to_keep = torch.cat((keys_to_keep, keys_pass), dim=-1)
322
+
323
+ # Concatenate sink tokens, shifted & rotated tokens (if needed), and new tokens
324
+ sink_keys = self.key_cache[layer_idx][:, :, : self.num_sink_tokens]
325
+ self.key_cache[layer_idx] = torch.cat([sink_keys, keys_to_keep, key_states], dim=-2)
326
+
327
+ sink_values = self.value_cache[layer_idx][:, :, : self.num_sink_tokens]
328
+ values_to_keep = self.value_cache[layer_idx][
329
+ :, :, -self.window_length + self.num_sink_tokens + value_states.shape[-2] :
330
+ ]
331
+ self.value_cache[layer_idx] = torch.cat([sink_values, values_to_keep, value_states], dim=-2)
332
+
333
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
334
+
335
+ def reorder_cache(self, beam_idx: torch.LongTensor):
336
+ """Reorders the cache for beam search, given the selected beam indices."""
337
+ for layer_idx in range(len(self.key_cache)):
338
+ device = self.key_cache[layer_idx].device
339
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
340
+ device = self.value_cache[layer_idx].device
341
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
342
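
A minimal sketch of the sink cache in isolation; real RoPE models additionally pass `sin`/`cos` through `cache_kwargs`, which this sketch omits:

import torch
from transformers.cache_utils import SinkCache

cache = SinkCache(window_length=6, num_sink_tokens=2)
k = torch.randn(1, 2, 5, 8)  # 5 tokens, still within the 6-token window
v = torch.randn(1, 2, 5, 8)
cache.update(k, v, layer_idx=0, cache_kwargs={})  # no RoPE kwargs in this sketch
assert cache.get_max_length() == 6
assert cache.get_seq_length(0) == 5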
+
343
+
344
+ class StaticCache(Cache):
345
+ """
346
+ Static Cache class to be used with `torch.compile(model)`.
347
+
348
+ Parameters:
349
+ config (`PretrainedConfig`):
350
+ The configuration file defining the `max_position_embeddings`, `hidden_size` and `num_attention_heads`
351
+ required to initialize the static cache.
352
+ max_batch_size (`int`):
353
+ The maximum batch size with which the model will be used.
354
+ max_cache_len (`int`):
355
+ The maximum sequence length with which the model will be used.
356
+ device (`torch.device`):
357
+ The device on which the cache should be initialized. Should be the same as the layer.
358
+ dtype (*optional*, defaults to `torch.float32`):
359
+ The default `dtype` to use when initializing the layer.
360
+ """
361
+
362
+ def __init__(self, config: PretrainedConfig, max_batch_size: int, max_cache_len: int, device, dtype=None) -> None:
363
+ super().__init__()
364
+ self.max_batch_size = max_batch_size
365
+ self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len
366
+ # Some models define a custom `head_dim` != config.hidden_size // config.num_attention_heads
367
+ self.head_dim = (
368
+ config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
369
+ )
370
+
371
+ self.dtype = dtype if dtype is not None else torch.float32
372
+ self.num_key_value_heads = (
373
+ config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads
374
+ )
375
+
376
+ cache_shape = (max_batch_size, self.num_key_value_heads, self.max_cache_len, self.head_dim)
377
+ self.key_cache: torch.Tensor = torch.zeros(cache_shape, dtype=self.dtype, device=device)
378
+ self.value_cache: torch.Tensor = torch.zeros(cache_shape, dtype=self.dtype, device=device)
379
+
380
+ def update(
381
+ self,
382
+ key_states: torch.Tensor,
383
+ value_states: torch.Tensor,
384
+ layer_idx: int,
385
+ cache_kwargs: Optional[Dict[str, Any]] = None,
386
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
387
+ """
388
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
389
+ It is VERY important to index using a tensor, otherwise you introduce a copy to the device.
390
+
391
+ Parameters:
392
+ key_states (`torch.Tensor`):
393
+ The new key states to cache.
394
+ value_states (`torch.Tensor`):
395
+ The new value states to cache.
396
+ layer_idx (`int`):
397
+ The index of the layer to cache the states for. Kept for backward compatibility
398
+ cache_kwargs (`Dict[str, Any]`, `optional`):
399
+ Additional arguments for the cache subclass. The `StaticCache` just needs the `q_len`
400
+ to know how much of the cache it should overwrite.
401
+
402
+ Return:
403
+ A tuple containing the updated key and value states.
404
+ """
405
+ new_cache_positions = cache_kwargs.get("cache_position")
406
+ k_out = self.key_cache
407
+ v_out = self.value_cache
408
+
409
+ k_out[:, :, new_cache_positions] = key_states
410
+ v_out[:, :, new_cache_positions] = value_states
411
+
412
+ return k_out, v_out
413
+
414
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
415
+ """Returns the sequence length of the cached states that were seen by the model. `layer_idx` kept for BC"""
416
+ # Occupied cache == any slot in the 3rd dim (sequence length) holds a non-zero value. To save on compute, let's
417
+ # limit the check to the first batch member and head dimension.
418
+ # TODO: This is error prone, a filled cache may be `0.0`. Let's use a stateless integer instead, after
419
+ # https://github.com/pytorch/pytorch/issues/120248 is fixed
420
+ return (self.key_cache[0, 0].any(dim=-1)).sum()
421
+
422
+ def get_max_length(self) -> Optional[int]:
423
+ """Returns the maximum sequence length of the cached states."""
424
+ return self.max_cache_len
425
+
426
+ def reorder_cache(self, beam_idx: torch.LongTensor):
427
+ """Reorders the cache for beam search, given the selected beam indices."""
428
+ device = self.key_cache.device
429
+ self.key_cache = self.key_cache.index_select(0, beam_idx.to(device))
430
+ device = self.value_cache.device
431
+ self.value_cache = self.value_cache.index_select(0, beam_idx.to(device))
432
+
433
+ def to_legacy_cache(self):
434
+ """Dummy function for BC. We have to keep it because otherwise the call in the forward of models will break."""
435
+ return None
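
A hedged sketch of pre-allocating a static cache from a small, hypothetical Llama-style config and writing the first positions into it:

import torch
from transformers import LlamaConfig
from transformers.cache_utils import StaticCache

config = LlamaConfig(
    hidden_size=64, num_attention_heads=4, num_key_value_heads=4,
    num_hidden_layers=1, max_position_embeddings=128,
)
cache = StaticCache(config, max_batch_size=1, max_cache_len=32, device="cpu", dtype=torch.float16)
cache_position = torch.arange(3)                   # positions being written
k = torch.randn(1, 4, 3, 16, dtype=torch.float16)  # head_dim = 64 // 4 = 16
v = torch.randn(1, 4, 3, 16, dtype=torch.float16)
cache.update(k, v, layer_idx=0, cache_kwargs={"cache_position": cache_position})
assert int(cache.get_seq_length()) == 3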
llmeval-env/lib/python3.10/site-packages/transformers/configuration_utils.py ADDED
@@ -0,0 +1,1133 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Configuration base class and utilities."""
17
+
18
+
19
+ import copy
20
+ import json
21
+ import os
22
+ import re
23
+ import warnings
24
+ from typing import Any, Dict, List, Optional, Tuple, Union
25
+
26
+ from packaging import version
27
+
28
+ from . import __version__
29
+ from .dynamic_module_utils import custom_object_save
30
+ from .utils import (
31
+ CONFIG_NAME,
32
+ PushToHubMixin,
33
+ add_model_info_to_auto_map,
34
+ cached_file,
35
+ copy_func,
36
+ download_url,
37
+ extract_commit_hash,
38
+ is_remote_url,
39
+ is_torch_available,
40
+ logging,
41
+ )
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _re_configuration_file = re.compile(r"config\.(.*)\.json")
47
+
48
+
49
+ class PretrainedConfig(PushToHubMixin):
50
+ # no-format
51
+ r"""
52
+ Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
53
+ methods for loading/downloading/saving configurations.
54
+
55
+ <Tip>
56
+
57
+ A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
58
+ initialize a model does **not** load the model weights. It only affects the model's configuration.
59
+
60
+ </Tip>
61
+
62
+ Class attributes (overridden by derived classes):
63
+
64
+ - **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate
65
+ the correct object in [`~transformers.AutoConfig`].
66
+ - **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the
67
+ config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:
68
+ [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].
69
+ - **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary
70
+ outputs of the model during inference.
71
+ - **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized
72
+ naming of attributes.
73
+
74
+ Common attributes (present in all subclasses):
75
+
76
+ - **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the
77
+ embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
78
+ - **hidden_size** (`int`) -- The hidden size of the model.
79
+ - **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the
80
+ model.
81
+ - **num_hidden_layers** (`int`) -- The number of blocks in the model.
82
+
83
+ Arg:
84
+ name_or_path (`str`, *optional*, defaults to `""`):
85
+ Store the string that was passed to [`PreTrainedModel.from_pretrained`] or
86
+ [`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created
87
+ with such a method.
88
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
89
+ Whether or not the model should return all hidden-states.
90
+ output_attentions (`bool`, *optional*, defaults to `False`):
91
+ Whether or not the model should returns all attentions.
92
+ return_dict (`bool`, *optional*, defaults to `True`):
93
+ Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.
94
+ is_encoder_decoder (`bool`, *optional*, defaults to `False`):
95
+ Whether the model is used as an encoder/decoder or not.
96
+ is_decoder (`bool`, *optional*, defaults to `False`):
97
+ Whether the model is used as decoder or not (in which case it's used as an encoder).
98
+ cross_attention_hidden_size (`int`, *optional*):
99
+ The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
100
+ setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
101
+ add_cross_attention (`bool`, *optional*, defaults to `False`):
102
+ Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
103
+ that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
104
+ in `AUTO_MODELS_FOR_CAUSAL_LM`.
105
+ tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
106
+ Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
107
+ and decoder model to have the exact same parameter names.
108
+ prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):
109
+ Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
110
+ heads to prune in said layer.
111
+
112
+ For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
113
+ chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
114
+ The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
115
+ the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <
116
+ sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed
117
+ Forward Chunking work?](../glossary.html#feed-forward-chunking).
118
+
119
+ > Parameters for sequence generation
120
+
121
+ max_length (`int`, *optional*, defaults to 20):
122
+ Maximum length that will be used by default in the `generate` method of the model.
123
+ min_length (`int`, *optional*, defaults to 0):
124
+ Minimum length that will be used by default in the `generate` method of the model.
125
+ do_sample (`bool`, *optional*, defaults to `False`):
126
+ Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling;
127
+ use greedy decoding otherwise.
128
+ early_stopping (`bool`, *optional*, defaults to `False`):
129
+ Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
130
+ when at least `num_beams` sentences are finished per batch or not.
131
+ num_beams (`int`, *optional*, defaults to 1):
132
+ Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
133
+ no beam search.
134
+ num_beam_groups (`int`, *optional*, defaults to 1):
135
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams
136
+ that will be used by default in the `generate` method of the model. 1 means no group beam search.
137
+ diversity_penalty (`float`, *optional*, defaults to 0.0):
138
+ Value to control diversity for group beam search. that will be used by default in the `generate` method of
139
+ the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.
140
+ temperature (`float`, *optional*, defaults to 1.0):
141
+ The value used to module the next token probabilities that will be used by default in the `generate` method
142
+ of the model. Must be strictly positive.
143
+ top_k (`int`, *optional*, defaults to 50):
144
+ Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in
145
+ the `generate` method of the model.
146
+ top_p (`float`, *optional*, defaults to 1):
147
+ Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1,
148
+ only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
149
+ typical_p (`float`, *optional*, defaults to 1):
150
+ Local typicality measures how similar the conditional probability of predicting a target token next is to
151
+ the expected conditional probability of predicting a random token next, given the partial text already
152
+ generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
153
+ add up to `typical_p` or higher are kept for generation. See [this
154
+ paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
155
+ repetition_penalty (`float`, *optional*, defaults to 1):
156
+ Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0
157
+ means no penalty.
158
+ length_penalty (`float`, *optional*, defaults to 1):
159
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
160
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
161
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
162
+ `length_penalty` < 0.0 encourages shorter sequences.
163
+ no_repeat_ngram_size (`int`, *optional*, defaults to 0): Value that will be used by default in the
164
+ `generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can
165
+ only occur once.
166
+ encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0): Value that will be used by
167
+ default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all
168
+ ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.
169
+ bad_words_ids (`List[int]`, *optional*):
170
+ List of token ids that are not allowed to be generated that will be used by default in the `generate`
171
+ method of the model. In order to get the tokens of the words that should not appear in the generated text,
172
+ use `tokenizer.encode(bad_word, add_prefix_space=True)`.
173
+ num_return_sequences (`int`, *optional*, defaults to 1):
174
+ Number of independently computed returned sequences for each element in the batch that will be used by
175
+ default in the `generate` method of the model.
176
+ output_scores (`bool`, *optional*, defaults to `False`):
177
+ Whether the model should return the logits when used for generation.
178
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
179
+ Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.
180
+ forced_bos_token_id (`int`, *optional*):
181
+ The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
182
+ multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
183
+ language token.
184
+ forced_eos_token_id (`int`, *optional*):
185
+ The id of the token to force as the last generated token when `max_length` is reached.
186
+ remove_invalid_values (`bool`, *optional*):
187
+ Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method from crashing.
188
+ Note that using `remove_invalid_values` can slow down generation.
189
+
190
+ > Parameters for fine-tuning tasks
191
+
192
+ architectures (`List[str]`, *optional*):
193
+ Model architectures that can be used with the model pretrained weights.
194
+ finetuning_task (`str`, *optional*):
195
+ Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow
196
+ or PyTorch) checkpoint.
197
+ id2label (`Dict[int, str]`, *optional*):
198
+ A map from index (for instance prediction index, or target index) to label.
199
+ label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.
200
+ num_labels (`int`, *optional*):
201
+ Number of labels to use in the last layer added to the model, typically for a classification task.
202
+ task_specific_params (`Dict[str, Any]`, *optional*):
203
+ Additional keyword arguments to store for the current task.
204
+ problem_type (`str`, *optional*):
205
+ Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
206
+ `"single_label_classification"` or `"multi_label_classification"`.
207
+
208
+ > Parameters linked to the tokenizer
209
+
210
+ tokenizer_class (`str`, *optional*):
211
+ The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the
212
+ model by default).
213
+ prefix (`str`, *optional*):
214
+ A specific prompt that should be added at the beginning of each text before calling the model.
215
+ bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.
216
+ pad_token_id (`int`, *optional*): The id of the _padding_ token.
217
+ eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.
218
+ decoder_start_token_id (`int`, *optional*):
219
+ If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
220
+ sep_token_id (`int`, *optional*): The id of the _separation_ token.
221
+
222
+ > PyTorch specific parameters
223
+
224
+ torchscript (`bool`, *optional*, defaults to `False`):
225
+ Whether or not the model should be used with Torchscript.
226
+ tie_word_embeddings (`bool`, *optional*, defaults to `True`):
227
+ Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
228
+ model has an output word embedding layer.
229
+ torch_dtype (`str`, *optional*):
230
+ The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
231
+ (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
232
+ model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
233
+ `float16` weights. Since the config object is stored in plain text, this attribute contains just the
234
+ floating type string without the `torch.` prefix. For example, for `torch.float16`, `torch_dtype` is the
235
+ `"float16"` string.
236
+
237
+ This attribute is currently not being used during model loading, but this may change in future
238
+ versions. We can already start preparing for the future by saving the dtype with `save_pretrained`.
239
+
240
+ > TensorFlow specific parameters
241
+
242
+ use_bfloat16 (`bool`, *optional*, defaults to `False`):
243
+ Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).
244
+ tf_legacy_loss (`bool`, *optional*, defaults to `False`):
245
+ Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may
246
+ not be XLA-compatible. This option is here for backward compatibility and will be removed in Transformers
247
+ v5.
248
+ """
249
+
250
+ model_type: str = ""
251
+ is_composition: bool = False
252
+ attribute_map: Dict[str, str] = {}
253
+ _auto_class: Optional[str] = None
254
+
255
+ def __setattr__(self, key, value):
256
+ if key in super().__getattribute__("attribute_map"):
257
+ key = super().__getattribute__("attribute_map")[key]
258
+ super().__setattr__(key, value)
259
+
260
+ def __getattribute__(self, key):
261
+ if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
262
+ key = super().__getattribute__("attribute_map")[key]
263
+ return super().__getattribute__(key)
264
+
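+ # A minimal illustrative sketch (hypothetical subclass, not part of this module): `attribute_map`
+ # transparently redirects reads and writes on legacy attribute names to their canonical counterparts.
+ #
+ #     class MyConfig(PretrainedConfig):
+ #         attribute_map = {"n_layer": "num_hidden_layers"}
+ #
+ #     cfg = MyConfig(num_hidden_layers=12)
+ #     cfg.n_layer        # -> 12, read through the mapped name
+ #     cfg.n_layer = 24   # actually writes `num_hidden_layers`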
265
+ def __init__(self, **kwargs):
266
+ # Attributes with defaults
267
+ self.return_dict = kwargs.pop("return_dict", True)
268
+ self.output_hidden_states = kwargs.pop("output_hidden_states", False)
269
+ self.output_attentions = kwargs.pop("output_attentions", False)
270
+ self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
271
+ self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
272
+ self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
273
+ self.tf_legacy_loss = kwargs.pop("tf_legacy_loss", False) # Only used by TensorFlow models
274
+ self.pruned_heads = kwargs.pop("pruned_heads", {})
275
+ self.tie_word_embeddings = kwargs.pop(
276
+ "tie_word_embeddings", True
277
+ ) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
278
+ self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
279
+
280
+ # `is_decoder` is used in encoder-decoder models to differentiate the encoder from the decoder
281
+ self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
282
+ self.is_decoder = kwargs.pop("is_decoder", False)
283
+ self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None)
284
+ self.add_cross_attention = kwargs.pop("add_cross_attention", False)
285
+ self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
286
+
287
+ # Retrocompatibility: Parameters for sequence generation. While we will keep the ability to load these
288
+ # parameters, saving them will be deprecated. In a distant future, we won't need to load them.
289
+ for parameter_name, default_value in self._get_generation_defaults().items():
290
+ setattr(self, parameter_name, kwargs.pop(parameter_name, default_value))
291
+
292
+ # Fine-tuning task arguments
293
+ self.architectures = kwargs.pop("architectures", None)
294
+ self.finetuning_task = kwargs.pop("finetuning_task", None)
295
+ self.id2label = kwargs.pop("id2label", None)
296
+ self.label2id = kwargs.pop("label2id", None)
297
+ if self.label2id is not None and not isinstance(self.label2id, dict):
298
+ raise ValueError("Argument label2id should be a dictionary.")
299
+ if self.id2label is not None:
300
+ if not isinstance(self.id2label, dict):
301
+ raise ValueError("Argument id2label should be a dictionary.")
302
+ num_labels = kwargs.pop("num_labels", None)
303
+ if num_labels is not None and len(self.id2label) != num_labels:
304
+ logger.warning(
305
+ f"You passed along `num_labels={num_labels}` with an incompatible id to label map: "
306
+ f"{self.id2label}. The number of labels wil be overwritten to {self.num_labels}."
307
+ )
308
+ self.id2label = {int(key): value for key, value in self.id2label.items()}
309
+ # Keys are always strings in JSON so convert ids to int here.
310
+ else:
311
+ self.num_labels = kwargs.pop("num_labels", 2)
312
+
313
+ if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
314
+ # we will start using self.torch_dtype in v5, but to be consistent with
315
+ # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
316
+ if is_torch_available():
317
+ import torch
318
+
319
+ self.torch_dtype = getattr(torch, self.torch_dtype)
320
+
321
+ # Tokenizer arguments TODO: eventually tokenizer and models should share the same config
322
+ self.tokenizer_class = kwargs.pop("tokenizer_class", None)
323
+ self.prefix = kwargs.pop("prefix", None)
324
+ self.bos_token_id = kwargs.pop("bos_token_id", None)
325
+ self.pad_token_id = kwargs.pop("pad_token_id", None)
326
+ self.eos_token_id = kwargs.pop("eos_token_id", None)
327
+ self.sep_token_id = kwargs.pop("sep_token_id", None)
328
+
329
+ self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
330
+
331
+ # task specific arguments
332
+ self.task_specific_params = kwargs.pop("task_specific_params", None)
333
+
334
+ # regression / multi-label classification
335
+ self.problem_type = kwargs.pop("problem_type", None)
336
+ allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
337
+ if self.problem_type is not None and self.problem_type not in allowed_problem_types:
338
+ raise ValueError(
339
+ f"The config parameter `problem_type` was not understood: received {self.problem_type} "
340
+ "but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
341
+ )
342
+
343
+ # TPU arguments
344
+ if kwargs.pop("xla_device", None) is not None:
345
+ logger.warning(
346
+ "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
347
+ "safely remove it from your `config.json` file."
348
+ )
349
+
350
+ # Name or path to the pretrained checkpoint
351
+ self._name_or_path = str(kwargs.pop("name_or_path", ""))
352
+ # Config hash
353
+ self._commit_hash = kwargs.pop("_commit_hash", None)
354
+
355
+ # Attention implementation to use, if relevant.
356
+ self._attn_implementation_internal = kwargs.pop("attn_implementation", None)
357
+
358
+ # Drop the transformers version info
359
+ self.transformers_version = kwargs.pop("transformers_version", None)
360
+
361
+ # Deal with gradient checkpointing
362
+ if kwargs.get("gradient_checkpointing", False):
363
+ warnings.warn(
364
+ "Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
365
+ "Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
366
+ "`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
367
+ )
368
+
369
+ # Additional attributes without default values
370
+ for key, value in kwargs.items():
371
+ try:
372
+ setattr(self, key, value)
373
+ except AttributeError as err:
374
+ logger.error(f"Can't set {key} with value {value} for {self}")
375
+ raise err
376
+
377
+ @property
378
+ def name_or_path(self) -> str:
379
+ return getattr(self, "_name_or_path", None)
380
+
381
+ @name_or_path.setter
382
+ def name_or_path(self, value):
383
+ self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
384
+
385
+ @property
386
+ def use_return_dict(self) -> bool:
387
+ """
388
+ `bool`: Whether or not to return [`~utils.ModelOutput`] instead of tuples.
389
+ """
390
+ # If torchscript is set, force `return_dict=False` to avoid jit errors
391
+ return self.return_dict and not self.torchscript
392
+
393
+ @property
394
+ def num_labels(self) -> int:
395
+ """
396
+ `int`: The number of labels for classification models.
397
+ """
398
+ return len(self.id2label)
399
+
400
+ @num_labels.setter
401
+ def num_labels(self, num_labels: int):
402
+ if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
403
+ self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
404
+ self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
405
+
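+ # Illustrative sketch: assigning `num_labels` regenerates the default label maps when they are
+ # missing or inconsistent.
+ #
+ #     config = PretrainedConfig()
+ #     config.num_labels = 3
+ #     # config.id2label -> {0: "LABEL_0", 1: "LABEL_1", 2: "LABEL_2"}
+ #     # config.label2id -> {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}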
406
+ @property
407
+ def _attn_implementation(self):
408
+ # This property is made private for now (as it cannot be changed and a PreTrainedModel.use_attn_implementation method needs to be implemented.)
409
+ if hasattr(self, "_attn_implementation_internal"):
410
+ if self._attn_implementation_internal is None:
411
+ # `config.attn_implementation` should never be None, for backward compatibility.
412
+ return "eager"
413
+ else:
414
+ return self._attn_implementation_internal
415
+ else:
416
+ return "eager"
417
+
418
+ @_attn_implementation.setter
419
+ def _attn_implementation(self, value):
420
+ self._attn_implementation_internal = value
421
+
422
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
423
+ """
424
+ Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
425
+ [`~PretrainedConfig.from_pretrained`] class method.
426
+
427
+ Args:
428
+ save_directory (`str` or `os.PathLike`):
429
+ Directory where the configuration JSON file will be saved (will be created if it does not exist).
430
+ push_to_hub (`bool`, *optional*, defaults to `False`):
431
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
432
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
433
+ namespace).
434
+ kwargs (`Dict[str, Any]`, *optional*):
435
+ Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
436
+ """
437
+ self._set_token_in_kwargs(kwargs)
438
+
439
+ if os.path.isfile(save_directory):
440
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
441
+
442
+ non_default_generation_parameters = {}
443
+ for parameter_name, default_value in self._get_generation_defaults().items():
444
+ if hasattr(self, parameter_name) and getattr(self, parameter_name) != default_value:
445
+ non_default_generation_parameters[parameter_name] = getattr(self, parameter_name)
446
+ if len(non_default_generation_parameters) > 0:
447
+ logger.warning(
448
+ "Some non-default generation parameters are set in the model config. These should go into a "
449
+ "GenerationConfig file (https://huggingface.co/docs/transformers/generation_strategies#save-a-custom-decoding-strategy-with-your-model) "
450
+ "instead. This warning will be raised to an exception in v4.41.\n"
451
+ f"Non-default generation parameters: {str(non_default_generation_parameters)}"
452
+ )
453
+
454
+ os.makedirs(save_directory, exist_ok=True)
455
+
456
+ if push_to_hub:
457
+ commit_message = kwargs.pop("commit_message", None)
458
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
459
+ repo_id = self._create_repo(repo_id, **kwargs)
460
+ files_timestamps = self._get_files_timestamps(save_directory)
461
+
462
+ # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
463
+ # loaded from the Hub.
464
+ if self._auto_class is not None:
465
+ custom_object_save(self, save_directory, config=self)
466
+
467
+ # If we save using the predefined names, we can load using `from_pretrained`
468
+ output_config_file = os.path.join(save_directory, CONFIG_NAME)
469
+
470
+ self.to_json_file(output_config_file, use_diff=True)
471
+ logger.info(f"Configuration saved in {output_config_file}")
472
+
473
+ if push_to_hub:
474
+ self._upload_modified_files(
475
+ save_directory,
476
+ repo_id,
477
+ files_timestamps,
478
+ commit_message=commit_message,
479
+ token=kwargs.get("token"),
480
+ )
481
+
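+ # Usage sketch (assumes a derived class such as `BertConfig` and a writable local directory):
+ #
+ #     config = BertConfig(num_hidden_layers=6)
+ #     config.save_pretrained("./my_model")               # writes ./my_model/config.json
+ #     reloaded = BertConfig.from_pretrained("./my_model")
+ #     assert reloaded.num_hidden_layers == 6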
482
+ @staticmethod
483
+ def _set_token_in_kwargs(kwargs, token=None):
484
+ """Temporary method to deal with `token` and `use_auth_token`.
485
+
486
+ This method is to avoid applying the same changes in all model config classes that override `from_pretrained`.
487
+
488
+ Need to clean up `use_auth_token` in a follow-up PR.
489
+ """
490
+ # Some model config classes like CLIP define their own `from_pretrained` without the new argument `token` yet.
491
+ if token is None:
492
+ token = kwargs.pop("token", None)
493
+ use_auth_token = kwargs.pop("use_auth_token", None)
494
+
495
+ if use_auth_token is not None:
496
+ warnings.warn(
497
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
498
+ FutureWarning,
499
+ )
500
+ if token is not None:
501
+ raise ValueError(
502
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
503
+ )
504
+ token = use_auth_token
505
+
506
+ if token is not None:
507
+ kwargs["token"] = token
508
+
509
+ @classmethod
510
+ def from_pretrained(
511
+ cls,
512
+ pretrained_model_name_or_path: Union[str, os.PathLike],
513
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
514
+ force_download: bool = False,
515
+ local_files_only: bool = False,
516
+ token: Optional[Union[str, bool]] = None,
517
+ revision: str = "main",
518
+ **kwargs,
519
+ ) -> "PretrainedConfig":
520
+ r"""
521
+ Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
522
+
523
+ Args:
524
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
525
+ This can be either:
526
+
527
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
528
+ huggingface.co.
529
+ - a path to a *directory* containing a configuration file saved using the
530
+ [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
531
+ - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
532
+ cache_dir (`str` or `os.PathLike`, *optional*):
533
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
534
+ standard cache should not be used.
535
+ force_download (`bool`, *optional*, defaults to `False`):
536
+ Whether or not to force to (re-)download the configuration files and override the cached versions if
537
+ they exist.
538
+ resume_download (`bool`, *optional*, defaults to `False`):
539
+ Whether or not to delete the incompletely received file. Attempts to resume the download if such a file
540
+ exists.
541
+ proxies (`Dict[str, str]`, *optional*):
542
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
543
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
544
+ token (`str` or `bool`, *optional*):
545
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
546
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
547
+ revision (`str`, *optional*, defaults to `"main"`):
548
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
549
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
550
+ identifier allowed by git.
551
+
552
+ <Tip>
553
+
554
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
555
+
556
+ </Tip>
557
+
558
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
559
+ If `False`, then this function returns just the final configuration object.
560
+
561
+ If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
562
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
563
+ part of `kwargs` which has not been used to update `config` and is otherwise ignored.
564
+ subfolder (`str`, *optional*, defaults to `""`):
565
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
566
+ specify the folder name here.
567
+ kwargs (`Dict[str, Any]`, *optional*):
568
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
569
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
570
+ by the `return_unused_kwargs` keyword parameter.
571
+
572
+ Returns:
573
+ [`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
574
+
575
+ Examples:
576
+
577
+ ```python
578
+ # We can't directly instantiate the base class *PretrainedConfig*, so let's show the examples on a
579
+ # derived class: BertConfig
580
+ config = BertConfig.from_pretrained(
581
+ "google-bert/bert-base-uncased"
582
+ ) # Download configuration from huggingface.co and cache.
583
+ config = BertConfig.from_pretrained(
584
+ "./test/saved_model/"
585
+ ) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
586
+ config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
587
+ config = BertConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
588
+ assert config.output_attentions == True
589
+ config, unused_kwargs = BertConfig.from_pretrained(
590
+ "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
591
+ )
592
+ assert config.output_attentions == True
593
+ assert unused_kwargs == {"foo": False}
594
+ ```"""
595
+ kwargs["cache_dir"] = cache_dir
596
+ kwargs["force_download"] = force_download
597
+ kwargs["local_files_only"] = local_files_only
598
+ kwargs["revision"] = revision
599
+
600
+ cls._set_token_in_kwargs(kwargs, token)
601
+
602
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
603
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
604
+ logger.warning(
605
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
606
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
607
+ )
608
+
609
+ return cls.from_dict(config_dict, **kwargs)
610
+
611
+ @classmethod
612
+ def get_config_dict(
613
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
614
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
615
+ """
616
+ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
617
+ [`PretrainedConfig`] using `from_dict`.
618
+
619
+ Parameters:
620
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
621
+ The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
622
+
623
+ Returns:
624
+ `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
625
+
626
+ """
627
+ cls._set_token_in_kwargs(kwargs)
628
+
629
+ original_kwargs = copy.deepcopy(kwargs)
630
+ # Get config dict associated with the base config file
631
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
632
+ if "_commit_hash" in config_dict:
633
+ original_kwargs["_commit_hash"] = config_dict["_commit_hash"]
634
+
635
+ # That config file may point us toward another config file to use.
636
+ if "configuration_files" in config_dict:
637
+ configuration_file = get_configuration_file(config_dict["configuration_files"])
638
+ config_dict, kwargs = cls._get_config_dict(
639
+ pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
640
+ )
641
+
642
+ return config_dict, kwargs
643
+
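+ # Sketch: `get_config_dict` can be used to inspect the raw configuration dictionary without
+ # instantiating a config object (the model id below is only an example):
+ #
+ #     config_dict, unused_kwargs = PretrainedConfig.get_config_dict("google-bert/bert-base-uncased")
+ #     config_dict["model_type"]  # e.g. "bert"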
644
+ @classmethod
645
+ def _get_config_dict(
646
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
647
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
648
+ cache_dir = kwargs.pop("cache_dir", None)
649
+ force_download = kwargs.pop("force_download", False)
650
+ resume_download = kwargs.pop("resume_download", False)
651
+ proxies = kwargs.pop("proxies", None)
652
+ token = kwargs.pop("token", None)
653
+ local_files_only = kwargs.pop("local_files_only", False)
654
+ revision = kwargs.pop("revision", None)
655
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
656
+ subfolder = kwargs.pop("subfolder", "")
657
+ from_pipeline = kwargs.pop("_from_pipeline", None)
658
+ from_auto_class = kwargs.pop("_from_auto", False)
659
+ commit_hash = kwargs.pop("_commit_hash", None)
660
+
661
+ if trust_remote_code is True:
662
+ logger.warning(
663
+ "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
664
+ " ignored."
665
+ )
666
+
667
+ user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
668
+ if from_pipeline is not None:
669
+ user_agent["using_pipeline"] = from_pipeline
670
+
671
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
672
+
673
+ is_local = os.path.isdir(pretrained_model_name_or_path)
674
+ if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
675
+ # Special case when pretrained_model_name_or_path is a local file
676
+ resolved_config_file = pretrained_model_name_or_path
677
+ is_local = True
678
+ elif is_remote_url(pretrained_model_name_or_path):
679
+ configuration_file = pretrained_model_name_or_path
680
+ resolved_config_file = download_url(pretrained_model_name_or_path)
681
+ else:
682
+ configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
683
+
684
+ try:
685
+ # Load from local folder or from cache or download from model Hub and cache
686
+ resolved_config_file = cached_file(
687
+ pretrained_model_name_or_path,
688
+ configuration_file,
689
+ cache_dir=cache_dir,
690
+ force_download=force_download,
691
+ proxies=proxies,
692
+ resume_download=resume_download,
693
+ local_files_only=local_files_only,
694
+ token=token,
695
+ user_agent=user_agent,
696
+ revision=revision,
697
+ subfolder=subfolder,
698
+ _commit_hash=commit_hash,
699
+ )
700
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
701
+ except EnvironmentError:
702
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
703
+ # the original exception.
704
+ raise
705
+ except Exception:
706
+ # For any other exception, we throw a generic error.
707
+ raise EnvironmentError(
708
+ f"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it"
709
+ " from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
710
+ f" name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory"
711
+ f" containing a {configuration_file} file"
712
+ )
713
+
714
+ try:
715
+ # Load config dict
716
+ config_dict = cls._dict_from_json_file(resolved_config_file)
717
+ config_dict["_commit_hash"] = commit_hash
718
+ except (json.JSONDecodeError, UnicodeDecodeError):
719
+ raise EnvironmentError(
720
+ f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
721
+ )
722
+
723
+ if is_local:
724
+ logger.info(f"loading configuration file {resolved_config_file}")
725
+ else:
726
+ logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
727
+
728
+ if "auto_map" in config_dict and not is_local:
729
+ config_dict["auto_map"] = add_model_info_to_auto_map(
730
+ config_dict["auto_map"], pretrained_model_name_or_path
731
+ )
732
+ return config_dict, kwargs
733
+
734
+ @classmethod
735
+ def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
736
+ """
737
+ Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
738
+
739
+ Args:
740
+ config_dict (`Dict[str, Any]`):
741
+ Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
742
+ retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
743
+ kwargs (`Dict[str, Any]`):
744
+ Additional parameters from which to initialize the configuration object.
745
+
746
+ Returns:
747
+ [`PretrainedConfig`]: The configuration object instantiated from those parameters.
748
+ """
749
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
750
+ # Those arguments may be passed along for our internal telemetry.
751
+ # We remove them so they don't appear in `return_unused_kwargs`.
752
+ kwargs.pop("_from_auto", None)
753
+ kwargs.pop("_from_pipeline", None)
754
+ # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
755
+ if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
756
+ kwargs["_commit_hash"] = config_dict["_commit_hash"]
757
+
758
+ # We remove it from kwargs so that it does not appear in `return_unused_kwargs`.
759
+ config_dict["attn_implementation"] = kwargs.pop("attn_implementation", None)
760
+
761
+ config = cls(**config_dict)
762
+
763
+ if hasattr(config, "pruned_heads"):
764
+ config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}
765
+
766
+ # Update config with kwargs if needed
767
+ if "num_labels" in kwargs and "id2label" in kwargs:
768
+ num_labels = kwargs["num_labels"]
769
+ id2label = kwargs["id2label"] if kwargs["id2label"] is not None else []
770
+ if len(id2label) != num_labels:
771
+ raise ValueError(
772
+ f"You passed along `num_labels={num_labels }` with an incompatible id to label map: "
773
+ f"{kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove "
774
+ "one of them."
775
+ )
776
+ to_remove = []
777
+ for key, value in kwargs.items():
778
+ if hasattr(config, key):
779
+ current_attr = getattr(config, key)
780
+ # To authorize passing a custom subconfig as kwarg in models that have nested configs.
781
+ if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):
782
+ value = current_attr.__class__(**value)
783
+ setattr(config, key, value)
784
+ if key != "torch_dtype":
785
+ to_remove.append(key)
786
+ for key in to_remove:
787
+ kwargs.pop(key, None)
788
+
789
+ logger.info(f"Model config {config}")
790
+ if return_unused_kwargs:
791
+ return config, kwargs
792
+ else:
793
+ return config
794
+
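+ # Sketch mirroring the `from_pretrained` example above, but starting from a plain dictionary
+ # (hypothetical extra kwarg `foo` for illustration):
+ #
+ #     config_dict, _ = BertConfig.get_config_dict("google-bert/bert-base-uncased")
+ #     config, unused = BertConfig.from_dict(
+ #         config_dict, output_attentions=True, foo=False, return_unused_kwargs=True
+ #     )
+ #     assert config.output_attentions is True
+ #     assert unused == {"foo": False}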
795
+ @classmethod
796
+ def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
797
+ """
798
+ Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
799
+
800
+ Args:
801
+ json_file (`str` or `os.PathLike`):
802
+ Path to the JSON file containing the parameters.
803
+
804
+ Returns:
805
+ [`PretrainedConfig`]: The configuration object instantiated from that JSON file.
806
+
807
+ """
808
+ config_dict = cls._dict_from_json_file(json_file)
809
+ return cls(**config_dict)
810
+
811
+ @classmethod
812
+ def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
813
+ with open(json_file, "r", encoding="utf-8") as reader:
814
+ text = reader.read()
815
+ return json.loads(text)
816
+
817
+ def __eq__(self, other):
818
+ return isinstance(other, PretrainedConfig) and (self.__dict__ == other.__dict__)
819
+
820
+ def __repr__(self):
821
+ return f"{self.__class__.__name__} {self.to_json_string()}"
822
+
823
+ def to_diff_dict(self) -> Dict[str, Any]:
824
+ """
825
+ Removes all attributes from config which correspond to the default config attributes for better readability and
826
+ serializes to a Python dictionary.
827
+
828
+ Returns:
829
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
830
+ """
831
+ config_dict = self.to_dict()
832
+
833
+ # get the default config dict
834
+ default_config_dict = PretrainedConfig().to_dict()
835
+
836
+ # get class specific config dict
837
+ class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
838
+
839
+ serializable_config_dict = {}
840
+
841
+ # only serialize values that differ from the default config
842
+ for key, value in config_dict.items():
843
+ if (
844
+ isinstance(getattr(self, key, None), PretrainedConfig)
845
+ and key in class_config_dict
846
+ and isinstance(class_config_dict[key], dict)
847
+ ):
848
+ # For nested configs we need to clean the diff recursively
849
+ diff = recursive_diff_dict(value, class_config_dict[key], config_obj=getattr(self, key, None))
850
+ if "model_type" in value:
851
+ # Needs to be set even if it's not in the diff
852
+ diff["model_type"] = value["model_type"]
853
+ if len(diff) > 0:
854
+ serializable_config_dict[key] = diff
855
+ elif (
856
+ key not in default_config_dict
857
+ or key == "transformers_version"
858
+ or value != default_config_dict[key]
859
+ or (key in class_config_dict and value != class_config_dict[key])
860
+ ):
861
+ serializable_config_dict[key] = value
862
+
863
+ if hasattr(self, "quantization_config"):
864
+ serializable_config_dict["quantization_config"] = (
865
+ self.quantization_config.to_dict()
866
+ if not isinstance(self.quantization_config, dict)
867
+ else self.quantization_config
868
+ )
869
+
870
+ # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
871
+ _ = serializable_config_dict.pop("_pre_quantization_dtype", None)
872
+
873
+ self.dict_torch_dtype_to_str(serializable_config_dict)
874
+
875
+ if "_attn_implementation_internal" in serializable_config_dict:
876
+ del serializable_config_dict["_attn_implementation_internal"]
877
+
878
+ return serializable_config_dict
879
+
880
+ def to_dict(self) -> Dict[str, Any]:
881
+ """
882
+ Serializes this instance to a Python dictionary.
883
+
884
+ Returns:
885
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
886
+ """
887
+ output = copy.deepcopy(self.__dict__)
888
+ if hasattr(self.__class__, "model_type"):
889
+ output["model_type"] = self.__class__.model_type
890
+ if "_auto_class" in output:
891
+ del output["_auto_class"]
892
+ if "_commit_hash" in output:
893
+ del output["_commit_hash"]
894
+ if "_attn_implementation_internal" in output:
895
+ del output["_attn_implementation_internal"]
896
+
897
+ # Transformers version when serializing the model
898
+ output["transformers_version"] = __version__
899
+
900
+ for key, value in output.items():
901
+ # Deal with nested configs like CLIP
902
+ if isinstance(value, PretrainedConfig):
903
+ value = value.to_dict()
904
+ del value["transformers_version"]
905
+
906
+ output[key] = value
907
+
908
+ if hasattr(self, "quantization_config"):
909
+ output["quantization_config"] = (
910
+ self.quantization_config.to_dict()
911
+ if not isinstance(self.quantization_config, dict)
912
+ else self.quantization_config
913
+ )
914
+
915
+ # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
916
+ _ = output.pop("_pre_quantization_dtype", None)
917
+
918
+ self.dict_torch_dtype_to_str(output)
919
+
920
+ return output
921
+
922
+ def to_json_string(self, use_diff: bool = True) -> str:
923
+ """
924
+ Serializes this instance to a JSON string.
925
+
926
+ Args:
927
+ use_diff (`bool`, *optional*, defaults to `True`):
928
+ If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
929
+ is serialized to JSON string.
930
+
931
+ Returns:
932
+ `str`: String containing all the attributes that make up this configuration instance in JSON format.
933
+ """
934
+ if use_diff is True:
935
+ config_dict = self.to_diff_dict()
936
+ else:
937
+ config_dict = self.to_dict()
938
+ return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
939
+
940
+ def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
941
+ """
942
+ Save this instance to a JSON file.
943
+
944
+ Args:
945
+ json_file_path (`str` or `os.PathLike`):
946
+ Path to the JSON file in which this configuration instance's parameters will be saved.
947
+ use_diff (`bool`, *optional*, defaults to `True`):
948
+ If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
949
+ is serialized to JSON file.
950
+ """
951
+ with open(json_file_path, "w", encoding="utf-8") as writer:
952
+ writer.write(self.to_json_string(use_diff=use_diff))
953
+
954
+ def update(self, config_dict: Dict[str, Any]):
955
+ """
956
+ Updates attributes of this class with attributes from `config_dict`.
957
+
958
+ Args:
959
+ config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
960
+ """
961
+ for key, value in config_dict.items():
962
+ setattr(self, key, value)
963
+
964
+ def update_from_string(self, update_str: str):
965
+ """
966
+ Updates attributes of this class with attributes from `update_str`.
967
+
968
+ The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
969
+ "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
970
+
971
+ The keys to change have to already exist in the config object.
972
+
973
+ Args:
974
+ update_str (`str`): String with attributes that should be updated for this class.
975
+
976
+ """
977
+
978
+ d = dict(x.split("=") for x in update_str.split(","))
979
+ for k, v in d.items():
980
+ if not hasattr(self, k):
981
+ raise ValueError(f"key {k} isn't in the original config dict")
982
+
983
+ old_v = getattr(self, k)
984
+ if isinstance(old_v, bool):
985
+ if v.lower() in ["true", "1", "y", "yes"]:
986
+ v = True
987
+ elif v.lower() in ["false", "0", "n", "no"]:
988
+ v = False
989
+ else:
990
+ raise ValueError(f"can't derive true or false from {v} (key {k})")
991
+ elif isinstance(old_v, int):
992
+ v = int(v)
993
+ elif isinstance(old_v, float):
994
+ v = float(v)
995
+ elif not isinstance(old_v, str):
996
+ raise ValueError(
997
+ f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
998
+ )
999
+
1000
+ setattr(self, k, v)
1001
+
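+ # Usage sketch: only keys that already exist on the config can be updated, and values are cast
+ # to the type of the current attribute.
+ #
+ #     config = PretrainedConfig()
+ #     config.update_from_string("num_beams=4,do_sample=true,temperature=0.7")
+ #     # config.num_beams == 4, config.do_sample is True, config.temperature == 0.7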
1002
+ def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
1003
+ """
1004
+ Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
1005
+ converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into *"float32"*
1006
+ string, which can then be stored in the json format.
1007
+ """
1008
+ if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
1009
+ d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
1010
+ for value in d.values():
1011
+ if isinstance(value, dict):
1012
+ self.dict_torch_dtype_to_str(value)
1013
+
1014
+ @classmethod
1015
+ def register_for_auto_class(cls, auto_class="AutoConfig"):
1016
+ """
1017
+ Register this class with a given auto class. This should only be used for custom configurations as the ones in
1018
+ the library are already mapped with `AutoConfig`.
1019
+
1020
+ <Tip warning={true}>
1021
+
1022
+ This API is experimental and may have some slight breaking changes in the next releases.
1023
+
1024
+ </Tip>
1025
+
1026
+ Args:
1027
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
1028
+ The auto class to register this new configuration with.
1029
+ """
1030
+ if not isinstance(auto_class, str):
1031
+ auto_class = auto_class.__name__
1032
+
1033
+ import transformers.models.auto as auto_module
1034
+
1035
+ if not hasattr(auto_module, auto_class):
1036
+ raise ValueError(f"{auto_class} is not a valid auto class.")
1037
+
1038
+ cls._auto_class = auto_class
1039
+
1040
+ @staticmethod
1041
+ def _get_generation_defaults() -> Dict[str, Any]:
1042
+ return {
1043
+ "max_length": 20,
1044
+ "min_length": 0,
1045
+ "do_sample": False,
1046
+ "early_stopping": False,
1047
+ "num_beams": 1,
1048
+ "num_beam_groups": 1,
1049
+ "diversity_penalty": 0.0,
1050
+ "temperature": 1.0,
1051
+ "top_k": 50,
1052
+ "top_p": 1.0,
1053
+ "typical_p": 1.0,
1054
+ "repetition_penalty": 1.0,
1055
+ "length_penalty": 1.0,
1056
+ "no_repeat_ngram_size": 0,
1057
+ "encoder_no_repeat_ngram_size": 0,
1058
+ "bad_words_ids": None,
1059
+ "num_return_sequences": 1,
1060
+ "output_scores": False,
1061
+ "return_dict_in_generate": False,
1062
+ "forced_bos_token_id": None,
1063
+ "forced_eos_token_id": None,
1064
+ "remove_invalid_values": False,
1065
+ "exponential_decay_length_penalty": None,
1066
+ "suppress_tokens": None,
1067
+ "begin_suppress_tokens": None,
1068
+ }
1069
+
1070
+ def _has_non_default_generation_parameters(self) -> bool:
1071
+ """
1072
+ Whether or not this instance holds non-default generation parameters.
1073
+ """
1074
+ for parameter_name, default_value in self._get_generation_defaults().items():
1075
+ if hasattr(self, parameter_name) and getattr(self, parameter_name) != default_value:
1076
+ return True
1077
+ return False
1078
+
1079
+
1080
+ def get_configuration_file(configuration_files: List[str]) -> str:
1081
+ """
1082
+ Get the configuration file to use for this version of transformers.
1083
+
1084
+ Args:
1085
+ configuration_files (`List[str]`): The list of available configuration files.
1086
+
1087
+ Returns:
1088
+ `str`: The configuration file to use.
1089
+ """
1090
+ configuration_files_map = {}
1091
+ for file_name in configuration_files:
1092
+ search = _re_configuration_file.search(file_name)
1093
+ if search is not None:
1094
+ v = search.groups()[0]
1095
+ configuration_files_map[v] = file_name
1096
+ available_versions = sorted(configuration_files_map.keys())
1097
+
1098
+ # Defaults to FULL_CONFIGURATION_FILE and then try to look at some newer versions.
1099
+ configuration_file = CONFIG_NAME
1100
+ transformers_version = version.parse(__version__)
1101
+ for v in available_versions:
1102
+ if version.parse(v) <= transformers_version:
1103
+ configuration_file = configuration_files_map[v]
1104
+ else:
1105
+ # No point going further since the versions are sorted.
1106
+ break
1107
+
1108
+ return configuration_file
1109
+
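+ # Sketch (hypothetical file names): among the versioned configuration files, the newest one whose
+ # version does not exceed the installed `transformers` version is selected, falling back to the
+ # plain `config.json` otherwise.
+ #
+ #     get_configuration_file(["config.json", "config.4.0.0.json", "config.99.0.0.json"])
+ #     # -> "config.4.0.0.json" for any transformers release in [4.0.0, 99.0.0)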
1110
+
1111
+ def recursive_diff_dict(dict_a, dict_b, config_obj=None):
1112
+ """
1113
+ Helper function to recursively take the diff between two nested dictionaries. The resulting diff only contains the
1114
+ values from `dict_a` that are different from values in `dict_b`.
1115
+ """
1116
+ diff = {}
1117
+ default = config_obj.__class__().to_dict() if config_obj is not None else {}
1118
+ for key, value in dict_a.items():
1119
+ obj_value = getattr(config_obj, str(key), None)
1120
+ if isinstance(obj_value, PretrainedConfig) and key in dict_b and isinstance(dict_b[key], dict):
1121
+ diff_value = recursive_diff_dict(value, dict_b[key], config_obj=obj_value)
1122
+ if len(diff_value) > 0:
1123
+ diff[key] = diff_value
1124
+ elif key not in dict_b or value != dict_b[key] or key not in default or value != default[key]:
1125
+ diff[key] = value
1126
+ return diff
1127
+
1128
+
1129
+ PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
1130
+ if PretrainedConfig.push_to_hub.__doc__ is not None:
1131
+ PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
1132
+ object="config", object_class="AutoConfig", object_files="configuration file"
1133
+ )
llmeval-env/lib/python3.10/site-packages/transformers/convert_graph_to_onnx.py ADDED
@@ -0,0 +1,551 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from argparse import ArgumentParser
17
+ from os import listdir, makedirs
18
+ from pathlib import Path
19
+ from typing import Dict, List, Optional, Tuple
20
+
21
+ from packaging.version import Version, parse
22
+
23
+ from transformers.pipelines import Pipeline, pipeline
24
+ from transformers.tokenization_utils import BatchEncoding
25
+ from transformers.utils import ModelOutput, is_tf_available, is_torch_available
26
+
27
+
28
+ # This is the minimal required version to
29
+ # support some ONNX Runtime features
30
+ ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0")
31
+
32
+
33
+ SUPPORTED_PIPELINES = [
34
+ "feature-extraction",
35
+ "ner",
36
+ "sentiment-analysis",
37
+ "fill-mask",
38
+ "question-answering",
39
+ "text-generation",
40
+ "translation_en_to_fr",
41
+ "translation_en_to_de",
42
+ "translation_en_to_ro",
43
+ ]
44
+
45
+
46
+ class OnnxConverterArgumentParser(ArgumentParser):
47
+ """
48
+ Wraps all the script arguments supported to export transformers models to ONNX IR
49
+ """
50
+
51
+ def __init__(self):
52
+ super().__init__("ONNX Converter")
53
+
54
+ self.add_argument(
55
+ "--pipeline",
56
+ type=str,
57
+ choices=SUPPORTED_PIPELINES,
58
+ default="feature-extraction",
59
+ )
60
+ self.add_argument(
61
+ "--model",
62
+ type=str,
63
+ required=True,
64
+ help="Model's id or path (ex: google-bert/bert-base-cased)",
65
+ )
66
+ self.add_argument("--tokenizer", type=str, help="Tokenizer's id or path (ex: google-bert/bert-base-cased)")
67
+ self.add_argument(
68
+ "--framework",
69
+ type=str,
70
+ choices=["pt", "tf"],
71
+ help="Framework for loading the model",
72
+ )
73
+ self.add_argument("--opset", type=int, default=11, help="ONNX opset to use")
74
+ self.add_argument(
75
+ "--check-loading",
76
+ action="store_true",
77
+ help="Check ONNX is able to load the model",
78
+ )
79
+ self.add_argument(
80
+ "--use-external-format",
81
+ action="store_true",
82
+ help="Allow exporting model >= than 2Gb",
83
+ )
84
+ self.add_argument(
85
+ "--quantize",
86
+ action="store_true",
87
+ help="Quantize the neural network to be run with int8",
88
+ )
89
+ self.add_argument("output")
90
+
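+ # Example invocation of this converter from the command line (model id and output path are
+ # placeholders), assuming the module's `__main__` entry point further below:
+ #
+ #     python -m transformers.convert_graph_to_onnx --framework pt --pipeline feature-extraction \
+ #         --model google-bert/bert-base-cased --opset 11 --check-loading onnx/bert-base-cased.onnx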
91
+
92
+ def generate_identified_filename(filename: Path, identifier: str) -> Path:
93
+ """
94
+ Append a string-identifier at the end (before the extension, if any) to the provided filepath
95
+
96
+ Args:
97
+ filename: pathlib.Path The actual path object to which we would like to add an identifier suffix
98
+ identifier: The suffix to add
99
+
100
+ Returns: Path with the identifier concatenated at the end of the filename stem (before the suffix)
101
+ """
102
+ return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)
103
+
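+ # Sketch: the identifier is inserted between the file stem and its extension, e.g.
+ #
+ #     generate_identified_filename(Path("onnx/model.onnx"), "-quantized")
+ #     # -> Path("onnx/model-quantized.onnx")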
104
+
105
+ def check_onnxruntime_requirements(minimum_version: Version):
106
+ """
107
+ Check that onnxruntime is installed and that the installed version is recent enough
108
+
109
+ Raises:
110
+ ImportError: If onnxruntime is not installed or too old version is found
111
+ """
112
+ try:
113
+ import onnxruntime
114
+
115
+ # Parse the version of the installed onnxruntime
116
+ ort_version = parse(onnxruntime.__version__)
117
+
118
+ # We require 1.4.0 minimum
119
+ if ort_version < ORT_QUANTIZE_MINIMUM_VERSION:
120
+ raise ImportError(
121
+ f"We found an older version of onnxruntime ({onnxruntime.__version__}) "
122
+ f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n"
123
+ "Please update onnxruntime by running `pip install --upgrade onnxruntime`"
124
+ )
125
+
126
+ except ImportError:
127
+ raise ImportError(
128
+ "onnxruntime doesn't seem to be currently installed. "
129
+ "Please install the onnxruntime by running `pip install onnxruntime`"
130
+ " and relaunch the conversion."
131
+ )
132
+
133
+
134
+ def ensure_valid_input(model, tokens, input_names):
135
+ """
136
+ Ensure inputs are presented in the correct order, without any None
137
+
138
+ Args:
139
+ model: The model used to forward the input data
140
+ tokens: BatchEncoding holding the input data
141
+ input_names: The name of the inputs
142
+
143
+ Returns: Tuple
144
+
145
+ """
146
+ print("Ensuring inputs are in correct order")
147
+
148
+ model_args_name = model.forward.__code__.co_varnames
149
+ model_args, ordered_input_names = [], []
150
+ for arg_name in model_args_name[1:]: # start at index 1 to skip "self" argument
151
+ if arg_name in input_names:
152
+ ordered_input_names.append(arg_name)
153
+ model_args.append(tokens[arg_name])
154
+ else:
155
+ print(f"{arg_name} is not present in the generated input list.")
156
+ break
157
+
158
+ print(f"Generated inputs order: {ordered_input_names}")
159
+ return ordered_input_names, tuple(model_args)
160
+
161
+
162
+ def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:
163
+ """
164
+ Attempt to infer the static vs dynamic axes for each input and output tensors for a specific model
165
+
166
+ Args:
167
+ nlp: The pipeline object holding the model to be exported
168
+ framework: The framework identifier to dispatch to the correct inference scheme (pt/tf)
169
+
170
+ Returns:
171
+
172
+ - List of the inferred input variable names
173
+ - List of the inferred output variable names
174
+ - Dictionary with input/output variables names as key and shape tensor as value
175
+ - a BatchEncoding reference which was used to infer all the above information
176
+ """
177
+
178
+ def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):
179
+ if isinstance(tensor, (tuple, list)):
180
+ return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]
181
+
182
+ else:
183
+ # Let's assume batch is the first axis with only 1 element (~~ might not be always true ...)
184
+ axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: "batch"}
185
+ if is_input:
186
+ if len(tensor.shape) == 2:
187
+ axes[1] = "sequence"
188
+ else:
189
+ raise ValueError(f"Unable to infer tensor axes ({len(tensor.shape)})")
190
+ else:
191
+ seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]
192
+ axes.update({dim: "sequence" for dim in seq_axes})
193
+
194
+ print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}")
195
+ return axes
196
+
197
+ tokens = nlp.tokenizer("This is a sample output", return_tensors=framework)
198
+ seq_len = tokens.input_ids.shape[-1]
199
+ outputs = nlp.model(**tokens) if framework == "pt" else nlp.model(tokens)
200
+ if isinstance(outputs, ModelOutput):
201
+ outputs = outputs.to_tuple()
202
+ if not isinstance(outputs, (list, tuple)):
203
+ outputs = (outputs,)
204
+
205
+ # Generate input names & axes
206
+ input_vars = list(tokens.keys())
207
+ input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}
208
+
209
+ # flatten potentially grouped outputs (past for gpt2, attentions)
210
+ outputs_flat = []
211
+ for output in outputs:
212
+ if isinstance(output, (tuple, list)):
213
+ outputs_flat.extend(output)
214
+ else:
215
+ outputs_flat.append(output)
216
+
217
+ # Generate output names & axes
218
+ output_names = [f"output_{i}" for i in range(len(outputs_flat))]
219
+ output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}
220
+
221
+ # Create the aggregated axes representation
222
+ dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)
223
+ return input_vars, output_names, dynamic_axes, tokens
224
+
225
+
226
+ def load_graph_from_args(
227
+ pipeline_name: str, framework: str, model: str, tokenizer: Optional[str] = None, **models_kwargs
228
+ ) -> Pipeline:
229
+ """
230
+ Convert the set of arguments provided through the CLI to an actual pipeline reference (tokenizer + model)
231
+
232
+ Args:
233
+ pipeline_name: The kind of pipeline to use (ner, question-answering, etc.)
234
+ framework: The framework to convert the pipeline from ("pt" or "tf")
235
+ model: The model name which will be loaded by the pipeline
236
+ tokenizer: The tokenizer name which will be loaded by the pipeline, default to the model's value
237
+
238
+ Returns: Pipeline object
239
+
240
+ """
241
+ # If no tokenizer provided
242
+ if tokenizer is None:
243
+ tokenizer = model
244
+
245
+ # Check the wanted framework is available
246
+ if framework == "pt" and not is_torch_available():
247
+ raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")
248
+ if framework == "tf" and not is_tf_available():
249
+ raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")
250
+
251
+ print(f"Loading pipeline (model: {model}, tokenizer: {tokenizer})")
252
+
253
+ # Allocate tokenizer and model
254
+ return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)
255
+
256
+
257
+ def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):
258
+ """
259
+ Export a PyTorch backed pipeline to ONNX Intermediate Representation (IR)
260
+
261
+ Args:
262
+ nlp: The pipeline to be exported
263
+ opset: The actual version of the ONNX operator set to use
264
+ output: Path where will be stored the generated ONNX model
265
+ use_external_format: Split the model definition from its parameters to allow models bigger than 2GB
266
+
267
+ Returns:
268
+
269
+ """
270
+ if not is_torch_available():
271
+ raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")
272
+
273
+ import torch
274
+ from torch.onnx import export
275
+
276
+ print(f"Using framework PyTorch: {torch.__version__}")
277
+
278
+ with torch.no_grad():
279
+ input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "pt")
280
+ ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names)
281
+
282
+ export(
283
+ nlp.model,
284
+ model_args,
285
+ f=output.as_posix(),
286
+ input_names=ordered_input_names,
287
+ output_names=output_names,
288
+ dynamic_axes=dynamic_axes,
289
+ do_constant_folding=True,
290
+ opset_version=opset,
291
+ )
292
+
293
+
294
+ def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
295
+ """
296
+ Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR)
297
+
298
+ Args:
299
+ nlp: The pipeline to be exported
300
+ opset: The actual version of the ONNX operator set to use
301
+ output: Path where the generated ONNX model will be stored
302
+
303
+ Notes: TensorFlow cannot export models bigger than 2GB due to an internal TensorFlow constraint
304
+
305
+ """
306
+ if not is_tf_available():
307
+ raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")
308
+
309
+ print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")
310
+
311
+ try:
312
+ import tensorflow as tf
313
+ import tf2onnx
314
+ from tf2onnx import __version__ as t2ov
315
+
316
+ print(f"Using framework TensorFlow: {tf.version.VERSION}, tf2onnx: {t2ov}")
317
+
318
+ # Build
319
+ input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf")
320
+
321
+ # Forward
322
+ nlp.model.predict(tokens.data)
323
+ input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in tokens.items()]
324
+ model_proto, _ = tf2onnx.convert.from_keras(
325
+ nlp.model, input_signature, opset=opset, output_path=output.as_posix()
326
+ )
327
+
328
+ except ImportError as e:
329
+ raise Exception(
330
+ f"Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first. {e}"
331
+ )
332
+
333
+
334
+ def convert(
335
+ framework: str,
336
+ model: str,
337
+ output: Path,
338
+ opset: int,
339
+ tokenizer: Optional[str] = None,
340
+ use_external_format: bool = False,
341
+ pipeline_name: str = "feature-extraction",
342
+ **model_kwargs,
343
+ ):
344
+ """
345
+ Convert the pipeline object to the ONNX Intermediate Representation (IR) format
346
+
347
+ Args:
348
+ framework: The framework the pipeline is backed by ("pt" or "tf")
349
+ model: The name of the model to load for the pipeline
350
+ output: The path where the ONNX graph will be stored
351
+ opset: The actual version of the ONNX operator set to use
352
+ tokenizer: The name of the tokenizer to load for the pipeline, defaults to the model's name if not provided
353
+ use_external_format:
354
+ Split the model definition from its parameters to allow models bigger than 2GB (PyTorch only)
355
+ pipeline_name: The kind of pipeline to instantiate (ner, question-answering, etc.)
356
+ model_kwargs: Keyword arguments to be forwarded to the model constructor
357
+
358
+ Returns:
359
+
360
+ """
361
+ warnings.warn(
362
+ "The `transformers.convert_graph_to_onnx` package is deprecated and will be removed in version 5 of"
363
+ " Transformers",
364
+ FutureWarning,
365
+ )
366
+ print(f"ONNX opset version set to: {opset}")
367
+
368
+ # Load the pipeline
369
+ nlp = load_graph_from_args(pipeline_name, framework, model, tokenizer, **model_kwargs)
370
+
371
+ if not output.parent.exists():
372
+ print(f"Creating folder {output.parent}")
373
+ makedirs(output.parent.as_posix())
374
+ elif len(listdir(output.parent.as_posix())) > 0:
375
+ raise Exception(f"Folder {output.parent.as_posix()} is not empty, aborting conversion")
376
+
377
+ # Export the graph
378
+ if framework == "pt":
379
+ convert_pytorch(nlp, opset, output, use_external_format)
380
+ else:
381
+ convert_tensorflow(nlp, opset, output)
382
+
383
+
384
+ def optimize(onnx_model_path: Path) -> Path:
385
+ """
386
+ Load the model at the specified path and let onnxruntime apply all the graph
387
+ optimizations possible
388
+
389
+ Args:
390
+ onnx_model_path: filepath where the model binary description is stored
391
+
392
+ Returns: Path where the optimized model binary description has been saved
393
+
394
+ """
395
+ from onnxruntime import InferenceSession, SessionOptions
396
+
397
+ # Generate model name with suffix "optimized"
398
+ opt_model_path = generate_identified_filename(onnx_model_path, "-optimized")
399
+ sess_option = SessionOptions()
400
+ sess_option.optimized_model_filepath = opt_model_path.as_posix()
401
+ _ = InferenceSession(onnx_model_path.as_posix(), sess_option)
402
+
403
+ print(f"Optimized model has been written at {opt_model_path}: \N{heavy check mark}")
404
+ print("/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\")
405
+
406
+ return opt_model_path
407
+
408
+
409
+ def quantize(onnx_model_path: Path) -> Path:
410
+ """
411
+ Quantize the weights of the model from float32 to int8 to allow very efficient inference on modern CPU
412
+
413
+ Args:
414
+ onnx_model_path: Path to the location where the exported ONNX model is stored
415
+
416
+ Returns: The Path generated for the quantized model
417
+ """
418
+ import onnx
419
+ import onnxruntime
420
+ from onnx.onnx_pb import ModelProto
421
+ from onnxruntime.quantization import QuantizationMode
422
+ from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer
423
+ from onnxruntime.quantization.registry import IntegerOpsRegistry
424
+
425
+ # Load the ONNX model
426
+ onnx_model = onnx.load(onnx_model_path.as_posix())
427
+
428
+ if parse(onnx.__version__) < parse("1.5.0"):
429
+ print(
430
+ "Models larger than 2GB will fail to quantize due to protobuf constraint.\n"
431
+ "Please upgrade to onnxruntime >= 1.5.0."
432
+ )
433
+
434
+ # Copy it
435
+ copy_model = ModelProto()
436
+ copy_model.CopyFrom(onnx_model)
437
+
438
+ # Construct quantizer
439
+ # onnxruntime renamed input_qType to activation_qType in v1.13.1, so we
440
+ # check the onnxruntime version to ensure backward compatibility.
441
+ # See also: https://github.com/microsoft/onnxruntime/pull/12873
442
+ if parse(onnxruntime.__version__) < parse("1.13.1"):
443
+ quantizer = ONNXQuantizer(
444
+ model=copy_model,
445
+ per_channel=False,
446
+ reduce_range=False,
447
+ mode=QuantizationMode.IntegerOps,
448
+ static=False,
449
+ weight_qType=True,
450
+ input_qType=False,
451
+ tensors_range=None,
452
+ nodes_to_quantize=None,
453
+ nodes_to_exclude=None,
454
+ op_types_to_quantize=list(IntegerOpsRegistry),
455
+ )
456
+ else:
457
+ quantizer = ONNXQuantizer(
458
+ model=copy_model,
459
+ per_channel=False,
460
+ reduce_range=False,
461
+ mode=QuantizationMode.IntegerOps,
462
+ static=False,
463
+ weight_qType=True,
464
+ activation_qType=False,
465
+ tensors_range=None,
466
+ nodes_to_quantize=None,
467
+ nodes_to_exclude=None,
468
+ op_types_to_quantize=list(IntegerOpsRegistry),
469
+ )
470
+
471
+ # Quantize and export
472
+ quantizer.quantize_model()
473
+
474
+ # Append "-quantized" at the end of the model's name
475
+ quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized")
476
+
477
+ # Save model
478
+ print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}")
479
+ onnx.save_model(quantizer.model.model, quantized_model_path.as_posix())
480
+
481
+ return quantized_model_path
482
+
483
+
484
+ def verify(path: Path):
485
+ from onnxruntime import InferenceSession, SessionOptions
486
+ from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException
487
+
488
+ print(f"Checking ONNX model loading from: {path} ...")
489
+ try:
490
+ onnx_options = SessionOptions()
491
+ _ = InferenceSession(path.as_posix(), onnx_options, providers=["CPUExecutionProvider"])
492
+ print(f"Model {path} correctly loaded: \N{heavy check mark}")
493
+ except RuntimeException as re:
494
+ print(f"Error while loading the model {re}: \N{heavy ballot x}")
495
+
496
+
497
+ if __name__ == "__main__":
498
+ parser = OnnxConverterArgumentParser()
499
+ args = parser.parse_args()
500
+
501
+ # Make sure output is absolute path
502
+ args.output = Path(args.output).absolute()
503
+
504
+ try:
505
+ print("\n====== Converting model to ONNX ======")
506
+ # Convert
507
+ convert(
508
+ args.framework,
509
+ args.model,
510
+ args.output,
511
+ args.opset,
512
+ args.tokenizer,
513
+ args.use_external_format,
514
+ args.pipeline,
515
+ )
516
+
517
+ if args.quantize:
518
+ # Ensure requirements for quantization on onnxruntime are met
519
+ check_onnxruntime_requirements(ORT_QUANTIZE_MINIMUM_VERSION)
520
+
521
+ # onnxruntime optimizations don't provide the same level of performance on TensorFlow as on PyTorch
522
+ if args.framework == "tf":
523
+ print(
524
+ "\t Using TensorFlow might not provide the same optimization level compared to PyTorch.\n"
525
+ "\t For TensorFlow users you can try optimizing the model directly through onnxruntime_tools.\n"
526
+ "\t For more information, please refer to the onnxruntime documentation:\n"
527
+ "\t\thttps://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers\n"
528
+ )
529
+
530
+ print("\n====== Optimizing ONNX model ======")
531
+
532
+ # Quantization works best when using the optimized version of the model
533
+ args.optimized_output = optimize(args.output)
534
+
535
+ # Do the quantization on the right graph
536
+ args.quantized_output = quantize(args.optimized_output)
537
+
538
+ # And verify
539
+ if args.check_loading:
540
+ print("\n====== Check exported ONNX model(s) ======")
541
+ verify(args.output)
542
+
543
+ if hasattr(args, "optimized_output"):
544
+ verify(args.optimized_output)
545
+
546
+ if hasattr(args, "quantized_output"):
547
+ verify(args.quantized_output)
548
+
549
+ except Exception as e:
550
+ print(f"Error while converting the model: {e}")
551
+ exit(1)
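The helpers above can also be driven programmatically instead of through `OnnxConverterArgumentParser`. A minimal sketch, assuming `onnx` and `onnxruntime` are installed; the model name and output path are illustrative placeholders, not values taken from this file:

from pathlib import Path

from transformers.convert_graph_to_onnx import convert, optimize, quantize, verify

# The output folder must be empty (or not yet exist), otherwise convert() aborts.
output = Path("onnx/bert-base-cased.onnx").absolute()
convert(framework="pt", model="bert-base-cased", output=output, opset=11, pipeline_name="feature-extraction")

# Let onnxruntime apply graph optimizations, then quantize the optimized graph to int8.
optimized_path = optimize(output)
quantized_path = quantize(optimized_path)

# Check that every exported graph can be loaded by onnxruntime on CPU.
for path in (output, optimized_path, quantized_path):
    verify(path)

Quantization is run on the optimized graph here because, as the `__main__` block above notes, it works best on the optimized version of the model.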
llmeval-env/lib/python3.10/site-packages/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py ADDED
@@ -0,0 +1,88 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Seq2Seq TF Hub checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ from . import (
21
+ BertConfig,
22
+ BertGenerationConfig,
23
+ BertGenerationDecoder,
24
+ BertGenerationEncoder,
25
+ load_tf_weights_in_bert_generation,
26
+ logging,
27
+ )
28
+
29
+
30
+ logging.set_verbosity_info()
31
+
32
+
33
+ def convert_tf_checkpoint_to_pytorch(tf_hub_path, pytorch_dump_path, is_encoder_named_decoder, vocab_size, is_encoder):
34
+ # Initialise PyTorch model
35
+ bert_config = BertConfig.from_pretrained(
36
+ "google-bert/bert-large-cased",
37
+ vocab_size=vocab_size,
38
+ max_position_embeddings=512,
39
+ is_decoder=True,
40
+ add_cross_attention=True,
41
+ )
42
+ bert_config_dict = bert_config.to_dict()
43
+ del bert_config_dict["type_vocab_size"]
44
+ config = BertGenerationConfig(**bert_config_dict)
45
+ if is_encoder:
46
+ model = BertGenerationEncoder(config)
47
+ else:
48
+ model = BertGenerationDecoder(config)
49
+ print(f"Building PyTorch model from configuration: {config}")
50
+
51
+ # Load weights from tf checkpoint
52
+ load_tf_weights_in_bert_generation(
53
+ model,
54
+ tf_hub_path,
55
+ model_class="bert",
56
+ is_encoder_named_decoder=is_encoder_named_decoder,
57
+ is_encoder=is_encoder,
58
+ )
59
+
60
+ # Save pytorch-model
61
+ print(f"Save PyTorch model and config to {pytorch_dump_path}")
62
+ model.save_pretrained(pytorch_dump_path)
63
+
64
+
65
+ if __name__ == "__main__":
66
+ parser = argparse.ArgumentParser()
67
+ # Required parameters
68
+ parser.add_argument(
69
+ "--tf_hub_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
70
+ )
71
+ parser.add_argument(
72
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
73
+ )
74
+ parser.add_argument(
75
+ "--is_encoder_named_decoder",
76
+ action="store_true",
77
+ help="If decoder has to be renamed to encoder in PyTorch model.",
78
+ )
79
+ parser.add_argument("--is_encoder", action="store_true", help="If model is an encoder.")
80
+ parser.add_argument("--vocab_size", default=50358, type=int, help="Vocab size of model")
81
+ args = parser.parse_args()
82
+ convert_tf_checkpoint_to_pytorch(
83
+ args.tf_hub_path,
84
+ args.pytorch_dump_path,
85
+ args.is_encoder_named_decoder,
86
+ args.vocab_size,
87
+ is_encoder=args.is_encoder,
88
+ )
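The same conversion can be invoked from Python rather than through argparse. A minimal sketch; the checkpoint location and dump directory below are placeholder paths, not values from this file:

from transformers.convert_tf_hub_seq_to_seq_bert_to_pytorch import convert_tf_checkpoint_to_pytorch

# Convert the encoder half of a TF Hub seq2seq BERT checkpoint into a BertGenerationEncoder.
convert_tf_checkpoint_to_pytorch(
    tf_hub_path="./tf_hub_checkpoint",              # placeholder: downloaded TF Hub checkpoint
    pytorch_dump_path="./bert_generation_encoder",  # placeholder: output directory
    is_encoder_named_decoder=False,
    vocab_size=50358,                               # default used by the CLI above
    is_encoder=True,
)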
llmeval-env/lib/python3.10/site-packages/transformers/deepspeed.py ADDED
@@ -0,0 +1,40 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Integration with DeepSpeed - kept for backward compatibility. If you plan to make any edit, make sure to modify the file
16
+ in `integrations/deepspeed` instead.
17
+
18
+ Check: https://github.com/huggingface/transformers/pull/25599
19
+ """
20
+ import warnings
21
+
22
+
23
+ warnings.warn(
24
+ "transformers.deepspeed module is deprecated and will be removed in a future version. Please import deepspeed modules directly from transformers.integrations",
25
+ FutureWarning,
26
+ )
27
+
28
+ # Backward compatibility imports, to make sure all those objects can be found in integrations/deepspeed
29
+ from .integrations.deepspeed import ( # noqa
30
+ HfDeepSpeedConfig,
31
+ HfTrainerDeepSpeedConfig,
32
+ deepspeed_config,
33
+ deepspeed_init,
34
+ deepspeed_load_checkpoint,
35
+ deepspeed_optim_sched,
36
+ is_deepspeed_available,
37
+ is_deepspeed_zero3_enabled,
38
+ set_hf_deepspeed_config,
39
+ unset_hf_deepspeed_config,
40
+ )
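Since this module only re-exports the objects listed above, new code should import them from their new location; going through `transformers.deepspeed` still works but emits the `FutureWarning` shown above. A short sketch:

# Preferred import path (the deprecated shim above merely forwards to it):
from transformers.integrations.deepspeed import HfDeepSpeedConfig, is_deepspeed_zero3_enabled

# Legacy path, still functional for now but triggers the deprecation warning:
# from transformers.deepspeed import HfDeepSpeedConfig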
llmeval-env/lib/python3.10/site-packages/transformers/dependency_versions_table.py ADDED
@@ -0,0 +1,92 @@
1
+ # THIS FILE HAS BEEN AUTOGENERATED. To update:
2
+ # 1. modify the `_deps` dict in setup.py
3
+ # 2. run `make deps_table_update`
4
+ deps = {
5
+ "Pillow": "Pillow>=10.0.1,<=15.0",
6
+ "accelerate": "accelerate>=0.21.0",
7
+ "av": "av==9.2.0",
8
+ "beautifulsoup4": "beautifulsoup4",
9
+ "codecarbon": "codecarbon==1.2.0",
10
+ "cookiecutter": "cookiecutter==1.7.3",
11
+ "dataclasses": "dataclasses",
12
+ "datasets": "datasets!=2.5.0",
13
+ "decord": "decord==0.6.0",
14
+ "deepspeed": "deepspeed>=0.9.3",
15
+ "diffusers": "diffusers",
16
+ "dill": "dill<0.3.5",
17
+ "evaluate": "evaluate>=0.2.0",
18
+ "faiss-cpu": "faiss-cpu",
19
+ "fastapi": "fastapi",
20
+ "filelock": "filelock",
21
+ "flax": "flax>=0.4.1,<=0.7.0",
22
+ "fsspec": "fsspec<2023.10.0",
23
+ "ftfy": "ftfy",
24
+ "fugashi": "fugashi>=1.0",
25
+ "GitPython": "GitPython<3.1.19",
26
+ "hf-doc-builder": "hf-doc-builder>=0.3.0",
27
+ "huggingface-hub": "huggingface-hub>=0.19.3,<1.0",
28
+ "importlib_metadata": "importlib_metadata",
29
+ "ipadic": "ipadic>=1.0.0,<2.0",
30
+ "isort": "isort>=5.5.4",
31
+ "jax": "jax>=0.4.1,<=0.4.13",
32
+ "jaxlib": "jaxlib>=0.4.1,<=0.4.13",
33
+ "jieba": "jieba",
34
+ "kenlm": "kenlm",
35
+ "keras": "keras<2.16",
36
+ "keras-nlp": "keras-nlp>=0.3.1",
37
+ "librosa": "librosa",
38
+ "nltk": "nltk",
39
+ "natten": "natten>=0.14.6,<0.15.0",
40
+ "numpy": "numpy>=1.17",
41
+ "onnxconverter-common": "onnxconverter-common",
42
+ "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
43
+ "onnxruntime": "onnxruntime>=1.4.0",
44
+ "opencv-python": "opencv-python",
45
+ "optuna": "optuna",
46
+ "optax": "optax>=0.0.8,<=0.1.4",
47
+ "packaging": "packaging>=20.0",
48
+ "parameterized": "parameterized",
49
+ "phonemizer": "phonemizer",
50
+ "protobuf": "protobuf",
51
+ "psutil": "psutil",
52
+ "pyyaml": "pyyaml>=5.1",
53
+ "pydantic": "pydantic",
54
+ "pytest": "pytest>=7.2.0,<8.0.0",
55
+ "pytest-timeout": "pytest-timeout",
56
+ "pytest-xdist": "pytest-xdist",
57
+ "python": "python>=3.8.0",
58
+ "ray[tune]": "ray[tune]>=2.7.0",
59
+ "regex": "regex!=2019.12.17",
60
+ "requests": "requests",
61
+ "rhoknp": "rhoknp>=1.1.0,<1.3.1",
62
+ "rjieba": "rjieba",
63
+ "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
64
+ "ruff": "ruff==0.1.5",
65
+ "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
66
+ "sacremoses": "sacremoses",
67
+ "safetensors": "safetensors>=0.4.1",
68
+ "sagemaker": "sagemaker>=2.31.0",
69
+ "scikit-learn": "scikit-learn",
70
+ "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
71
+ "sigopt": "sigopt",
72
+ "starlette": "starlette",
73
+ "sudachipy": "sudachipy>=0.6.6",
74
+ "sudachidict_core": "sudachidict_core>=20220729",
75
+ "tensorboard": "tensorboard",
76
+ "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.16",
77
+ "tensorflow": "tensorflow>=2.6,<2.16",
78
+ "tensorflow-text": "tensorflow-text<2.16",
79
+ "tf2onnx": "tf2onnx",
80
+ "timeout-decorator": "timeout-decorator",
81
+ "timm": "timm",
82
+ "tokenizers": "tokenizers>=0.19,<0.20",
83
+ "torch": "torch",
84
+ "torchaudio": "torchaudio",
85
+ "torchvision": "torchvision",
86
+ "pyctcdecode": "pyctcdecode>=0.4.0",
87
+ "tqdm": "tqdm>=4.27",
88
+ "unidic": "unidic>=1.0.2",
89
+ "unidic_lite": "unidic_lite>=1.0.7",
90
+ "urllib3": "urllib3<2.0.0",
91
+ "uvicorn": "uvicorn",
92
+ }
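Each entry maps a dependency name to the full pip requirement specifier generated from the `_deps` dict in `setup.py`. A quick sketch of reading a pin straight from the table:

from transformers.dependency_versions_table import deps

# Values are complete requirement strings, e.g. "tokenizers>=0.19,<0.20".
print(deps["tokenizers"])
print(deps["huggingface-hub"])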
llmeval-env/lib/python3.10/site-packages/transformers/feature_extraction_sequence_utils.py ADDED
@@ -0,0 +1,371 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Sequence feature extraction class for common feature extractors to preprocess sequences.
17
+ """
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+
22
+ from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
23
+ from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class SequenceFeatureExtractor(FeatureExtractionMixin):
30
+ """
31
+ This is a general feature extraction class for speech recognition.
32
+
33
+ Args:
34
+ feature_size (`int`):
35
+ The feature dimension of the extracted features.
36
+ sampling_rate (`int`):
37
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
38
+ padding_value (`float`):
39
+ The value that is used to fill the padding values / vectors.
40
+ """
41
+
42
+ def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
43
+ self.feature_size = feature_size
44
+ self.sampling_rate = sampling_rate
45
+ self.padding_value = padding_value
46
+
47
+ self.padding_side = kwargs.pop("padding_side", "right")
48
+ self.return_attention_mask = kwargs.pop("return_attention_mask", True)
49
+
50
+ super().__init__(**kwargs)
51
+
52
+ def pad(
53
+ self,
54
+ processed_features: Union[
55
+ BatchFeature,
56
+ List[BatchFeature],
57
+ Dict[str, BatchFeature],
58
+ Dict[str, List[BatchFeature]],
59
+ List[Dict[str, BatchFeature]],
60
+ ],
61
+ padding: Union[bool, str, PaddingStrategy] = True,
62
+ max_length: Optional[int] = None,
63
+ truncation: bool = False,
64
+ pad_to_multiple_of: Optional[int] = None,
65
+ return_attention_mask: Optional[bool] = None,
66
+ return_tensors: Optional[Union[str, TensorType]] = None,
67
+ ) -> BatchFeature:
68
+ """
69
+ Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the
70
+ max sequence length in the batch.
71
+
72
+ Padding side (left/right) padding values are defined at the feature extractor level (with `self.padding_side`,
73
+ `self.padding_value`)
74
+
75
+ <Tip>
76
+
77
+ If the `processed_features` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
78
+ result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
79
+ PyTorch tensors, you will lose the specific device of your tensors however.
80
+
81
+ </Tip>
82
+
83
+ Args:
84
+ processed_features ([`BatchFeature`], list of [`BatchFeature`], `Dict[str, List[float]]`, `Dict[str, List[List[float]]` or `List[Dict[str, List[float]]]`):
85
+ Processed inputs. Can represent one input ([`BatchFeature`] or `Dict[str, List[float]]`) or a batch of
86
+ input values / vectors (list of [`BatchFeature`], *Dict[str, List[List[float]]]* or *List[Dict[str,
87
+ List[float]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
88
+ collate function.
89
+
90
+ Instead of `List[float]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors),
91
+ see the note above for the return type.
92
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
93
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
94
+ index) among:
95
+
96
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
97
+ sequence is provided).
98
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
99
+ acceptable input length for the model if that argument is not provided.
100
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
101
+ lengths).
102
+ max_length (`int`, *optional*):
103
+ Maximum length of the returned list and optionally padding length (see above).
104
+ truncation (`bool`):
105
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
106
+ pad_to_multiple_of (`int`, *optional*):
107
+ If set will pad the sequence to a multiple of the provided value.
108
+
109
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
110
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
111
+ return_attention_mask (`bool`, *optional*):
112
+ Whether to return the attention mask. If left to the default, will return the attention mask according
113
+ to the specific feature_extractor's default.
114
+
115
+ [What are attention masks?](../glossary#attention-mask)
116
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
117
+ If set, will return tensors instead of list of python integers. Acceptable values are:
118
+
119
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
120
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
121
+ - `'np'`: Return Numpy `np.ndarray` objects.
122
+ """
123
+ # If we have a list of dicts, let's convert it in a dict of lists
124
+ # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
125
+ if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
126
+ processed_features = {
127
+ key: [example[key] for example in processed_features] for key in processed_features[0].keys()
128
+ }
129
+
130
+ # The model's main input name, usually `input_values`, has to be passed for padding
131
+ if self.model_input_names[0] not in processed_features:
132
+ raise ValueError(
133
+ "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
134
+ f" to this method that includes {self.model_input_names[0]}, but you provided"
135
+ f" {list(processed_features.keys())}"
136
+ )
137
+
138
+ required_input = processed_features[self.model_input_names[0]]
139
+ return_attention_mask = (
140
+ return_attention_mask if return_attention_mask is not None else self.return_attention_mask
141
+ )
142
+
143
+ if len(required_input) == 0:
144
+ if return_attention_mask:
145
+ processed_features["attention_mask"] = []
146
+ return processed_features
147
+
148
+ # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
149
+ # and rebuild them afterwards if no return_tensors is specified
150
+ # Note that we lose the specific device the tensor may be on for PyTorch
151
+
152
+ first_element = required_input[0]
153
+ if isinstance(first_element, (list, tuple)):
154
+ # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
155
+ index = 0
156
+ while len(required_input[index]) == 0:
157
+ index += 1
158
+ if index < len(required_input):
159
+ first_element = required_input[index][0]
160
+
161
+ if return_tensors is None:
162
+ if is_tf_tensor(first_element):
163
+ return_tensors = "tf"
164
+ elif is_torch_tensor(first_element):
165
+ return_tensors = "pt"
166
+ elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
167
+ return_tensors = "np"
168
+ else:
169
+ raise ValueError(
170
+ f"type of {first_element} unknown: {type(first_element)}. "
171
+ "Should be one of a python, numpy, pytorch or tensorflow object."
172
+ )
173
+
174
+ for key, value in processed_features.items():
175
+ if isinstance(value[0], (int, float)):
176
+ processed_features[key] = to_numpy(value)
177
+ else:
178
+ processed_features[key] = [to_numpy(v) for v in value]
179
+
180
+ # Convert padding_strategy in PaddingStrategy
181
+ padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
182
+
183
+ required_input = processed_features[self.model_input_names[0]]
184
+
185
+ batch_size = len(required_input)
186
+ if not all(len(v) == batch_size for v in processed_features.values()):
187
+ raise ValueError("Some items in the output dictionary have a different batch size than others.")
188
+
189
+ truncated_inputs = []
190
+ for i in range(batch_size):
191
+ inputs = {k: v[i] for k, v in processed_features.items()}
192
+ # truncation
193
+ inputs_slice = self._truncate(
194
+ inputs,
195
+ max_length=max_length,
196
+ pad_to_multiple_of=pad_to_multiple_of,
197
+ truncation=truncation,
198
+ )
199
+ truncated_inputs.append(inputs_slice)
200
+
201
+ if padding_strategy == PaddingStrategy.LONGEST:
202
+ # make sure that `max_length` cannot be longer than the longest truncated length
203
+ max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
204
+ padding_strategy = PaddingStrategy.MAX_LENGTH
205
+
206
+ batch_outputs = {}
207
+ for i in range(batch_size):
208
+ # padding
209
+ outputs = self._pad(
210
+ truncated_inputs[i],
211
+ max_length=max_length,
212
+ padding_strategy=padding_strategy,
213
+ pad_to_multiple_of=pad_to_multiple_of,
214
+ return_attention_mask=return_attention_mask,
215
+ )
216
+
217
+ for key, value in outputs.items():
218
+ if key not in batch_outputs:
219
+ batch_outputs[key] = []
220
+ if value.dtype is np.dtype(np.float64):
221
+ value = value.astype(np.float32)
222
+ batch_outputs[key].append(value)
223
+
224
+ return BatchFeature(batch_outputs, tensor_type=return_tensors)
225
+
226
+ def _pad(
227
+ self,
228
+ processed_features: Union[Dict[str, np.ndarray], BatchFeature],
229
+ max_length: Optional[int] = None,
230
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
231
+ pad_to_multiple_of: Optional[int] = None,
232
+ return_attention_mask: Optional[bool] = None,
233
+ ) -> dict:
234
+ """
235
+ Pad inputs (on left/right and up to predefined length or max length in the batch)
236
+
237
+ Args:
238
+ processed_features (`Union[Dict[str, np.ndarray], BatchFeature]`):
239
+ Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch
240
+ of inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)
241
+ max_length (`int`, *optional*):
242
+ Maximum length of the returned list and optionally padding length (see below)
243
+ padding_strategy (`PaddingStrategy`, *optional*, default to `PaddingStrategy.DO_NOT_PAD`):
244
+ PaddingStrategy to use for padding.
245
+
246
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
247
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
248
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
249
+ The feature_extractor padding sides are defined in self.padding_side:
250
+
251
+ - 'left': pads on the left of the sequences
252
+ - 'right': pads on the right of the sequences
253
+ pad_to_multiple_of (`int`, *optional*):
254
+ Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
255
+ enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
256
+ which benefit from having sequence lengths be a multiple of 128.
257
+ return_attention_mask (`bool`, *optional*):
258
+ Set to False to avoid returning attention mask (default: set to model specifics)
259
+ """
260
+ required_input = processed_features[self.model_input_names[0]]
261
+
262
+ if padding_strategy == PaddingStrategy.LONGEST:
263
+ max_length = len(required_input)
264
+
265
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
266
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
267
+
268
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
269
+
270
+ if return_attention_mask and "attention_mask" not in processed_features:
271
+ processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)
272
+
273
+ if needs_to_be_padded:
274
+ difference = max_length - len(required_input)
275
+ if self.padding_side == "right":
276
+ if return_attention_mask:
277
+ processed_features["attention_mask"] = np.pad(
278
+ processed_features["attention_mask"], (0, difference)
279
+ )
280
+ padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
281
+ processed_features[self.model_input_names[0]] = np.pad(
282
+ required_input, padding_shape, "constant", constant_values=self.padding_value
283
+ )
284
+ elif self.padding_side == "left":
285
+ if return_attention_mask:
286
+ processed_features["attention_mask"] = np.pad(
287
+ processed_features["attention_mask"], (difference, 0)
288
+ )
289
+ padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
290
+ processed_features[self.model_input_names[0]] = np.pad(
291
+ required_input, padding_shape, "constant", constant_values=self.padding_value
292
+ )
293
+ else:
294
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
295
+
296
+ return processed_features
297
+
298
+ def _truncate(
299
+ self,
300
+ processed_features: Union[Dict[str, np.ndarray], BatchFeature],
301
+ max_length: Optional[int] = None,
302
+ pad_to_multiple_of: Optional[int] = None,
303
+ truncation: Optional[bool] = None,
304
+ ):
305
+ """
306
+ Truncate inputs to predefined length or max length in the batch
307
+
308
+ Args:
309
+ processed_features(`Union[Dict[str, np.ndarray], BatchFeature]`):
310
+ Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch
311
+ of inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)
312
+ max_length (`int`, *optional*):
313
+ maximum length of the returned list and optionally padding length (see below)
314
+ pad_to_multiple_of (`int`, *optional*) :
315
+ Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
316
+ enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
317
+ which benefit from having sequence lengths be a multiple of 128.
318
+ truncation (`bool`, *optional*):
319
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
320
+ """
321
+ if not truncation:
322
+ return processed_features
323
+ elif truncation and max_length is None:
324
+ raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
325
+
326
+ required_input = processed_features[self.model_input_names[0]]
327
+
328
+ # find `max_length` that fits `pad_to_multiple_of`
329
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
330
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
331
+
332
+ needs_to_be_truncated = len(required_input) > max_length
333
+
334
+ if needs_to_be_truncated:
335
+ processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
336
+ if "attention_mask" in processed_features:
337
+ processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
338
+
339
+ return processed_features
340
+
341
+ def _get_padding_strategies(self, padding=False, max_length=None):
342
+ """
343
+ Find the correct padding strategy
344
+ """
345
+
346
+ # Get padding strategy
347
+ if padding is not False:
348
+ if padding is True:
349
+ padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
350
+ elif not isinstance(padding, PaddingStrategy):
351
+ padding_strategy = PaddingStrategy(padding)
352
+ elif isinstance(padding, PaddingStrategy):
353
+ padding_strategy = padding
354
+ else:
355
+ padding_strategy = PaddingStrategy.DO_NOT_PAD
356
+
357
+ # Set max length if needed
358
+ if max_length is None:
359
+ if padding_strategy == PaddingStrategy.MAX_LENGTH:
360
+ raise ValueError(
361
+ f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
362
+ )
363
+
364
+ # Test if we have a padding value
365
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
366
+ raise ValueError(
367
+ "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
368
+ " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
369
+ )
370
+
371
+ return padding_strategy
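As the `pad` docstring above notes, the method can double as a DataLoader collate function. A minimal sketch, assuming the Wav2Vec2 feature extractor checkpoint used elsewhere in this library's examples and synthetic waveforms (both are illustrative, not taken from this file):

import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# Two raw waveforms of different lengths: __call__ extracts features, pad() brings them
# to a common length and (optionally) returns an attention mask marking padded positions.
waveforms = [np.random.randn(16000).astype(np.float32), np.random.randn(12000).astype(np.float32)]
features = extractor(waveforms, sampling_rate=16000)
batch = extractor.pad(features, padding=True, return_attention_mask=True, return_tensors="np")
print(batch["input_values"].shape, batch["attention_mask"].shape)  # (2, 16000) (2, 16000)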
llmeval-env/lib/python3.10/site-packages/transformers/feature_extraction_utils.py ADDED
@@ -0,0 +1,684 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extraction saving/loading class for common feature extractors.
17
+ """
18
+
19
+ import copy
20
+ import json
21
+ import os
22
+ import warnings
23
+ from collections import UserDict
24
+ from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
25
+
26
+ import numpy as np
27
+
28
+ from .dynamic_module_utils import custom_object_save
29
+ from .utils import (
30
+ FEATURE_EXTRACTOR_NAME,
31
+ PushToHubMixin,
32
+ TensorType,
33
+ add_model_info_to_auto_map,
34
+ cached_file,
35
+ copy_func,
36
+ download_url,
37
+ is_flax_available,
38
+ is_jax_tensor,
39
+ is_numpy_array,
40
+ is_offline_mode,
41
+ is_remote_url,
42
+ is_tf_available,
43
+ is_torch_available,
44
+ is_torch_device,
45
+ is_torch_dtype,
46
+ logging,
47
+ requires_backends,
48
+ )
49
+
50
+
51
+ if TYPE_CHECKING:
52
+ if is_torch_available():
53
+ import torch # noqa
54
+
55
+
56
+ logger = logging.get_logger(__name__)
57
+
58
+ PreTrainedFeatureExtractor = Union["SequenceFeatureExtractor"] # noqa: F821
59
+
60
+
61
+ class BatchFeature(UserDict):
62
+ r"""
63
+ Holds the output of the [`~SequenceFeatureExtractor.pad`] and feature extractor specific `__call__` methods.
64
+
65
+ This class is derived from a python dictionary and can be used as a dictionary.
66
+
67
+ Args:
68
+ data (`dict`, *optional*):
69
+ Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask',
70
+ etc.).
71
+ tensor_type (`Union[None, str, TensorType]`, *optional*):
72
+ You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
73
+ initialization.
74
+ """
75
+
76
+ def __init__(self, data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None):
77
+ super().__init__(data)
78
+ self.convert_to_tensors(tensor_type=tensor_type)
79
+
80
+ def __getitem__(self, item: str) -> Union[Any]:
81
+ """
82
+ If the key is a string, returns the value of the dict associated to `key` ('input_values', 'attention_mask',
83
+ etc.).
84
+ """
85
+ if isinstance(item, str):
86
+ return self.data[item]
87
+ else:
88
+ raise KeyError("Indexing with integers is not available when using Python based feature extractors")
89
+
90
+ def __getattr__(self, item: str):
91
+ try:
92
+ return self.data[item]
93
+ except KeyError:
94
+ raise AttributeError
95
+
96
+ def __getstate__(self):
97
+ return {"data": self.data}
98
+
99
+ def __setstate__(self, state):
100
+ if "data" in state:
101
+ self.data = state["data"]
102
+
103
+ # Copied from transformers.tokenization_utils_base.BatchEncoding.keys
104
+ def keys(self):
105
+ return self.data.keys()
106
+
107
+ # Copied from transformers.tokenization_utils_base.BatchEncoding.values
108
+ def values(self):
109
+ return self.data.values()
110
+
111
+ # Copied from transformers.tokenization_utils_base.BatchEncoding.items
112
+ def items(self):
113
+ return self.data.items()
114
+
115
+ def _get_is_as_tensor_fns(self, tensor_type: Optional[Union[str, TensorType]] = None):
116
+ if tensor_type is None:
117
+ return None, None
118
+
119
+ # Convert to TensorType
120
+ if not isinstance(tensor_type, TensorType):
121
+ tensor_type = TensorType(tensor_type)
122
+
123
+ # Get a function reference for the correct framework
124
+ if tensor_type == TensorType.TENSORFLOW:
125
+ if not is_tf_available():
126
+ raise ImportError(
127
+ "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
128
+ )
129
+ import tensorflow as tf
130
+
131
+ as_tensor = tf.constant
132
+ is_tensor = tf.is_tensor
133
+ elif tensor_type == TensorType.PYTORCH:
134
+ if not is_torch_available():
135
+ raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
136
+ import torch # noqa
137
+
138
+ def as_tensor(value):
139
+ if isinstance(value, (list, tuple)) and len(value) > 0 and isinstance(value[0], np.ndarray):
140
+ value = np.array(value)
141
+ return torch.tensor(value)
142
+
143
+ is_tensor = torch.is_tensor
144
+ elif tensor_type == TensorType.JAX:
145
+ if not is_flax_available():
146
+ raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
147
+ import jax.numpy as jnp # noqa: F811
148
+
149
+ as_tensor = jnp.array
150
+ is_tensor = is_jax_tensor
151
+ else:
152
+
153
+ def as_tensor(value, dtype=None):
154
+ if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)):
155
+ value_lens = [len(val) for val in value]
156
+ if len(set(value_lens)) > 1 and dtype is None:
157
+ # we have a ragged list so handle explicitly
158
+ value = as_tensor([np.asarray(val) for val in value], dtype=object)
159
+ return np.asarray(value, dtype=dtype)
160
+
161
+ is_tensor = is_numpy_array
162
+ return is_tensor, as_tensor
163
+
164
+ def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
165
+ """
166
+ Convert the inner content to tensors.
167
+
168
+ Args:
169
+ tensor_type (`str` or [`~utils.TensorType`], *optional*):
170
+ The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
171
+ `None`, no modification is done.
172
+ """
173
+ if tensor_type is None:
174
+ return self
175
+
176
+ is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type)
177
+
178
+ # Do the tensor conversion in batch
179
+ for key, value in self.items():
180
+ try:
181
+ if not is_tensor(value):
182
+ tensor = as_tensor(value)
183
+
184
+ self[key] = tensor
185
+ except: # noqa E722
186
+ if key == "overflowing_values":
187
+ raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
188
+ raise ValueError(
189
+ "Unable to create tensor, you should probably activate padding "
190
+ "with 'padding=True' to have batched tensors with the same length."
191
+ )
192
+
193
+ return self
194
+
195
+ def to(self, *args, **kwargs) -> "BatchFeature":
196
+ """
197
+ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
198
+ different `dtypes` and sending the `BatchFeature` to a different `device`.
199
+
200
+ Args:
201
+ args (`Tuple`):
202
+ Will be passed to the `to(...)` function of the tensors.
203
+ kwargs (`Dict`, *optional*):
204
+ Will be passed to the `to(...)` function of the tensors.
205
+
206
+ Returns:
207
+ [`BatchFeature`]: The same instance after modification.
208
+ """
209
+ requires_backends(self, ["torch"])
210
+ import torch # noqa
211
+
212
+ new_data = {}
213
+ device = kwargs.get("device")
214
+ # Check if the args are a device or a dtype
215
+ if device is None and len(args) > 0:
216
+ # device should be always the first argument
217
+ arg = args[0]
218
+ if is_torch_dtype(arg):
219
+ # The first argument is a dtype
220
+ pass
221
+ elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
222
+ device = arg
223
+ else:
224
+ # it's something else
225
+ raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
226
+ # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
227
+ for k, v in self.items():
228
+ # check if v is a floating point
229
+ if torch.is_floating_point(v):
230
+ # cast and send to device
231
+ new_data[k] = v.to(*args, **kwargs)
232
+ elif device is not None:
233
+ new_data[k] = v.to(device=device)
234
+ else:
235
+ new_data[k] = v
236
+ self.data = new_data
237
+ return self
238
+
239
+
240
+ class FeatureExtractionMixin(PushToHubMixin):
241
+ """
242
+ This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature
243
+ extractors.
244
+ """
245
+
246
+ _auto_class = None
247
+
248
+ def __init__(self, **kwargs):
249
+ """Set elements of `kwargs` as attributes."""
250
+ # Pop "processor_class" as it should be saved as private attribute
251
+ self._processor_class = kwargs.pop("processor_class", None)
252
+ # Additional attributes without default values
253
+ for key, value in kwargs.items():
254
+ try:
255
+ setattr(self, key, value)
256
+ except AttributeError as err:
257
+ logger.error(f"Can't set {key} with value {value} for {self}")
258
+ raise err
259
+
260
+ def _set_processor_class(self, processor_class: str):
261
+ """Sets processor class as an attribute."""
262
+ self._processor_class = processor_class
263
+
264
+ @classmethod
265
+ def from_pretrained(
266
+ cls,
267
+ pretrained_model_name_or_path: Union[str, os.PathLike],
268
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
269
+ force_download: bool = False,
270
+ local_files_only: bool = False,
271
+ token: Optional[Union[str, bool]] = None,
272
+ revision: str = "main",
273
+ **kwargs,
274
+ ):
275
+ r"""
276
+ Instantiate a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a feature extractor, *e.g.* a
277
+ derived class of [`SequenceFeatureExtractor`].
278
+
279
+ Args:
280
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
281
+ This can be either:
282
+
283
+ - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
284
+ huggingface.co.
285
+ - a path to a *directory* containing a feature extractor file saved using the
286
+ [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
287
+ `./my_model_directory/`.
288
+ - a path or url to a saved feature extractor JSON *file*, e.g.,
289
+ `./my_model_directory/preprocessor_config.json`.
290
+ cache_dir (`str` or `os.PathLike`, *optional*):
291
+ Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
292
+ standard cache should not be used.
293
+ force_download (`bool`, *optional*, defaults to `False`):
294
+ Whether or not to force to (re-)download the feature extractor files and override the cached versions
295
+ if they exist.
296
+ resume_download (`bool`, *optional*, defaults to `False`):
297
+ Whether or not to delete incompletely received file. Attempts to resume the download if such a file
298
+ exists.
299
+ proxies (`Dict[str, str]`, *optional*):
300
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
301
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
302
+ token (`str` or `bool`, *optional*):
303
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
304
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
305
+ revision (`str`, *optional*, defaults to `"main"`):
306
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
307
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
308
+ identifier allowed by git.
309
+
310
+
311
+ <Tip>
312
+
313
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
314
+
315
+ </Tip>
316
+
317
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
318
+ If `False`, then this function returns just the final feature extractor object. If `True`, then this
319
+ functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
320
+ consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
321
+ `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
322
+ kwargs (`Dict[str, Any]`, *optional*):
323
+ The values in kwargs of any keys which are feature extractor attributes will be used to override the
324
+ loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
325
+ controlled by the `return_unused_kwargs` keyword parameter.
326
+
327
+ Returns:
328
+ A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`].
329
+
330
+ Examples:
331
+
332
+ ```python
333
+ # We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let's show the examples on a
334
+ # derived class: *Wav2Vec2FeatureExtractor*
335
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
336
+ "facebook/wav2vec2-base-960h"
337
+ ) # Download feature_extraction_config from huggingface.co and cache.
338
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
339
+ "./test/saved_model/"
340
+ ) # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')*
341
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json")
342
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
343
+ "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False
344
+ )
345
+ assert feature_extractor.return_attention_mask is False
346
+ feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained(
347
+ "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True
348
+ )
349
+ assert feature_extractor.return_attention_mask is False
350
+ assert unused_kwargs == {"foo": False}
351
+ ```"""
352
+ kwargs["cache_dir"] = cache_dir
353
+ kwargs["force_download"] = force_download
354
+ kwargs["local_files_only"] = local_files_only
355
+ kwargs["revision"] = revision
356
+
357
+ use_auth_token = kwargs.pop("use_auth_token", None)
358
+ if use_auth_token is not None:
359
+ warnings.warn(
360
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
361
+ FutureWarning,
362
+ )
363
+ if token is not None:
364
+ raise ValueError(
365
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
366
+ )
367
+ token = use_auth_token
368
+
369
+ if token is not None:
370
+ kwargs["token"] = token
371
+
372
+ feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
373
+
374
+ return cls.from_dict(feature_extractor_dict, **kwargs)
375
+
376
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
377
+ """
378
+ Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the
379
+ [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method.
380
+
381
+ Args:
382
+ save_directory (`str` or `os.PathLike`):
383
+ Directory where the feature extractor JSON file will be saved (will be created if it does not exist).
384
+ push_to_hub (`bool`, *optional*, defaults to `False`):
385
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
386
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
387
+ namespace).
388
+ kwargs (`Dict[str, Any]`, *optional*):
389
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
390
+ """
391
+ use_auth_token = kwargs.pop("use_auth_token", None)
392
+
393
+ if use_auth_token is not None:
394
+ warnings.warn(
395
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
396
+ FutureWarning,
397
+ )
398
+ if kwargs.get("token", None) is not None:
399
+ raise ValueError(
400
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
401
+ )
402
+ kwargs["token"] = use_auth_token
403
+
404
+ if os.path.isfile(save_directory):
405
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
406
+
407
+ os.makedirs(save_directory, exist_ok=True)
408
+
409
+ if push_to_hub:
410
+ commit_message = kwargs.pop("commit_message", None)
411
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
412
+ repo_id = self._create_repo(repo_id, **kwargs)
413
+ files_timestamps = self._get_files_timestamps(save_directory)
414
+
415
+ # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
416
+ # loaded from the Hub.
417
+ if self._auto_class is not None:
418
+ custom_object_save(self, save_directory, config=self)
419
+
420
+ # If we save using the predefined names, we can load using `from_pretrained`
421
+ output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME)
422
+
423
+ self.to_json_file(output_feature_extractor_file)
424
+ logger.info(f"Feature extractor saved in {output_feature_extractor_file}")
425
+
426
+ if push_to_hub:
427
+ self._upload_modified_files(
428
+ save_directory,
429
+ repo_id,
430
+ files_timestamps,
431
+ commit_message=commit_message,
432
+ token=kwargs.get("token"),
433
+ )
434
+
435
+ return [output_feature_extractor_file]
436
+
437
+ @classmethod
438
+ def get_feature_extractor_dict(
439
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
440
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
441
+ """
442
+ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
443
+ feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] using `from_dict`.
444
+
445
+ Parameters:
446
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
447
+ The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
448
+
449
+ Returns:
450
+ `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor object.
451
+ """
452
+ cache_dir = kwargs.pop("cache_dir", None)
453
+ force_download = kwargs.pop("force_download", False)
454
+ resume_download = kwargs.pop("resume_download", False)
455
+ proxies = kwargs.pop("proxies", None)
456
+ subfolder = kwargs.pop("subfolder", None)
457
+ token = kwargs.pop("token", None)
458
+ use_auth_token = kwargs.pop("use_auth_token", None)
459
+ local_files_only = kwargs.pop("local_files_only", False)
460
+ revision = kwargs.pop("revision", None)
461
+
462
+ if use_auth_token is not None:
463
+ warnings.warn(
464
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
465
+ FutureWarning,
466
+ )
467
+ if token is not None:
468
+ raise ValueError(
469
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
470
+ )
471
+ token = use_auth_token
472
+
473
+ from_pipeline = kwargs.pop("_from_pipeline", None)
474
+ from_auto_class = kwargs.pop("_from_auto", False)
475
+
476
+ user_agent = {"file_type": "feature extractor", "from_auto_class": from_auto_class}
477
+ if from_pipeline is not None:
478
+ user_agent["using_pipeline"] = from_pipeline
479
+
480
+ if is_offline_mode() and not local_files_only:
481
+ logger.info("Offline mode: forcing local_files_only=True")
482
+ local_files_only = True
483
+
484
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
485
+ is_local = os.path.isdir(pretrained_model_name_or_path)
486
+ if os.path.isdir(pretrained_model_name_or_path):
487
+ feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME)
488
+ if os.path.isfile(pretrained_model_name_or_path):
489
+ resolved_feature_extractor_file = pretrained_model_name_or_path
490
+ is_local = True
491
+ elif is_remote_url(pretrained_model_name_or_path):
492
+ feature_extractor_file = pretrained_model_name_or_path
493
+ resolved_feature_extractor_file = download_url(pretrained_model_name_or_path)
494
+ else:
495
+ feature_extractor_file = FEATURE_EXTRACTOR_NAME
496
+ try:
497
+ # Load from local folder or from cache or download from model Hub and cache
498
+ resolved_feature_extractor_file = cached_file(
499
+ pretrained_model_name_or_path,
500
+ feature_extractor_file,
501
+ cache_dir=cache_dir,
502
+ force_download=force_download,
503
+ proxies=proxies,
504
+ resume_download=resume_download,
505
+ local_files_only=local_files_only,
506
+ subfolder=subfolder,
507
+ token=token,
508
+ user_agent=user_agent,
509
+ revision=revision,
510
+ )
511
+ except EnvironmentError:
512
+ # Re-raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
513
+ # the original exception.
514
+ raise
515
+ except Exception:
516
+ # For any other exception, we throw a generic error.
517
+ raise EnvironmentError(
518
+ f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load"
519
+ " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
520
+ f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
521
+ f" directory containing a {FEATURE_EXTRACTOR_NAME} file"
522
+ )
523
+
524
+ try:
525
+ # Load feature_extractor dict
526
+ with open(resolved_feature_extractor_file, "r", encoding="utf-8") as reader:
527
+ text = reader.read()
528
+ feature_extractor_dict = json.loads(text)
529
+
530
+ except json.JSONDecodeError:
531
+ raise EnvironmentError(
532
+ f"It looks like the config file at '{resolved_feature_extractor_file}' is not a valid JSON file."
533
+ )
534
+
535
+ if is_local:
536
+ logger.info(f"loading configuration file {resolved_feature_extractor_file}")
537
+ else:
538
+ logger.info(
539
+ f"loading configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}"
540
+ )
541
+
542
+ if "auto_map" in feature_extractor_dict and not is_local:
543
+ feature_extractor_dict["auto_map"] = add_model_info_to_auto_map(
544
+ feature_extractor_dict["auto_map"], pretrained_model_name_or_path
545
+ )
546
+
547
+ return feature_extractor_dict, kwargs
548
+
549
+ @classmethod
550
+ def from_dict(cls, feature_extractor_dict: Dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor:
551
+ """
552
+ Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of
553
+ parameters.
554
+
555
+ Args:
556
+ feature_extractor_dict (`Dict[str, Any]`):
557
+ Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be
558
+ retrieved from a pretrained checkpoint by leveraging the
559
+ [`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method.
560
+ kwargs (`Dict[str, Any]`):
561
+ Additional parameters from which to initialize the feature extractor object.
562
+
563
+ Returns:
564
+ [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those
565
+ parameters.
566
+ """
567
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
568
+
569
+ feature_extractor = cls(**feature_extractor_dict)
570
+
571
+ # Update feature_extractor with kwargs if needed
572
+ to_remove = []
573
+ for key, value in kwargs.items():
574
+ if hasattr(feature_extractor, key):
575
+ setattr(feature_extractor, key, value)
576
+ to_remove.append(key)
577
+ for key in to_remove:
578
+ kwargs.pop(key, None)
579
+
580
+ logger.info(f"Feature extractor {feature_extractor}")
581
+ if return_unused_kwargs:
582
+ return feature_extractor, kwargs
583
+ else:
584
+ return feature_extractor
585
+
586
+ def to_dict(self) -> Dict[str, Any]:
587
+ """
588
+ Serializes this instance to a Python dictionary.
+
+ Returns:
589
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
590
+ """
591
+ output = copy.deepcopy(self.__dict__)
592
+ output["feature_extractor_type"] = self.__class__.__name__
593
+ if "mel_filters" in output:
594
+ del output["mel_filters"]
595
+ if "window" in output:
596
+ del output["window"]
597
+ return output
598
+
599
+ @classmethod
600
+ def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor:
601
+ """
602
+ Instantiates a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] from the path to
603
+ a JSON file of parameters.
604
+
605
+ Args:
606
+ json_file (`str` or `os.PathLike`):
607
+ Path to the JSON file containing the parameters.
608
+
609
+ Returns:
610
+ A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature_extractor
611
+ object instantiated from that JSON file.
612
+ """
613
+ with open(json_file, "r", encoding="utf-8") as reader:
614
+ text = reader.read()
615
+ feature_extractor_dict = json.loads(text)
616
+ return cls(**feature_extractor_dict)
617
+
618
+ def to_json_string(self) -> str:
619
+ """
620
+ Serializes this instance to a JSON string.
621
+
622
+ Returns:
623
+ `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
624
+ """
625
+ dictionary = self.to_dict()
626
+
627
+ for key, value in dictionary.items():
628
+ if isinstance(value, np.ndarray):
629
+ dictionary[key] = value.tolist()
630
+
631
+ # make sure private name "_processor_class" is correctly
632
+ # saved as "processor_class"
633
+ _processor_class = dictionary.pop("_processor_class", None)
634
+ if _processor_class is not None:
635
+ dictionary["processor_class"] = _processor_class
636
+
637
+ return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"
638
+
639
+ def to_json_file(self, json_file_path: Union[str, os.PathLike]):
640
+ """
641
+ Save this instance to a JSON file.
642
+
643
+ Args:
644
+ json_file_path (`str` or `os.PathLike`):
645
+ Path to the JSON file in which this feature_extractor instance's parameters will be saved.
646
+ """
647
+ with open(json_file_path, "w", encoding="utf-8") as writer:
648
+ writer.write(self.to_json_string())
649
+
650
+ def __repr__(self):
651
+ return f"{self.__class__.__name__} {self.to_json_string()}"
652
+
653
+ @classmethod
654
+ def register_for_auto_class(cls, auto_class="AutoFeatureExtractor"):
655
+ """
656
+ Register this class with a given auto class. This should only be used for custom feature extractors as the ones
657
+ in the library are already mapped with `AutoFeatureExtractor`.
658
+
659
+ <Tip warning={true}>
660
+
661
+ This API is experimental and may have some slight breaking changes in the next releases.
662
+
663
+ </Tip>
664
+
665
+ Args:
666
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoFeatureExtractor"`):
667
+ The auto class to register this new feature extractor with.
668
+ """
669
+ if not isinstance(auto_class, str):
670
+ auto_class = auto_class.__name__
671
+
672
+ import transformers.models.auto as auto_module
673
+
674
+ if not hasattr(auto_module, auto_class):
675
+ raise ValueError(f"{auto_class} is not a valid auto class.")
676
+
677
+ cls._auto_class = auto_class
678
+
679
+
680
+ FeatureExtractionMixin.push_to_hub = copy_func(FeatureExtractionMixin.push_to_hub)
681
+ if FeatureExtractionMixin.push_to_hub.__doc__ is not None:
682
+ FeatureExtractionMixin.push_to_hub.__doc__ = FeatureExtractionMixin.push_to_hub.__doc__.format(
683
+ object="feature extractor", object_class="AutoFeatureExtractor", object_files="feature extractor file"
684
+ )
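
# --- Illustrative usage sketch (not part of the file above) ---------------------------------
# A minimal round trip through the serialization helpers defined above, assuming a concrete
# extractor class such as Wav2Vec2FeatureExtractor; the checkpoint name and paths are illustrative.
from transformers import Wav2Vec2FeatureExtractor

# from_pretrained resolves the checkpoint via get_feature_extractor_dict, then calls from_dict
extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# save_pretrained writes FEATURE_EXTRACTOR_NAME (preprocessor_config.json) via to_json_file
extractor.save_pretrained("./my_extractor")

# to_dict / from_dict are the symmetric in-memory entry points
restored = Wav2Vec2FeatureExtractor.from_dict(extractor.to_dict())
print(restored.to_json_string())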
llmeval-env/lib/python3.10/site-packages/transformers/file_utils.py ADDED
@@ -0,0 +1,133 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ File utilities: utilities related to downloading and caching models
16
+
17
+ This module should not be updated anymore and is only left for backward compatibility.
18
+ """
19
+
20
+ from huggingface_hub import get_full_repo_name # for backward compatibility
21
+ from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY # for backward compatibility
22
+
23
+ from . import __version__
24
+
25
+ # Backward compatibility imports, to make sure all those objects can be found in file_utils
26
+ from .utils import (
27
+ CLOUDFRONT_DISTRIB_PREFIX,
28
+ CONFIG_NAME,
29
+ DUMMY_INPUTS,
30
+ DUMMY_MASK,
31
+ ENV_VARS_TRUE_AND_AUTO_VALUES,
32
+ ENV_VARS_TRUE_VALUES,
33
+ FEATURE_EXTRACTOR_NAME,
34
+ FLAX_WEIGHTS_NAME,
35
+ HF_MODULES_CACHE,
36
+ HUGGINGFACE_CO_PREFIX,
37
+ HUGGINGFACE_CO_RESOLVE_ENDPOINT,
38
+ MODEL_CARD_NAME,
39
+ MULTIPLE_CHOICE_DUMMY_INPUTS,
40
+ PYTORCH_PRETRAINED_BERT_CACHE,
41
+ PYTORCH_TRANSFORMERS_CACHE,
42
+ S3_BUCKET_PREFIX,
43
+ SENTENCEPIECE_UNDERLINE,
44
+ SPIECE_UNDERLINE,
45
+ TF2_WEIGHTS_NAME,
46
+ TF_WEIGHTS_NAME,
47
+ TORCH_FX_REQUIRED_VERSION,
48
+ TRANSFORMERS_CACHE,
49
+ TRANSFORMERS_DYNAMIC_MODULE_NAME,
50
+ USE_JAX,
51
+ USE_TF,
52
+ USE_TORCH,
53
+ WEIGHTS_INDEX_NAME,
54
+ WEIGHTS_NAME,
55
+ ContextManagers,
56
+ DummyObject,
57
+ EntryNotFoundError,
58
+ ExplicitEnum,
59
+ ModelOutput,
60
+ PaddingStrategy,
61
+ PushToHubMixin,
62
+ RepositoryNotFoundError,
63
+ RevisionNotFoundError,
64
+ TensorType,
65
+ _LazyModule,
66
+ add_code_sample_docstrings,
67
+ add_end_docstrings,
68
+ add_start_docstrings,
69
+ add_start_docstrings_to_model_forward,
70
+ cached_property,
71
+ copy_func,
72
+ default_cache_path,
73
+ define_sagemaker_information,
74
+ get_cached_models,
75
+ get_file_from_repo,
76
+ get_torch_version,
77
+ has_file,
78
+ http_user_agent,
79
+ is_apex_available,
80
+ is_bs4_available,
81
+ is_coloredlogs_available,
82
+ is_datasets_available,
83
+ is_detectron2_available,
84
+ is_faiss_available,
85
+ is_flax_available,
86
+ is_ftfy_available,
87
+ is_g2p_en_available,
88
+ is_in_notebook,
89
+ is_ipex_available,
90
+ is_librosa_available,
91
+ is_offline_mode,
92
+ is_onnx_available,
93
+ is_pandas_available,
94
+ is_phonemizer_available,
95
+ is_protobuf_available,
96
+ is_psutil_available,
97
+ is_py3nvml_available,
98
+ is_pyctcdecode_available,
99
+ is_pytesseract_available,
100
+ is_pytorch_quantization_available,
101
+ is_rjieba_available,
102
+ is_sagemaker_dp_enabled,
103
+ is_sagemaker_mp_enabled,
104
+ is_scipy_available,
105
+ is_sentencepiece_available,
106
+ is_seqio_available,
107
+ is_sklearn_available,
108
+ is_soundfile_availble,
109
+ is_spacy_available,
110
+ is_speech_available,
111
+ is_tensor,
112
+ is_tensorflow_probability_available,
113
+ is_tf2onnx_available,
114
+ is_tf_available,
115
+ is_timm_available,
116
+ is_tokenizers_available,
117
+ is_torch_available,
118
+ is_torch_bf16_available,
119
+ is_torch_cuda_available,
120
+ is_torch_fx_available,
121
+ is_torch_fx_proxy,
122
+ is_torch_mps_available,
123
+ is_torch_tf32_available,
124
+ is_torch_xla_available,
125
+ is_torchaudio_available,
126
+ is_training_run_on_sagemaker,
127
+ is_vision_available,
128
+ replace_return_docstrings,
129
+ requires_backends,
130
+ to_numpy,
131
+ to_py_obj,
132
+ torch_only_method,
133
+ )
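
# --- Illustrative usage sketch (not part of the file above) ---------------------------------
# file_utils only re-exports names for backward compatibility; new code should import the same
# objects from transformers.utils directly. A minimal sketch assuming a recent transformers install:
from transformers.file_utils import is_torch_available as legacy_is_torch_available
from transformers.utils import is_torch_available

assert legacy_is_torch_available is is_torch_available  # same object, just re-exported
print(is_torch_available())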
llmeval-env/lib/python3.10/site-packages/transformers/image_utils.py ADDED
@@ -0,0 +1,769 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import base64
17
+ import os
18
+ from io import BytesIO
19
+ from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import requests
23
+ from packaging import version
24
+
25
+ from .utils import (
26
+ ExplicitEnum,
27
+ is_jax_tensor,
28
+ is_tf_tensor,
29
+ is_torch_available,
30
+ is_torch_tensor,
31
+ is_vision_available,
32
+ logging,
33
+ requires_backends,
34
+ to_numpy,
35
+ )
36
+ from .utils.constants import ( # noqa: F401
37
+ IMAGENET_DEFAULT_MEAN,
38
+ IMAGENET_DEFAULT_STD,
39
+ IMAGENET_STANDARD_MEAN,
40
+ IMAGENET_STANDARD_STD,
41
+ OPENAI_CLIP_MEAN,
42
+ OPENAI_CLIP_STD,
43
+ )
44
+
45
+
46
+ if is_vision_available():
47
+ import PIL.Image
48
+ import PIL.ImageOps
49
+
50
+ if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
51
+ PILImageResampling = PIL.Image.Resampling
52
+ else:
53
+ PILImageResampling = PIL.Image
54
+
55
+ if TYPE_CHECKING:
56
+ if is_torch_available():
57
+ import torch
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+
63
+ ImageInput = Union[
64
+ "PIL.Image.Image", np.ndarray, "torch.Tensor", List["PIL.Image.Image"], List[np.ndarray], List["torch.Tensor"]
65
+ ] # noqa
66
+
67
+
68
+ class ChannelDimension(ExplicitEnum):
69
+ FIRST = "channels_first"
70
+ LAST = "channels_last"
71
+
72
+
73
+ class AnnotationFormat(ExplicitEnum):
74
+ COCO_DETECTION = "coco_detection"
75
+ COCO_PANOPTIC = "coco_panoptic"
76
+
77
+
78
+ class AnnotionFormat(ExplicitEnum):
79
+ COCO_DETECTION = AnnotationFormat.COCO_DETECTION.value
80
+ COCO_PANOPTIC = AnnotationFormat.COCO_PANOPTIC.value
81
+
82
+
83
+ AnnotationType = Dict[str, Union[int, str, List[Dict]]]
84
+
85
+
86
+ def is_pil_image(img):
87
+ return is_vision_available() and isinstance(img, PIL.Image.Image)
88
+
89
+
90
+ def is_valid_image(img):
91
+ return (
92
+ (is_vision_available() and isinstance(img, PIL.Image.Image))
93
+ or isinstance(img, np.ndarray)
94
+ or is_torch_tensor(img)
95
+ or is_tf_tensor(img)
96
+ or is_jax_tensor(img)
97
+ )
98
+
99
+
100
+ def valid_images(imgs):
101
+ # If we have a list of images, make sure every image is valid
102
+ if isinstance(imgs, (list, tuple)):
103
+ for img in imgs:
104
+ if not valid_images(img):
105
+ return False
106
+ # If not a list or tuple, we have been given a single image or batched tensor of images
107
+ elif not is_valid_image(imgs):
108
+ return False
109
+ return True
110
+
111
+
112
+ def is_batched(img):
113
+ if isinstance(img, (list, tuple)):
114
+ return is_valid_image(img[0])
115
+ return False
116
+
117
+
118
+ def is_scaled_image(image: np.ndarray) -> bool:
119
+ """
120
+ Checks to see whether the pixel values have already been rescaled to [0, 1].
121
+ """
122
+ if image.dtype == np.uint8:
123
+ return False
124
+
125
+ # It's possible the image has pixel values in [0, 255] but is of floating type
126
+ return np.min(image) >= 0 and np.max(image) <= 1
127
+
128
+
129
+ def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:
130
+ """
131
+ Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.
132
+ If the input is a batch of images, it is converted to a list of images.
133
+
134
+ Args:
135
+ images (`ImageInput`):
136
+ Image or images to turn into a list of images.
137
+ expected_ndims (`int`, *optional*, defaults to 3):
138
+ Expected number of dimensions for a single input image. If the input image has a different number of
139
+ dimensions, an error is raised.
140
+ """
141
+ if is_batched(images):
142
+ return images
143
+
144
+ # Either the input is a single image, in which case we create a list of length 1
145
+ if isinstance(images, PIL.Image.Image):
146
+ # PIL images are never batched
147
+ return [images]
148
+
149
+ if is_valid_image(images):
150
+ if images.ndim == expected_ndims + 1:
151
+ # Batch of images
152
+ images = list(images)
153
+ elif images.ndim == expected_ndims:
154
+ # Single image
155
+ images = [images]
156
+ else:
157
+ raise ValueError(
158
+ f"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got"
159
+ f" {images.ndim} dimensions."
160
+ )
161
+ return images
162
+ raise ValueError(
163
+ "Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or "
164
+ f"jax.ndarray, but got {type(images)}."
165
+ )
166
+
167
+
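
# --- Illustrative usage sketch (not part of the file above) ---------------------------------
# How make_list_of_images normalizes a single image vs. a batched array (shapes are made up).
import numpy as np
from transformers.image_utils import make_list_of_images

single = np.zeros((3, 224, 224), dtype=np.uint8)    # one image, expected_ndims == 3
batch = np.zeros((4, 3, 224, 224), dtype=np.uint8)  # batched array, expected_ndims + 1

print(len(make_list_of_images(single)))  # 1 -> the single image is wrapped in a list
print(len(make_list_of_images(batch)))   # 4 -> the batch is split into a list of arrays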
168
+ def to_numpy_array(img) -> np.ndarray:
169
+ if not is_valid_image(img):
170
+ raise ValueError(f"Invalid image type: {type(img)}")
171
+
172
+ if is_vision_available() and isinstance(img, PIL.Image.Image):
173
+ return np.array(img)
174
+ return to_numpy(img)
175
+
176
+
177
+ def infer_channel_dimension_format(
178
+ image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None
179
+ ) -> ChannelDimension:
180
+ """
181
+ Infers the channel dimension format of `image`.
182
+
183
+ Args:
184
+ image (`np.ndarray`):
185
+ The image to infer the channel dimension of.
186
+ num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):
187
+ The number of channels of the image.
188
+
189
+ Returns:
190
+ The channel dimension of the image.
191
+ """
192
+ num_channels = num_channels if num_channels is not None else (1, 3)
193
+ num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels
194
+
195
+ if image.ndim == 3:
196
+ first_dim, last_dim = 0, 2
197
+ elif image.ndim == 4:
198
+ first_dim, last_dim = 1, 3
199
+ else:
200
+ raise ValueError(f"Unsupported number of image dimensions: {image.ndim}")
201
+
202
+ if image.shape[first_dim] in num_channels:
203
+ return ChannelDimension.FIRST
204
+ elif image.shape[last_dim] in num_channels:
205
+ return ChannelDimension.LAST
206
+ raise ValueError("Unable to infer channel dimension format")
207
+
208
+
209
+ def get_channel_dimension_axis(
210
+ image: np.ndarray, input_data_format: Optional[Union[ChannelDimension, str]] = None
211
+ ) -> int:
212
+ """
213
+ Returns the channel dimension axis of the image.
214
+
215
+ Args:
216
+ image (`np.ndarray`):
217
+ The image to get the channel dimension axis of.
218
+ input_data_format (`ChannelDimension` or `str`, *optional*):
219
+ The channel dimension format of the image. If `None`, will infer the channel dimension from the image.
220
+
221
+ Returns:
222
+ The channel dimension axis of the image.
223
+ """
224
+ if input_data_format is None:
225
+ input_data_format = infer_channel_dimension_format(image)
226
+ if input_data_format == ChannelDimension.FIRST:
227
+ return image.ndim - 3
228
+ elif input_data_format == ChannelDimension.LAST:
229
+ return image.ndim - 1
230
+ raise ValueError(f"Unsupported data format: {input_data_format}")
231
+
232
+
233
+ def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:
234
+ """
235
+ Returns the (height, width) dimensions of the image.
236
+
237
+ Args:
238
+ image (`np.ndarray`):
239
+ The image to get the dimensions of.
240
+ channel_dim (`ChannelDimension`, *optional*):
241
+ Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.
242
+
243
+ Returns:
244
+ A tuple of the image's height and width.
245
+ """
246
+ if channel_dim is None:
247
+ channel_dim = infer_channel_dimension_format(image)
248
+
249
+ if channel_dim == ChannelDimension.FIRST:
250
+ return image.shape[-2], image.shape[-1]
251
+ elif channel_dim == ChannelDimension.LAST:
252
+ return image.shape[-3], image.shape[-2]
253
+ else:
254
+ raise ValueError(f"Unsupported data format: {channel_dim}")
255
+
256
+
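
# --- Illustrative usage sketch (not part of the file above) ---------------------------------
# Channel-format inference and size lookup on NumPy arrays (shapes are made up).
import numpy as np
from transformers.image_utils import ChannelDimension, get_image_size, infer_channel_dimension_format

chw = np.zeros((3, 480, 640))  # channels-first layout
hwc = np.zeros((480, 640, 3))  # channels-last layout

assert infer_channel_dimension_format(chw) == ChannelDimension.FIRST
assert infer_channel_dimension_format(hwc) == ChannelDimension.LAST
print(get_image_size(chw), get_image_size(hwc))  # (480, 640) in both cases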
257
+ def is_valid_annotation_coco_detection(annotation: Dict[str, Union[List, Tuple]]) -> bool:
258
+ if (
259
+ isinstance(annotation, dict)
260
+ and "image_id" in annotation
261
+ and "annotations" in annotation
262
+ and isinstance(annotation["annotations"], (list, tuple))
263
+ and (
264
+ # an image can have no annotations
265
+ len(annotation["annotations"]) == 0 or isinstance(annotation["annotations"][0], dict)
266
+ )
267
+ ):
268
+ return True
269
+ return False
270
+
271
+
272
+ def is_valid_annotation_coco_panoptic(annotation: Dict[str, Union[List, Tuple]]) -> bool:
273
+ if (
274
+ isinstance(annotation, dict)
275
+ and "image_id" in annotation
276
+ and "segments_info" in annotation
277
+ and "file_name" in annotation
278
+ and isinstance(annotation["segments_info"], (list, tuple))
279
+ and (
280
+ # an image can have no segments
281
+ len(annotation["segments_info"]) == 0 or isinstance(annotation["segments_info"][0], dict)
282
+ )
283
+ ):
284
+ return True
285
+ return False
286
+
287
+
288
+ def valid_coco_detection_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool:
289
+ return all(is_valid_annotation_coco_detection(ann) for ann in annotations)
290
+
291
+
292
+ def valid_coco_panoptic_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool:
293
+ return all(is_valid_annotation_coco_panoptic(ann) for ann in annotations)
294
+
295
+
296
+ def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] = None) -> "PIL.Image.Image":
297
+ """
298
+ Loads `image` to a PIL Image.
299
+
300
+ Args:
301
+ image (`str` or `PIL.Image.Image`):
302
+ The image to convert to the PIL Image format.
303
+ timeout (`float`, *optional*):
304
+ The timeout value in seconds for the URL request.
305
+
306
+ Returns:
307
+ `PIL.Image.Image`: A PIL Image.
308
+ """
309
+ requires_backends(load_image, ["vision"])
310
+ if isinstance(image, str):
311
+ if image.startswith("http://") or image.startswith("https://"):
312
+ # We need to actually check for a real protocol, otherwise it's impossible to use a local file
313
+ # like http_huggingface_co.png
314
+ image = PIL.Image.open(BytesIO(requests.get(image, timeout=timeout).content))
315
+ elif os.path.isfile(image):
316
+ image = PIL.Image.open(image)
317
+ else:
318
+ if image.startswith("data:image/"):
319
+ image = image.split(",")[1]
320
+
321
+ # Try to load as base64
322
+ try:
323
+ b64 = base64.b64decode(image, validate=True)
324
+ image = PIL.Image.open(BytesIO(b64))
325
+ except Exception as e:
326
+ raise ValueError(
327
+ f"Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path to an image file, or a base64 encoded string. Got {image}. Failed with {e}"
328
+ )
329
+ elif isinstance(image, PIL.Image.Image):
330
+ image = image
331
+ else:
332
+ raise ValueError(
333
+ "Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image."
334
+ )
335
+ image = PIL.ImageOps.exif_transpose(image)
336
+ image = image.convert("RGB")
337
+ return image
338
+
339
+
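
# --- Illustrative usage sketch (not part of the file above) ---------------------------------
# load_image accepts a URL, a local path, a base64 string or a PIL image and always returns an
# RGB PIL.Image. The URL and path below are placeholders, not real assets.
from transformers.image_utils import load_image

img_from_url = load_image("https://example.com/cat.png", timeout=5.0)  # fetched via requests
img_from_disk = load_image("./cat.png")                                 # opened with PIL
print(img_from_url.size, img_from_disk.mode)                            # mode is always "RGB"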
340
+ def validate_preprocess_arguments(
341
+ do_rescale: Optional[bool] = None,
342
+ rescale_factor: Optional[float] = None,
343
+ do_normalize: Optional[bool] = None,
344
+ image_mean: Optional[Union[float, List[float]]] = None,
345
+ image_std: Optional[Union[float, List[float]]] = None,
346
+ do_pad: Optional[bool] = None,
347
+ size_divisibility: Optional[int] = None,
348
+ do_center_crop: Optional[bool] = None,
349
+ crop_size: Optional[Dict[str, int]] = None,
350
+ do_resize: Optional[bool] = None,
351
+ size: Optional[Dict[str, int]] = None,
352
+ resample: Optional["PILImageResampling"] = None,
353
+ ):
354
+ """
355
+ Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.
356
+ Raises a `ValueError` if any incompatibility between the provided arguments is detected.
357
+ Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,
358
+ sometimes `size_divisibility`, and sometimes `size`. Newly added models and processors should follow
359
+ the existing argument conventions when possible.
360
+
361
+ """
362
+ if do_rescale and rescale_factor is None:
363
+ raise ValueError("rescale_factor must be specified if do_rescale is True.")
364
+
365
+ if do_pad and size_divisibility is None:
366
+ # Here, size_divisor might be passed as the value of size
367
+ "Depending on the model, size_divisibility, size_divisor, pad_size or size must be specified if do_pad is True."
368
+ "Depending on moel, size_divisibility, size_divisor, pad_size or size must be specified if do_pad is True."
369
+ )
370
+
371
+ if do_normalize and (image_mean is None or image_std is None):
372
+ raise ValueError("image_mean and image_std must both be specified if do_normalize is True.")
373
+
374
+ if do_center_crop and crop_size is None:
375
+ raise ValueError("crop_size must be specified if do_center_crop is True.")
376
+
377
+ if do_resize and (size is None or resample is None):
378
+ raise ValueError("size and resample must be specified if do_resize is True.")
379
+
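
# --- Illustrative usage sketch (not part of the file above) ---------------------------------
# The cross-checks above fail fast when a preprocessing step is enabled without its companion value.
from transformers.image_utils import validate_preprocess_arguments

validate_preprocess_arguments(do_rescale=True, rescale_factor=1 / 255)  # consistent -> no error
try:
    validate_preprocess_arguments(do_normalize=True, image_mean=None, image_std=None)
except ValueError as err:
    print(err)  # image_mean and image_std must both be specified if do_normalize is True.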
380
+
381
+ # In the future we can add a TF implementation here when we have TF models.
382
+ class ImageFeatureExtractionMixin:
383
+ """
384
+ Mixin that contain utilities for preparing image features.
385
+ """
386
+
387
+ def _ensure_format_supported(self, image):
388
+ if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image):
389
+ raise ValueError(
390
+ f"Got type {type(image)} which is not supported, only `PIL.Image.Image`, `np.array` and "
391
+ "`torch.Tensor` are."
392
+ )
393
+
394
+ def to_pil_image(self, image, rescale=None):
395
+ """
396
+ Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
397
+ needed.
398
+
399
+ Args:
400
+ image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
401
+ The image to convert to the PIL Image format.
402
+ rescale (`bool`, *optional*):
403
+ Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
404
+ default to `True` if the image type is a floating type, `False` otherwise.
405
+ """
406
+ self._ensure_format_supported(image)
407
+
408
+ if is_torch_tensor(image):
409
+ image = image.numpy()
410
+
411
+ if isinstance(image, np.ndarray):
412
+ if rescale is None:
413
+ # rescale default to the array being of floating type.
414
+ rescale = isinstance(image.flat[0], np.floating)
415
+ # If the channel has been moved to the first dim, we put it back at the end.
416
+ if image.ndim == 3 and image.shape[0] in [1, 3]:
417
+ image = image.transpose(1, 2, 0)
418
+ if rescale:
419
+ image = image * 255
420
+ image = image.astype(np.uint8)
421
+ return PIL.Image.fromarray(image)
422
+ return image
423
+
424
+ def convert_rgb(self, image):
425
+ """
426
+ Converts `PIL.Image.Image` to RGB format.
427
+
428
+ Args:
429
+ image (`PIL.Image.Image`):
430
+ The image to convert.
431
+ """
432
+ self._ensure_format_supported(image)
433
+ if not isinstance(image, PIL.Image.Image):
434
+ return image
435
+
436
+ return image.convert("RGB")
437
+
438
+ def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray:
439
+ """
440
+ Rescale a numpy image by scale amount
441
+ """
442
+ self._ensure_format_supported(image)
443
+ return image * scale
444
+
445
+ def to_numpy_array(self, image, rescale=None, channel_first=True):
446
+ """
447
+ Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first
448
+ dimension.
449
+
450
+ Args:
451
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
452
+ The image to convert to a NumPy array.
453
+ rescale (`bool`, *optional*):
454
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will
455
+ default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise.
456
+ channel_first (`bool`, *optional*, defaults to `True`):
457
+ Whether or not to permute the dimensions of the image to put the channel dimension first.
458
+ """
459
+ self._ensure_format_supported(image)
460
+
461
+ if isinstance(image, PIL.Image.Image):
462
+ image = np.array(image)
463
+
464
+ if is_torch_tensor(image):
465
+ image = image.numpy()
466
+
467
+ rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale
468
+
469
+ if rescale:
470
+ image = self.rescale(image.astype(np.float32), 1 / 255.0)
471
+
472
+ if channel_first and image.ndim == 3:
473
+ image = image.transpose(2, 0, 1)
474
+
475
+ return image
476
+
477
+ def expand_dims(self, image):
478
+ """
479
+ Expands 2-dimensional `image` to 3 dimensions.
480
+
481
+ Args:
482
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
483
+ The image to expand.
484
+ """
485
+ self._ensure_format_supported(image)
486
+
487
+ # Do nothing if PIL image
488
+ if isinstance(image, PIL.Image.Image):
489
+ return image
490
+
491
+ if is_torch_tensor(image):
492
+ image = image.unsqueeze(0)
493
+ else:
494
+ image = np.expand_dims(image, axis=0)
495
+ return image
496
+
497
+ def normalize(self, image, mean, std, rescale=False):
498
+ """
499
+ Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array
500
+ if it's a PIL Image.
501
+
502
+ Args:
503
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
504
+ The image to normalize.
505
+ mean (`List[float]` or `np.ndarray` or `torch.Tensor`):
506
+ The mean (per channel) to use for normalization.
507
+ std (`List[float]` or `np.ndarray` or `torch.Tensor`):
508
+ The standard deviation (per channel) to use for normalization.
509
+ rescale (`bool`, *optional*, defaults to `False`):
510
+ Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will
511
+ happen automatically.
512
+ """
513
+ self._ensure_format_supported(image)
514
+
515
+ if isinstance(image, PIL.Image.Image):
516
+ image = self.to_numpy_array(image, rescale=True)
517
+ # If the input image is a PIL image, it automatically gets rescaled. If it's another
518
+ # type it may need rescaling.
519
+ elif rescale:
520
+ if isinstance(image, np.ndarray):
521
+ image = self.rescale(image.astype(np.float32), 1 / 255.0)
522
+ elif is_torch_tensor(image):
523
+ image = self.rescale(image.float(), 1 / 255.0)
524
+
525
+ if isinstance(image, np.ndarray):
526
+ if not isinstance(mean, np.ndarray):
527
+ mean = np.array(mean).astype(image.dtype)
528
+ if not isinstance(std, np.ndarray):
529
+ std = np.array(std).astype(image.dtype)
530
+ elif is_torch_tensor(image):
531
+ import torch
532
+
533
+ if not isinstance(mean, torch.Tensor):
534
+ mean = torch.tensor(mean)
535
+ if not isinstance(std, torch.Tensor):
536
+ std = torch.tensor(std)
537
+
538
+ if image.ndim == 3 and image.shape[0] in [1, 3]:
539
+ return (image - mean[:, None, None]) / std[:, None, None]
540
+ else:
541
+ return (image - mean) / std
542
+
543
+ def resize(self, image, size, resample=None, default_to_square=True, max_size=None):
544
+ """
545
+ Resizes `image`. Enforces conversion of input to PIL.Image.
546
+
547
+ Args:
548
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
549
+ The image to resize.
550
+ size (`int` or `Tuple[int, int]`):
551
+ The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be
552
+ matched to this.
553
+
554
+ If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
555
+ `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to
556
+ this number. i.e, if height > width, then image will be rescaled to (size * height / width, size).
557
+ resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
558
+ The filter to use for resampling.
559
+ default_to_square (`bool`, *optional*, defaults to `True`):
560
+ How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a
561
+ square (`size`,`size`). If set to `False`, will replicate
562
+ [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
563
+ with support for resizing only the smallest edge and providing an optional `max_size`.
564
+ max_size (`int`, *optional*, defaults to `None`):
565
+ The maximum allowed for the longer edge of the resized image: if the longer edge of the image is
566
+ greater than `max_size` after being resized according to `size`, then the image is resized again so
567
+ that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller
568
+ edge may be shorter than `size`. Only used if `default_to_square` is `False`.
569
+
570
+ Returns:
571
+ image: A resized `PIL.Image.Image`.
572
+ """
573
+ resample = resample if resample is not None else PILImageResampling.BILINEAR
574
+
575
+ self._ensure_format_supported(image)
576
+
577
+ if not isinstance(image, PIL.Image.Image):
578
+ image = self.to_pil_image(image)
579
+
580
+ if isinstance(size, list):
581
+ size = tuple(size)
582
+
583
+ if isinstance(size, int) or len(size) == 1:
584
+ if default_to_square:
585
+ size = (size, size) if isinstance(size, int) else (size[0], size[0])
586
+ else:
587
+ width, height = image.size
588
+ # specified size only for the smallest edge
589
+ short, long = (width, height) if width <= height else (height, width)
590
+ requested_new_short = size if isinstance(size, int) else size[0]
591
+
592
+ if short == requested_new_short:
593
+ return image
594
+
595
+ new_short, new_long = requested_new_short, int(requested_new_short * long / short)
596
+
597
+ if max_size is not None:
598
+ if max_size <= requested_new_short:
599
+ raise ValueError(
600
+ f"max_size = {max_size} must be strictly greater than the requested "
601
+ f"size for the smaller edge size = {size}"
602
+ )
603
+ if new_long > max_size:
604
+ new_short, new_long = int(max_size * new_short / new_long), max_size
605
+
606
+ size = (new_short, new_long) if width <= height else (new_long, new_short)
607
+
608
+ return image.resize(size, resample=resample)
609
+
610
+ def center_crop(self, image, size):
611
+ """
612
+ Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the
613
+ size given, it will be padded (so the returned result has the requested size).
614
+
615
+ Args:
616
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)):
617
+ The image to crop.
618
+ size (`int` or `Tuple[int, int]`):
619
+ The size to which to crop the image.
620
+
621
+ Returns:
622
+ new_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels,
623
+ height, width).
624
+ """
625
+ self._ensure_format_supported(image)
626
+
627
+ if not isinstance(size, tuple):
628
+ size = (size, size)
629
+
630
+ # PIL Image.size is (width, height) but NumPy array and torch Tensors have (height, width)
631
+ if is_torch_tensor(image) or isinstance(image, np.ndarray):
632
+ if image.ndim == 2:
633
+ image = self.expand_dims(image)
634
+ image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2]
635
+ else:
636
+ image_shape = (image.size[1], image.size[0])
637
+
638
+ top = (image_shape[0] - size[0]) // 2
639
+ bottom = top + size[0] # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
640
+ left = (image_shape[1] - size[1]) // 2
641
+ right = left + size[1] # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.
642
+
643
+ # For PIL Images we have a method to crop directly.
644
+ if isinstance(image, PIL.Image.Image):
645
+ return image.crop((left, top, right, bottom))
646
+
647
+ # Check if image is in (n_channels, height, width) or (height, width, n_channels) format
648
+ channel_first = True if image.shape[0] in [1, 3] else False
649
+
650
+ # Transpose (height, width, n_channels) format images
651
+ if not channel_first:
652
+ if isinstance(image, np.ndarray):
653
+ image = image.transpose(2, 0, 1)
654
+ if is_torch_tensor(image):
655
+ image = image.permute(2, 0, 1)
656
+
657
+ # Check if cropped area is within image boundaries
658
+ if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:
659
+ return image[..., top:bottom, left:right]
660
+
661
+ # Otherwise, we may need to pad if the image is too small. Oh joy...
662
+ new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))
663
+ if isinstance(image, np.ndarray):
664
+ new_image = np.zeros_like(image, shape=new_shape)
665
+ elif is_torch_tensor(image):
666
+ new_image = image.new_zeros(new_shape)
667
+
668
+ top_pad = (new_shape[-2] - image_shape[0]) // 2
669
+ bottom_pad = top_pad + image_shape[0]
670
+ left_pad = (new_shape[-1] - image_shape[1]) // 2
671
+ right_pad = left_pad + image_shape[1]
672
+ new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image
673
+
674
+ top += top_pad
675
+ bottom += top_pad
676
+ left += left_pad
677
+ right += left_pad
678
+
679
+ new_image = new_image[
680
+ ..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)
681
+ ]
682
+
683
+ return new_image
684
+
685
+ def flip_channel_order(self, image):
686
+ """
687
+ Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of
688
+ `image` to a NumPy array if it's a PIL Image.
689
+
690
+ Args:
691
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
692
+ The image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimension should
693
+ be first.
694
+ """
695
+ self._ensure_format_supported(image)
696
+
697
+ if isinstance(image, PIL.Image.Image):
698
+ image = self.to_numpy_array(image)
699
+
700
+ return image[::-1, :, :]
701
+
702
+ def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None):
703
+ """
704
+ Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees
705
+ counter clockwise around its centre.
706
+
707
+ Args:
708
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
709
+ The image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before
710
+ rotating.
711
+
712
+ Returns:
713
+ image: A rotated `PIL.Image.Image`.
714
+ """
715
+ resample = resample if resample is not None else PIL.Image.NEAREST
716
+
717
+ self._ensure_format_supported(image)
718
+
719
+ if not isinstance(image, PIL.Image.Image):
720
+ image = self.to_pil_image(image)
721
+
722
+ return image.rotate(
723
+ angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor
724
+ )
725
+
726
+
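
# --- Illustrative usage sketch (not part of the file above) ---------------------------------
# Chaining the legacy ImageFeatureExtractionMixin helpers on a synthetic image; the IMAGENET
# constants are the usual transformers defaults re-exported by this module.
import numpy as np
from transformers.image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ImageFeatureExtractionMixin,
)

helper = ImageFeatureExtractionMixin()
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

resized = helper.resize(image, size=256, default_to_square=False)  # PIL.Image, short side == 256
cropped = helper.center_crop(helper.to_numpy_array(resized), 224)  # float array of shape (3, 224, 224)
normalized = helper.normalize(cropped, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD)
print(normalized.shape)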
727
+ def promote_annotation_format(annotation_format: Union[AnnotionFormat, AnnotationFormat]) -> AnnotationFormat:
728
+ # can be removed when `AnnotionFormat` is fully deprecated
729
+ return AnnotationFormat(annotation_format.value)
730
+
731
+
732
+ def validate_annotations(
733
+ annotation_format: AnnotationFormat,
734
+ supported_annotation_formats: Tuple[AnnotationFormat, ...],
735
+ annotations: List[Dict],
736
+ ) -> None:
737
+ if isinstance(annotation_format, AnnotionFormat):
738
+ logger.warning_once(
739
+ f"`{annotation_format.__class__.__name__}` is deprecated and will be removed in v4.38. "
740
+ f"Please use `{AnnotationFormat.__name__}` instead."
741
+ )
742
+ annotation_format = promote_annotation_format(annotation_format)
743
+
744
+ if annotation_format not in supported_annotation_formats:
745
+ raise ValueError(f"Unsupported annotation format: {annotation_format} must be one of {supported_annotation_formats}")
746
+
747
+ if annotation_format is AnnotationFormat.COCO_DETECTION:
748
+ if not valid_coco_detection_annotations(annotations):
749
+ raise ValueError(
750
+ "Invalid COCO detection annotations. Annotations must be a dict (single image) or list of dicts "
751
+ "(batch of images) with the following keys: `image_id` and `annotations`, with the latter "
752
+ "being a list of annotations in the COCO format."
753
+ )
754
+
755
+ if annotation_format is AnnotationFormat.COCO_PANOPTIC:
756
+ if not valid_coco_panoptic_annotations(annotations):
757
+ raise ValueError(
758
+ "Invalid COCO panoptic annotations. Annotations must be a dict (single image) or list of dicts "
759
+ "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with "
760
+ "the latter being a list of annotations in the COCO format."
761
+ )
762
+
763
+
764
+ def validate_kwargs(valid_processor_keys: List[str], captured_kwargs: List[str]):
765
+ unused_keys = set(captured_kwargs).difference(set(valid_processor_keys))
766
+ if unused_keys:
767
+ unused_key_str = ", ".join(unused_keys)
768
+ # TODO raise a warning here instead of simply logging?
769
+ logger.warning(f"Unused or unrecognized kwargs: {unused_key_str}.")
llmeval-env/lib/python3.10/site-packages/transformers/modelcard.py ADDED
@@ -0,0 +1,904 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Model card base class and utilities."""
16
+
17
+
18
+ import copy
19
+ import json
20
+ import os
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from pathlib import Path
24
+ from typing import Any, Dict, List, Optional, Union
25
+
26
+ import requests
27
+ import yaml
28
+ from huggingface_hub import model_info
29
+ from huggingface_hub.utils import HFValidationError
30
+
31
+ from . import __version__
32
+ from .models.auto.modeling_auto import (
33
+ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
34
+ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
35
+ MODEL_FOR_CTC_MAPPING_NAMES,
36
+ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
37
+ MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES,
38
+ MODEL_FOR_MASKED_LM_MAPPING_NAMES,
39
+ MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
40
+ MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
41
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
42
+ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
43
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
44
+ MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES,
45
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
46
+ MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES,
47
+ )
48
+ from .training_args import ParallelMode
49
+ from .utils import (
50
+ MODEL_CARD_NAME,
51
+ cached_file,
52
+ is_datasets_available,
53
+ is_offline_mode,
54
+ is_tf_available,
55
+ is_tokenizers_available,
56
+ is_torch_available,
57
+ logging,
58
+ )
59
+
60
+
61
+ TASK_MAPPING = {
62
+ "text-generation": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
63
+ "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
64
+ "image-segmentation": MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES,
65
+ "fill-mask": MODEL_FOR_MASKED_LM_MAPPING_NAMES,
66
+ "object-detection": MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
67
+ "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
68
+ "text2text-generation": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
69
+ "text-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
70
+ "table-question-answering": MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES,
71
+ "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
72
+ "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
73
+ "automatic-speech-recognition": {**MODEL_FOR_CTC_MAPPING_NAMES, **MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES},
74
+ "zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES,
75
+ }
76
+
77
+ logger = logging.get_logger(__name__)
78
+
79
+
80
+ class ModelCard:
81
+ r"""
82
+ Structured Model Card class. Store model card as well as methods for loading/downloading/saving model cards.
83
+
84
+ Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting" by
85
+ Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer,
86
+ Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993
87
+
88
+ Note: A model card can be loaded and saved to disk.
89
+ """
90
+
91
+ def __init__(self, **kwargs):
92
+ warnings.warn(
93
+ "The class `ModelCard` is deprecated and will be removed in version 5 of Transformers", FutureWarning
94
+ )
95
+ # Recommended attributes from https://arxiv.org/abs/1810.03993 (see papers)
96
+ self.model_details = kwargs.pop("model_details", {})
97
+ self.intended_use = kwargs.pop("intended_use", {})
98
+ self.factors = kwargs.pop("factors", {})
99
+ self.metrics = kwargs.pop("metrics", {})
100
+ self.evaluation_data = kwargs.pop("evaluation_data", {})
101
+ self.training_data = kwargs.pop("training_data", {})
102
+ self.quantitative_analyses = kwargs.pop("quantitative_analyses", {})
103
+ self.ethical_considerations = kwargs.pop("ethical_considerations", {})
104
+ self.caveats_and_recommendations = kwargs.pop("caveats_and_recommendations", {})
105
+
106
+ # Open additional attributes
107
+ for key, value in kwargs.items():
108
+ try:
109
+ setattr(self, key, value)
110
+ except AttributeError as err:
111
+ logger.error(f"Can't set {key} with value {value} for {self}")
112
+ raise err
113
+
114
+ def save_pretrained(self, save_directory_or_file):
115
+ """Save a model card object to the directory or file `save_directory_or_file`."""
116
+ if os.path.isdir(save_directory_or_file):
117
+ # If we save using the predefined names, we can load using `from_pretrained`
118
+ output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME)
119
+ else:
120
+ output_model_card_file = save_directory_or_file
121
+
122
+ self.to_json_file(output_model_card_file)
123
+ logger.info(f"Model card saved in {output_model_card_file}")
124
+
125
+ @classmethod
126
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
127
+ r"""
128
+ Instantiate a [`ModelCard`] from a pre-trained model card.
129
+
130
+ Parameters:
131
+ pretrained_model_name_or_path: either:
132
+
133
+ - a string, the *model id* of a pretrained model card hosted inside a model repo on huggingface.co.
134
+ - a path to a *directory* containing a model card file saved using the [`~ModelCard.save_pretrained`]
135
+ method, e.g.: `./my_model_directory/`.
136
+ - a path or url to a saved model card JSON *file*, e.g.: `./my_model_directory/modelcard.json`.
137
+
138
+ cache_dir: (*optional*) string:
139
+ Path to a directory in which a downloaded pre-trained model card should be cached if the standard cache
140
+ should not be used.
141
+
142
+ kwargs: (*optional*) dict: key/value pairs with which to update the ModelCard object after loading.
143
+
144
+ - The values in kwargs of any keys which are model card attributes will be used to override the loaded
145
+ values.
146
+ - Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by the
147
+ *return_unused_kwargs* keyword parameter.
148
+
149
+ proxies: (*optional*) dict, default None:
150
+ A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128',
151
+ 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request.
152
+
153
+ return_unused_kwargs: (*optional*) bool:
154
+
155
+ - If False, then this function returns just the final model card object.
156
+ - If True, then this functions returns a tuple *(model card, unused_kwargs)* where *unused_kwargs* is a
157
+ dictionary consisting of the key/value pairs whose keys are not model card attributes: ie the part of
158
+ kwargs which has not been used to update *ModelCard* and is otherwise ignored.
159
+
160
+ Examples:
161
+
162
+ ```python
163
+ # Download model card from huggingface.co and cache.
164
+ modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased")
165
+ # Model card was saved using *save_pretrained('./test/saved_model/')*
166
+ modelcard = ModelCard.from_pretrained("./test/saved_model/")
167
+ modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json")
168
+ modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
169
+ ```"""
170
+ cache_dir = kwargs.pop("cache_dir", None)
171
+ proxies = kwargs.pop("proxies", None)
172
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
173
+ from_pipeline = kwargs.pop("_from_pipeline", None)
174
+
175
+ user_agent = {"file_type": "model_card"}
176
+ if from_pipeline is not None:
177
+ user_agent["using_pipeline"] = from_pipeline
178
+
179
+ is_local = os.path.isdir(pretrained_model_name_or_path)
180
+ if os.path.isfile(pretrained_model_name_or_path):
181
+ resolved_model_card_file = pretrained_model_name_or_path
182
+ is_local = True
183
+ else:
184
+ try:
185
+ # Load from URL or cache if already cached
186
+ resolved_model_card_file = cached_file(
187
+ pretrained_model_name_or_path,
188
+ filename=MODEL_CARD_NAME,
189
+ cache_dir=cache_dir,
190
+ proxies=proxies,
191
+ user_agent=user_agent,
192
+ )
193
+ if is_local:
194
+ logger.info(f"loading model card file {resolved_model_card_file}")
195
+ else:
196
+ logger.info(f"loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}")
197
+ # Load model card
198
+ modelcard = cls.from_json_file(resolved_model_card_file)
199
+
200
+ except (EnvironmentError, json.JSONDecodeError):
201
+ # We fall back on creating an empty model card
202
+ modelcard = cls()
203
+
204
+ # Update model card with kwargs if needed
205
+ to_remove = []
206
+ for key, value in kwargs.items():
207
+ if hasattr(modelcard, key):
208
+ setattr(modelcard, key, value)
209
+ to_remove.append(key)
210
+ for key in to_remove:
211
+ kwargs.pop(key, None)
212
+
213
+ logger.info(f"Model card: {modelcard}")
214
+ if return_unused_kwargs:
215
+ return modelcard, kwargs
216
+ else:
217
+ return modelcard
218
+
219
+ @classmethod
220
+ def from_dict(cls, json_object):
221
+ """Constructs a `ModelCard` from a Python dictionary of parameters."""
222
+ return cls(**json_object)
223
+
224
+ @classmethod
225
+ def from_json_file(cls, json_file):
226
+ """Constructs a `ModelCard` from a json file of parameters."""
227
+ with open(json_file, "r", encoding="utf-8") as reader:
228
+ text = reader.read()
229
+ dict_obj = json.loads(text)
230
+ return cls(**dict_obj)
231
+
232
+ def __eq__(self, other):
233
+ return self.__dict__ == other.__dict__
234
+
235
+ def __repr__(self):
236
+ return str(self.to_json_string())
237
+
238
+ def to_dict(self):
239
+ """Serializes this instance to a Python dictionary."""
240
+ output = copy.deepcopy(self.__dict__)
241
+ return output
242
+
243
+ def to_json_string(self):
244
+ """Serializes this instance to a JSON string."""
245
+ return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
246
+
247
+ def to_json_file(self, json_file_path):
248
+ """Save this instance to a json file."""
249
+ with open(json_file_path, "w", encoding="utf-8") as writer:
250
+ writer.write(self.to_json_string())
251
+
252
+
253
+ AUTOGENERATED_TRAINER_COMMENT = """
254
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
255
+ should probably proofread and complete it, then remove this comment. -->
256
+ """
257
+
258
+ AUTOGENERATED_KERAS_COMMENT = """
259
+ <!-- This model card has been generated automatically according to the information Keras had access to. You should
260
+ probably proofread and complete it, then remove this comment. -->
261
+ """
262
+
263
+
264
+ TASK_TAG_TO_NAME_MAPPING = {
265
+ "fill-mask": "Masked Language Modeling",
266
+ "image-classification": "Image Classification",
267
+ "image-segmentation": "Image Segmentation",
268
+ "multiple-choice": "Multiple Choice",
269
+ "object-detection": "Object Detection",
270
+ "question-answering": "Question Answering",
271
+ "summarization": "Summarization",
272
+ "table-question-answering": "Table Question Answering",
273
+ "text-classification": "Text Classification",
274
+ "text-generation": "Causal Language Modeling",
275
+ "text2text-generation": "Sequence-to-sequence Language Modeling",
276
+ "token-classification": "Token Classification",
277
+ "translation": "Translation",
278
+ "zero-shot-classification": "Zero Shot Classification",
279
+ "automatic-speech-recognition": "Automatic Speech Recognition",
280
+ "audio-classification": "Audio Classification",
281
+ }
282
+
283
+
284
+ METRIC_TAGS = [
285
+ "accuracy",
286
+ "bleu",
287
+ "f1",
288
+ "matthews_correlation",
289
+ "pearsonr",
290
+ "precision",
291
+ "recall",
292
+ "rouge",
293
+ "sacrebleu",
294
+ "spearmanr",
295
+ "wer",
296
+ ]
297
+
298
+
299
+ def _listify(obj):
300
+ if obj is None:
301
+ return []
302
+ elif isinstance(obj, str):
303
+ return [obj]
304
+ else:
305
+ return obj
306
+
307
+
308
+ def _insert_values_as_list(metadata, name, values):
309
+ if values is None:
310
+ return metadata
311
+ if isinstance(values, str):
312
+ values = [values]
313
+ values = [v for v in values if v is not None]
314
+ if len(values) == 0:
315
+ return metadata
316
+ metadata[name] = values
317
+ return metadata
318
+
319
+
320
+ def infer_metric_tags_from_eval_results(eval_results):
321
+ if eval_results is None:
322
+ return {}
323
+ result = {}
324
+ for key in eval_results.keys():
325
+ if key.lower().replace(" ", "_") in METRIC_TAGS:
326
+ result[key.lower().replace(" ", "_")] = key
327
+ elif key.lower() == "rouge1":
328
+ result["rouge"] = key
329
+ return result
330
+
331
+
332
+ def _insert_value(metadata, name, value):
333
+ if value is None:
334
+ return metadata
335
+ metadata[name] = value
336
+ return metadata
337
+
338
+
339
+ def is_hf_dataset(dataset):
340
+ if not is_datasets_available():
341
+ return False
342
+
343
+ from datasets import Dataset, IterableDataset
344
+
345
+ return isinstance(dataset, (Dataset, IterableDataset))
346
+
347
+
348
+ def _get_mapping_values(mapping):
349
+ result = []
350
+ for v in mapping.values():
351
+ if isinstance(v, (tuple, list)):
352
+ result += list(v)
353
+ else:
354
+ result.append(v)
355
+ return result
356
+
357
+
358
+ @dataclass
359
+ class TrainingSummary:
360
+ model_name: str
361
+ language: Optional[Union[str, List[str]]] = None
362
+ license: Optional[str] = None
363
+ tags: Optional[Union[str, List[str]]] = None
364
+ finetuned_from: Optional[str] = None
365
+ tasks: Optional[Union[str, List[str]]] = None
366
+ dataset: Optional[Union[str, List[str]]] = None
367
+ dataset_tags: Optional[Union[str, List[str]]] = None
368
+ dataset_args: Optional[Union[str, List[str]]] = None
369
+ dataset_metadata: Optional[Dict[str, Any]] = None
370
+ eval_results: Optional[Dict[str, float]] = None
371
+ eval_lines: Optional[List[str]] = None
372
+ hyperparameters: Optional[Dict[str, Any]] = None
373
+ source: Optional[str] = "trainer"
374
+
375
+ def __post_init__(self):
376
+ # Infer default license from the checkpoint used, if possible.
377
+ if (
378
+ self.license is None
379
+ and not is_offline_mode()
380
+ and self.finetuned_from is not None
381
+ and len(self.finetuned_from) > 0
382
+ ):
383
+ try:
384
+ info = model_info(self.finetuned_from)
385
+ for tag in info.tags:
386
+ if tag.startswith("license:"):
387
+ self.license = tag[8:]
388
+ except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, HFValidationError):
389
+ pass
390
+
391
+ def create_model_index(self, metric_mapping):
392
+ model_index = {"name": self.model_name}
393
+
394
+ # Dataset mapping tag -> name
395
+ dataset_names = _listify(self.dataset)
396
+ dataset_tags = _listify(self.dataset_tags)
397
+ dataset_args = _listify(self.dataset_args)
398
+ dataset_metadata = _listify(self.dataset_metadata)
399
+ if len(dataset_args) < len(dataset_tags):
400
+ dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args))
401
+ dataset_mapping = dict(zip(dataset_tags, dataset_names))
402
+ dataset_arg_mapping = dict(zip(dataset_tags, dataset_args))
403
+ dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata))
404
+
405
+ task_mapping = {
406
+ task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING
407
+ }
408
+
409
+ model_index["results"] = []
410
+
411
+ if len(task_mapping) == 0 and len(dataset_mapping) == 0:
412
+ return [model_index]
413
+ if len(task_mapping) == 0:
414
+ task_mapping = {None: None}
415
+ if len(dataset_mapping) == 0:
416
+ dataset_mapping = {None: None}
417
+
418
+ # One entry per dataset and per task
419
+ all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping]
420
+ for task_tag, ds_tag in all_possibilities:
421
+ result = {}
422
+ if task_tag is not None:
423
+ result["task"] = {"name": task_mapping[task_tag], "type": task_tag}
424
+
425
+ if ds_tag is not None:
426
+ metadata = dataset_metadata_mapping.get(ds_tag, {})
427
+ result["dataset"] = {
428
+ "name": dataset_mapping[ds_tag],
429
+ "type": ds_tag,
430
+ **metadata,
431
+ }
432
+ if dataset_arg_mapping[ds_tag] is not None:
433
+ result["dataset"]["args"] = dataset_arg_mapping[ds_tag]
434
+
435
+ if len(metric_mapping) > 0:
436
+ result["metrics"] = []
437
+ for metric_tag, metric_name in metric_mapping.items():
438
+ result["metrics"].append(
439
+ {
440
+ "name": metric_name,
441
+ "type": metric_tag,
442
+ "value": self.eval_results[metric_name],
443
+ }
444
+ )
445
+
446
+ # Remove partial results to avoid the model card being rejected.
447
+ if "task" in result and "dataset" in result and "metrics" in result:
448
+ model_index["results"].append(result)
449
+ else:
450
+ logger.info(f"Dropping the following result as it does not have all the necessary fields:\n{result}")
451
+
452
+ return [model_index]
453
+
454
+ def create_metadata(self):
455
+ metric_mapping = infer_metric_tags_from_eval_results(self.eval_results)
456
+
457
+ metadata = {}
458
+ metadata = _insert_values_as_list(metadata, "language", self.language)
459
+ metadata = _insert_value(metadata, "license", self.license)
460
+ if self.finetuned_from is not None and isinstance(self.finetuned_from, str) and len(self.finetuned_from) > 0:
461
+ metadata = _insert_value(metadata, "base_model", self.finetuned_from)
462
+ metadata = _insert_values_as_list(metadata, "tags", self.tags)
463
+ metadata = _insert_values_as_list(metadata, "datasets", self.dataset_tags)
464
+ metadata = _insert_values_as_list(metadata, "metrics", list(metric_mapping.keys()))
465
+ metadata["model-index"] = self.create_model_index(metric_mapping)
466
+
467
+ return metadata
468
+
469
+ def to_model_card(self):
470
+ model_card = ""
471
+
472
+ metadata = yaml.dump(self.create_metadata(), sort_keys=False)
473
+ if len(metadata) > 0:
474
+ model_card = f"---\n{metadata}---\n"
475
+
476
+ # Now the model card for realsies.
477
+ if self.source == "trainer":
478
+ model_card += AUTOGENERATED_TRAINER_COMMENT
479
+ else:
480
+ model_card += AUTOGENERATED_KERAS_COMMENT
481
+
482
+ model_card += f"\n# {self.model_name}\n\n"
483
+
484
+ if self.finetuned_from is None:
485
+ model_card += "This model was trained from scratch on "
486
+ else:
487
+ model_card += (
488
+ "This model is a fine-tuned version of"
489
+ f" [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on "
490
+ )
491
+
492
+ if self.dataset is None:
493
+ model_card += "an unknown dataset."
494
+ else:
495
+ if isinstance(self.dataset, str):
496
+ model_card += f"the {self.dataset} dataset."
497
+ elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1:
498
+ model_card += f"the {self.dataset[0]} dataset."
499
+ else:
500
+ model_card += (
501
+ ", ".join([f"the {ds}" for ds in self.dataset[:-1]]) + f" and the {self.dataset[-1]} datasets."
502
+ )
503
+
504
+ if self.eval_results is not None:
505
+ model_card += "\nIt achieves the following results on the evaluation set:\n"
506
+ model_card += "\n".join([f"- {name}: {_maybe_round(value)}" for name, value in self.eval_results.items()])
507
+ model_card += "\n"
508
+
509
+ model_card += "\n## Model description\n\nMore information needed\n"
510
+ model_card += "\n## Intended uses & limitations\n\nMore information needed\n"
511
+ model_card += "\n## Training and evaluation data\n\nMore information needed\n"
512
+
513
+ model_card += "\n## Training procedure\n"
514
+ model_card += "\n### Training hyperparameters\n"
515
+ if self.hyperparameters is not None:
516
+ model_card += "\nThe following hyperparameters were used during training:\n"
517
+ model_card += "\n".join([f"- {name}: {value}" for name, value in self.hyperparameters.items()])
518
+ model_card += "\n"
519
+ else:
520
+ model_card += "\nMore information needed\n"
521
+
522
+ if self.eval_lines is not None:
523
+ model_card += "\n### Training results\n\n"
524
+ model_card += make_markdown_table(self.eval_lines)
525
+ model_card += "\n"
526
+
527
+ model_card += "\n### Framework versions\n\n"
528
+ model_card += f"- Transformers {__version__}\n"
529
+
530
+ if self.source == "trainer" and is_torch_available():
531
+ import torch
532
+
533
+ model_card += f"- Pytorch {torch.__version__}\n"
534
+ elif self.source == "keras" and is_tf_available():
535
+ import tensorflow as tf
536
+
537
+ model_card += f"- TensorFlow {tf.__version__}\n"
538
+ if is_datasets_available():
539
+ import datasets
540
+
541
+ model_card += f"- Datasets {datasets.__version__}\n"
542
+ if is_tokenizers_available():
543
+ import tokenizers
544
+
545
+ model_card += f"- Tokenizers {tokenizers.__version__}\n"
546
+
547
+ return model_card
548
+
549
+ @classmethod
550
+ def from_trainer(
551
+ cls,
552
+ trainer,
553
+ language=None,
554
+ license=None,
555
+ tags=None,
556
+ model_name=None,
557
+ finetuned_from=None,
558
+ tasks=None,
559
+ dataset_tags=None,
560
+ dataset_metadata=None,
561
+ dataset=None,
562
+ dataset_args=None,
563
+ ):
564
+ # Infer default from dataset
565
+ one_dataset = trainer.eval_dataset if trainer.eval_dataset is not None else trainer.train_dataset
566
+ if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None or dataset_metadata is None):
567
+ default_tag = one_dataset.builder_name
568
+ # Those are not real datasets from the Hub so we exclude them.
569
+ if default_tag not in ["csv", "json", "pandas", "parquet", "text"]:
570
+ if dataset_metadata is None:
571
+ dataset_metadata = [{"config": one_dataset.config_name, "split": str(one_dataset.split)}]
572
+ if dataset_tags is None:
573
+ dataset_tags = [default_tag]
574
+ if dataset_args is None:
575
+ dataset_args = [one_dataset.config_name]
576
+
577
+ if dataset is None and dataset_tags is not None:
578
+ dataset = dataset_tags
579
+
580
+ # Infer default finetuned_from
581
+ if (
582
+ finetuned_from is None
583
+ and hasattr(trainer.model.config, "_name_or_path")
584
+ and not os.path.isdir(trainer.model.config._name_or_path)
585
+ ):
586
+ finetuned_from = trainer.model.config._name_or_path
587
+
588
+ # Infer default task tag:
589
+ if tasks is None:
590
+ model_class_name = trainer.model.__class__.__name__
591
+ for task, mapping in TASK_MAPPING.items():
592
+ if model_class_name in _get_mapping_values(mapping):
593
+ tasks = task
594
+
595
+ if model_name is None:
596
+ model_name = Path(trainer.args.output_dir).name
597
+ if len(model_name) == 0:
598
+ model_name = finetuned_from
599
+
600
+ # Add `generated_from_trainer` to the tags
601
+ if tags is None:
602
+ tags = ["generated_from_trainer"]
603
+ elif isinstance(tags, str) and tags != "generated_from_trainer":
604
+ tags = [tags, "generated_from_trainer"]
605
+ elif "generated_from_trainer" not in tags:
606
+ tags.append("generated_from_trainer")
607
+
608
+ _, eval_lines, eval_results = parse_log_history(trainer.state.log_history)
609
+ hyperparameters = extract_hyperparameters_from_trainer(trainer)
610
+
611
+ return cls(
612
+ language=language,
613
+ license=license,
614
+ tags=tags,
615
+ model_name=model_name,
616
+ finetuned_from=finetuned_from,
617
+ tasks=tasks,
618
+ dataset=dataset,
619
+ dataset_tags=dataset_tags,
620
+ dataset_args=dataset_args,
621
+ dataset_metadata=dataset_metadata,
622
+ eval_results=eval_results,
623
+ eval_lines=eval_lines,
624
+ hyperparameters=hyperparameters,
625
+ )
626
+
627
+ @classmethod
628
+ def from_keras(
629
+ cls,
630
+ model,
631
+ model_name,
632
+ keras_history=None,
633
+ language=None,
634
+ license=None,
635
+ tags=None,
636
+ finetuned_from=None,
637
+ tasks=None,
638
+ dataset_tags=None,
639
+ dataset=None,
640
+ dataset_args=None,
641
+ ):
642
+ # Infer default from dataset
643
+ if dataset is not None:
644
+ if is_hf_dataset(dataset) and (dataset_tags is None or dataset_args is None):
645
+ default_tag = dataset.builder_name
646
+ # Those are not real datasets from the Hub so we exclude them.
647
+ if default_tag not in ["csv", "json", "pandas", "parquet", "text"]:
648
+ if dataset_tags is None:
649
+ dataset_tags = [default_tag]
650
+ if dataset_args is None:
651
+ dataset_args = [dataset.config_name]
652
+
653
+ if dataset is None and dataset_tags is not None:
654
+ dataset = dataset_tags
655
+
656
+ # Infer default finetuned_from
657
+ if (
658
+ finetuned_from is None
659
+ and hasattr(model.config, "_name_or_path")
660
+ and not os.path.isdir(model.config._name_or_path)
661
+ ):
662
+ finetuned_from = model.config._name_or_path
663
+
664
+ # Infer default task tag:
665
+ if tasks is None:
666
+ model_class_name = model.__class__.__name__
667
+ for task, mapping in TASK_MAPPING.items():
668
+ if model_class_name in _get_mapping_values(mapping):
669
+ tasks = task
670
+
671
+ # Add `generated_from_keras_callback` to the tags
672
+ if tags is None:
673
+ tags = ["generated_from_keras_callback"]
674
+ elif isinstance(tags, str) and tags != "generated_from_keras_callback":
675
+ tags = [tags, "generated_from_keras_callback"]
676
+ elif "generated_from_keras_callback" not in tags:
677
+ tags.append("generated_from_keras_callback")
678
+
679
+ if keras_history is not None:
680
+ _, eval_lines, eval_results = parse_keras_history(keras_history)
681
+ else:
682
+ eval_lines = []
683
+ eval_results = {}
684
+ hyperparameters = extract_hyperparameters_from_keras(model)
685
+
686
+ return cls(
687
+ language=language,
688
+ license=license,
689
+ tags=tags,
690
+ model_name=model_name,
691
+ finetuned_from=finetuned_from,
692
+ tasks=tasks,
693
+ dataset_tags=dataset_tags,
694
+ dataset=dataset,
695
+ dataset_args=dataset_args,
696
+ eval_results=eval_results,
697
+ eval_lines=eval_lines,
698
+ hyperparameters=hyperparameters,
699
+ source="keras",
700
+ )
701
+
702
+
703
+ def parse_keras_history(logs):
704
+ """
705
+ Parse the `logs` of either a `keras.History` object returned by `model.fit()` or an accumulated logs `dict`
706
+ passed to the `PushToHubCallback`. Returns lines and logs compatible with those returned by `parse_log_history`.
707
+ """
708
+ if hasattr(logs, "history"):
709
+ # This looks like a `History` object
710
+ if not hasattr(logs, "epoch"):
711
+ # This history looks empty, return empty results
712
+ return None, [], {}
713
+ logs.history["epoch"] = logs.epoch
714
+ logs = logs.history
715
+ else:
716
+ # Training logs are a list of dicts, so invert them to a dict of lists to match a History object
717
+ logs = {log_key: [single_dict[log_key] for single_dict in logs] for log_key in logs[0]}
718
+
719
+ lines = []
720
+ for i in range(len(logs["epoch"])):
721
+ epoch_dict = {log_key: log_value_list[i] for log_key, log_value_list in logs.items()}
722
+ values = {}
723
+ for k, v in epoch_dict.items():
724
+ if k.startswith("val_"):
725
+ k = "validation_" + k[4:]
726
+ elif k != "epoch":
727
+ k = "train_" + k
728
+ splits = k.split("_")
729
+ name = " ".join([part.capitalize() for part in splits])
730
+ values[name] = v
731
+ lines.append(values)
732
+
733
+ eval_results = lines[-1]
734
+
735
+ return logs, lines, eval_results
736
+
737
+
738
+ def parse_log_history(log_history):
739
+ """
740
+ Parse the `log_history` of a Trainer to get the intermediate and final evaluation results.
741
+ """
742
+ idx = 0
743
+ while idx < len(log_history) and "train_runtime" not in log_history[idx]:
744
+ idx += 1
745
+
746
+ # If there are no training logs
747
+ if idx == len(log_history):
748
+ idx -= 1
749
+ while idx >= 0 and "eval_loss" not in log_history[idx]:
750
+ idx -= 1
751
+
752
+ if idx >= 0:
753
+ return None, None, log_history[idx]
754
+ else:
755
+ return None, None, None
756
+
757
+ # From now on we can assume we have training logs:
758
+ train_log = log_history[idx]
759
+ lines = []
760
+ training_loss = "No log"
761
+ for i in range(idx):
762
+ if "loss" in log_history[i]:
763
+ training_loss = log_history[i]["loss"]
764
+ if "eval_loss" in log_history[i]:
765
+ metrics = log_history[i].copy()
766
+ _ = metrics.pop("total_flos", None)
767
+ epoch = metrics.pop("epoch", None)
768
+ step = metrics.pop("step", None)
769
+ _ = metrics.pop("eval_runtime", None)
770
+ _ = metrics.pop("eval_samples_per_second", None)
771
+ _ = metrics.pop("eval_steps_per_second", None)
772
+ _ = metrics.pop("eval_jit_compilation_time", None)
773
+ values = {"Training Loss": training_loss, "Epoch": epoch, "Step": step}
774
+ for k, v in metrics.items():
775
+ if k == "eval_loss":
776
+ values["Validation Loss"] = v
777
+ else:
778
+ splits = k.split("_")
779
+ name = " ".join([part.capitalize() for part in splits[1:]])
780
+ values[name] = v
781
+ lines.append(values)
782
+
783
+ idx = len(log_history) - 1
784
+ while idx >= 0 and "eval_loss" not in log_history[idx]:
785
+ idx -= 1
786
+
787
+ if idx > 0:
788
+ eval_results = {}
789
+ for key, value in log_history[idx].items():
790
+ if key.startswith("eval_"):
791
+ key = key[5:]
792
+ if key not in ["runtime", "samples_per_second", "steps_per_second", "epoch", "step"]:
793
+ camel_cased_key = " ".join([part.capitalize() for part in key.split("_")])
794
+ eval_results[camel_cased_key] = value
795
+ return train_log, lines, eval_results
796
+ else:
797
+ return train_log, lines, None
798
+
799
+
800
+ def extract_hyperparameters_from_keras(model):
801
+ from .modeling_tf_utils import keras
802
+
803
+ hyperparameters = {}
804
+ if hasattr(model, "optimizer") and model.optimizer is not None:
805
+ hyperparameters["optimizer"] = model.optimizer.get_config()
806
+ else:
807
+ hyperparameters["optimizer"] = None
808
+ hyperparameters["training_precision"] = keras.mixed_precision.global_policy().name
809
+
810
+ return hyperparameters
811
+
812
+
813
+ def _maybe_round(v, decimals=4):
814
+ if isinstance(v, float) and len(str(v).split(".")) > 1 and len(str(v).split(".")[1]) > decimals:
815
+ return f"{v:.{decimals}f}"
816
+ return str(v)
817
+
818
+
819
+ def _regular_table_line(values, col_widths):
820
+ values_with_space = [f"| {v}" + " " * (w - len(v) + 1) for v, w in zip(values, col_widths)]
821
+ return "".join(values_with_space) + "|\n"
822
+
823
+
824
+ def _second_table_line(col_widths):
825
+ values = ["|:" + "-" * w + ":" for w in col_widths]
826
+ return "".join(values) + "|\n"
827
+
828
+
829
+ def make_markdown_table(lines):
830
+ """
831
+ Create a nice Markdown table from the results in `lines`.
832
+ """
833
+ if lines is None or len(lines) == 0:
834
+ return ""
835
+ col_widths = {key: len(str(key)) for key in lines[0].keys()}
836
+ for line in lines:
837
+ for key, value in line.items():
838
+ if col_widths[key] < len(_maybe_round(value)):
839
+ col_widths[key] = len(_maybe_round(value))
840
+
841
+ table = _regular_table_line(list(lines[0].keys()), list(col_widths.values()))
842
+ table += _second_table_line(list(col_widths.values()))
843
+ for line in lines:
844
+ table += _regular_table_line([_maybe_round(v) for v in line.values()], list(col_widths.values()))
845
+ return table
846
+
847
+
848
+ _TRAINING_ARGS_KEYS = [
849
+ "learning_rate",
850
+ "train_batch_size",
851
+ "eval_batch_size",
852
+ "seed",
853
+ ]
854
+
855
+
856
+ def extract_hyperparameters_from_trainer(trainer):
857
+ hyperparameters = {k: getattr(trainer.args, k) for k in _TRAINING_ARGS_KEYS}
858
+
859
+ if trainer.args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]:
860
+ hyperparameters["distributed_type"] = (
861
+ "multi-GPU" if trainer.args.parallel_mode == ParallelMode.DISTRIBUTED else trainer.args.parallel_mode.value
862
+ )
863
+ if trainer.args.world_size > 1:
864
+ hyperparameters["num_devices"] = trainer.args.world_size
865
+ if trainer.args.gradient_accumulation_steps > 1:
866
+ hyperparameters["gradient_accumulation_steps"] = trainer.args.gradient_accumulation_steps
867
+
868
+ total_train_batch_size = (
869
+ trainer.args.train_batch_size * trainer.args.world_size * trainer.args.gradient_accumulation_steps
870
+ )
871
+ if total_train_batch_size != hyperparameters["train_batch_size"]:
872
+ hyperparameters["total_train_batch_size"] = total_train_batch_size
873
+ total_eval_batch_size = trainer.args.eval_batch_size * trainer.args.world_size
874
+ if total_eval_batch_size != hyperparameters["eval_batch_size"]:
875
+ hyperparameters["total_eval_batch_size"] = total_eval_batch_size
876
+
877
+ if trainer.args.adafactor:
878
+ hyperparameters["optimizer"] = "Adafactor"
879
+ else:
880
+ hyperparameters["optimizer"] = (
881
+ f"Adam with betas=({trainer.args.adam_beta1},{trainer.args.adam_beta2}) and"
882
+ f" epsilon={trainer.args.adam_epsilon}"
883
+ )
884
+
885
+ hyperparameters["lr_scheduler_type"] = trainer.args.lr_scheduler_type.value
886
+ if trainer.args.warmup_ratio != 0.0:
887
+ hyperparameters["lr_scheduler_warmup_ratio"] = trainer.args.warmup_ratio
888
+ if trainer.args.warmup_steps != 0.0:
889
+ hyperparameters["lr_scheduler_warmup_steps"] = trainer.args.warmup_steps
890
+ if trainer.args.max_steps != -1:
891
+ hyperparameters["training_steps"] = trainer.args.max_steps
892
+ else:
893
+ hyperparameters["num_epochs"] = trainer.args.num_train_epochs
894
+
895
+ if trainer.args.fp16:
896
+ if trainer.use_apex:
897
+ hyperparameters["mixed_precision_training"] = f"Apex, opt level {trainer.args.fp16_opt_level}"
898
+ else:
899
+ hyperparameters["mixed_precision_training"] = "Native AMP"
900
+
901
+ if trainer.args.label_smoothing_factor != 0.0:
902
+ hyperparameters["label_smoothing_factor"] = trainer.args.label_smoothing_factor
903
+
904
+ return hyperparameters
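The table and rounding helpers above (`_maybe_round`, `make_markdown_table`) produce the "Training results" table in the generated card. A short, hedged sketch of their behavior; the metric values are made up for illustration:

```python
# Sketch: formatting evaluation lines the way TrainingSummary.to_model_card does.
from transformers.modelcard import _maybe_round, make_markdown_table

eval_lines = [
    {"Training Loss": 0.5234, "Epoch": 1.0, "Step": 500, "Validation Loss": 0.4311987, "Accuracy": 0.8512345},
    {"Training Loss": 0.3121, "Epoch": 2.0, "Step": 1000, "Validation Loss": 0.3986501, "Accuracy": 0.8723456},
]

print(_maybe_round(0.8512345))          # floats with > 4 decimals are rendered as "0.8512"
print(make_markdown_table(eval_lines))  # one Markdown row per entry, columns padded to width
```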
llmeval-env/lib/python3.10/site-packages/transformers/modeling_attn_mask_utils.py ADDED
@@ -0,0 +1,492 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import List, Optional, Tuple, Union
16
+
17
+ import torch
18
+
19
+
20
+ @dataclass
21
+ class AttentionMaskConverter:
22
+ """
23
+ A utility attention mask class that allows one to:
24
+ - Create a causal 4d mask
25
+ - Create a causal 4d mask with sliding window
26
+ - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length,
27
+ key_value_length) that can be multiplied with attention scores
28
+
29
+ Examples:
30
+
31
+ ```python
32
+ >>> import torch
33
+ >>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter
34
+
35
+ >>> converter = AttentionMaskConverter(True)
36
+ >>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, key_value_length=5, dtype=torch.float32)
37
+ tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
38
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
39
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
40
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38],
41
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]])
42
+ ```
43
+
44
+ Parameters:
45
+ is_causal (`bool`):
46
+ Whether the attention mask should be a uni-directional (causal) or bi-directional mask.
47
+
48
+ sliding_window (`int`, *optional*):
49
+ Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer.
50
+ """
51
+
52
+ is_causal: bool
53
+ sliding_window: int
54
+
55
+ def __init__(self, is_causal: bool, sliding_window: Optional[int] = None):
56
+ self.is_causal = is_causal
57
+ self.sliding_window = sliding_window
58
+
59
+ if self.sliding_window is not None and self.sliding_window <= 0:
60
+ raise ValueError(
61
+ f"Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`"
62
+ )
63
+
64
+ def to_causal_4d(
65
+ self,
66
+ batch_size: int,
67
+ query_length: int,
68
+ key_value_length: int,
69
+ dtype: torch.dtype,
70
+ device: Union[torch.device, "str"] = "cpu",
71
+ ) -> Optional[torch.Tensor]:
72
+ """
73
+ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative
74
+ bias to upper right hand triangular matrix (causal mask).
75
+ """
76
+ if not self.is_causal:
77
+ raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.")
78
+
79
+ # If shape is not cached, create a new causal mask and cache it
80
+ input_shape = (batch_size, query_length)
81
+ past_key_values_length = key_value_length - query_length
82
+
83
+ # create causal mask
84
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
85
+ causal_4d_mask = None
86
+ if input_shape[-1] > 1 or self.sliding_window is not None:
87
+ causal_4d_mask = self._make_causal_mask(
88
+ input_shape,
89
+ dtype,
90
+ device=device,
91
+ past_key_values_length=past_key_values_length,
92
+ sliding_window=self.sliding_window,
93
+ )
94
+
95
+ return causal_4d_mask
96
+
97
+ def to_4d(
98
+ self,
99
+ attention_mask_2d: torch.Tensor,
100
+ query_length: int,
101
+ dtype: torch.dtype,
102
+ key_value_length: Optional[int] = None,
103
+ ) -> torch.Tensor:
104
+ """
105
+ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length,
106
+ key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is
107
+ causal, a causal mask will be added.
108
+ """
109
+ input_shape = (attention_mask_2d.shape[0], query_length)
110
+
111
+ # create causal mask
112
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
113
+ causal_4d_mask = None
114
+ if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal:
115
+ if key_value_length is None:
116
+ raise ValueError(
117
+ "This attention mask converter is causal. Make sure to pass `key_value_length` to correctly create a causal mask."
118
+ )
119
+
120
+ past_key_values_length = key_value_length - query_length
121
+ causal_4d_mask = self._make_causal_mask(
122
+ input_shape,
123
+ dtype,
124
+ device=attention_mask_2d.device,
125
+ past_key_values_length=past_key_values_length,
126
+ sliding_window=self.sliding_window,
127
+ )
128
+ elif self.sliding_window is not None:
129
+ raise NotImplementedError("Sliding window is currently only implemented for causal masking")
130
+
131
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
132
+ expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to(
133
+ attention_mask_2d.device
134
+ )
135
+
136
+ if causal_4d_mask is not None:
137
+ expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min)
138
+
139
+ # expanded_attn_mask + causal_4d_mask can cause some overflow
140
+ expanded_4d_mask = expanded_attn_mask
141
+
142
+ return expanded_4d_mask
143
+
144
+ @staticmethod
145
+ def _make_causal_mask(
146
+ input_ids_shape: torch.Size,
147
+ dtype: torch.dtype,
148
+ device: torch.device,
149
+ past_key_values_length: int = 0,
150
+ sliding_window: Optional[int] = None,
151
+ ):
152
+ """
153
+ Make a causal mask used for uni-directional (causal) self-attention.
154
+ """
155
+ bsz, tgt_len = input_ids_shape
156
+ mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
157
+ mask_cond = torch.arange(mask.size(-1), device=device)
158
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
159
+
160
+ mask = mask.to(dtype)
161
+
162
+ if past_key_values_length > 0:
163
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
164
+
165
+ # add lower triangular sliding window mask if necessary
166
+ if sliding_window is not None:
167
+ diagonal = past_key_values_length - sliding_window - 1
168
+
169
+ context_mask = torch.tril(torch.ones_like(mask, dtype=torch.bool), diagonal=diagonal)
170
+ mask.masked_fill_(context_mask, torch.finfo(dtype).min)
171
+
172
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
173
+
174
+ @staticmethod
175
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
176
+ """
177
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
178
+ """
179
+ bsz, src_len = mask.size()
180
+ tgt_len = tgt_len if tgt_len is not None else src_len
181
+
182
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
183
+
184
+ inverted_mask = 1.0 - expanded_mask
185
+
186
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
187
+
188
+ @staticmethod
189
+ def _unmask_unattended(
190
+ expanded_mask: torch.FloatTensor,
191
+ min_dtype: float,
192
+ ):
193
+ # fmt: off
194
+ """
195
+ Attend to all tokens in masked rows from the expanded attention mask, for example the relevant first rows when
196
+ using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
197
+ Details: https://github.com/pytorch/pytorch/issues/110213
198
+
199
+ `expanded_mask` is [bsz, num_masks, tgt_seq_len, src_seq_len] or [bsz, tgt_seq_len, src_seq_len].
200
+ `attention_mask` is [bsz, src_seq_len].
201
+
202
+ The dimension num_masks of `expanded_mask` is most often 1, but it can also be the number of heads in the case of alibi attention bias.
203
+
204
+ For example, if `expanded_mask` is (e.g. here left-padding case)
205
+ ```
206
+ [[[[0, 0, 0],
207
+ [0, 0, 0],
208
+ [0, 0, 1]]],
209
+ [[[1, 0, 0],
210
+ [1, 1, 0],
211
+ [1, 1, 1]]],
212
+ [[[0, 0, 0],
213
+ [0, 1, 0],
214
+ [0, 1, 1]]]]
215
+ ```
216
+ then the modified `expanded_mask` will be
217
+ ```
218
+ [[[[1, 1, 1], <-- modified
219
+ [1, 1, 1], <-- modified
220
+ [0, 0, 1]]],
221
+ [[[1, 0, 0],
222
+ [1, 1, 0],
223
+ [1, 1, 1]]],
224
+ [[[1, 1, 1], <-- modified
225
+ [0, 1, 0],
226
+ [0, 1, 1]]]]
227
+ ```
228
+ """
229
+ # fmt: on
230
+ if expanded_mask.dtype == torch.bool:
231
+ raise ValueError(
232
+ "AttentionMaskConverter._unmask_unattended expects a float `expanded_mask`, got a BoolTensor."
233
+ )
234
+
235
+ return expanded_mask.mul(~torch.all(expanded_mask == min_dtype, dim=-1, keepdim=True))
236
+
237
+ @staticmethod
238
+ def _ignore_causal_mask_sdpa(
239
+ attention_mask: Optional[torch.Tensor],
240
+ inputs_embeds: torch.Tensor,
241
+ past_key_values_length: int,
242
+ sliding_window: Optional[int] = None,
243
+ ) -> bool:
244
+ """
245
+ Detects whether the optional user-specified attention_mask & the automatically created causal mask can be ignored in case PyTorch's SDPA is used, relying instead on SDPA's `is_causal` argument.
246
+
247
+ In case no token is masked in the `attention_mask` argument, if `query_length == 1` or
248
+ `key_value_length == query_length`, we rather rely on SDPA `is_causal` argument to use causal/non-causal masks,
249
+ allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed).
250
+ """
251
+
252
+ batch_size, query_length = inputs_embeds.shape[0], inputs_embeds.shape[1]
253
+ key_value_length = query_length + past_key_values_length
254
+
255
+ is_tracing = (
256
+ torch.jit.is_tracing()
257
+ or isinstance(inputs_embeds, torch.fx.Proxy)
258
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
259
+ )
260
+
261
+ ignore_causal_mask = False
262
+
263
+ if attention_mask is None:
264
+ # TODO: When tracing with TorchDynamo with fullgraph=True, the model is recompiled depending on the input shape, thus SDPA's `is_causal` argument is rightfully updated (see https://gist.github.com/fxmarty/1313f39037fc1c112508989628c57363). However, when using `torch.export` or
265
+ # `torch.onnx.dynamo_export`, we must pass an example input, and `is_causal` behavior is hard-coded. If a user exports a model with q_len > 1, the exported model will hard-code `is_causal=True` which is in general wrong (see https://github.com/pytorch/pytorch/issues/108108).
266
+ # Thus, we currently can NOT set `ignore_causal_mask = True` here. We would need a `torch._dynamo.is_exporting()` flag.
267
+ #
268
+ # Besides, jit.trace can not handle the `q_len > 1` condition for `is_causal` (`TypeError: scaled_dot_product_attention(): argument 'is_causal' must be bool, not Tensor`).
269
+ if (
270
+ not is_tracing
271
+ and (query_length == 1 or key_value_length == query_length)
272
+ and (sliding_window is None or key_value_length < sliding_window)
273
+ ):
274
+ ignore_causal_mask = True
275
+ elif sliding_window is None or key_value_length < sliding_window:
276
+ if len(attention_mask.shape) == 4:
277
+ expected_shape = (batch_size, 1, query_length, key_value_length)
278
+ if tuple(attention_mask.shape) != expected_shape:
279
+ raise ValueError(
280
+ f"Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}."
281
+ )
282
+ elif not is_tracing and torch.all(attention_mask == 1):
283
+ if query_length == 1 or key_value_length == query_length:
284
+ # For query_length == 1, causal attention and bi-directional attention are the same.
285
+ ignore_causal_mask = True
286
+
287
+ # Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot generally ignore the attention mask, as SDPA causal mask generation
288
+ # may be wrong. We will set `is_causal=False` in SDPA and rely on Transformers attention_mask instead, hence not setting it to None here.
289
+ # Reference: https://github.com/pytorch/pytorch/issues/108108
290
+ # TODO: maybe revisit this with https://github.com/pytorch/pytorch/pull/114823 in PyTorch 2.3.
291
+
292
+ return ignore_causal_mask
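As the docstring above describes, a fully unmasked input whose query and key/value lengths match can drop the explicit mask and rely on SDPA's `is_causal` instead. A small, hedged sketch (calling the private helper directly, outside any tracing context):

```python
# Sketch: the causal mask can be skipped when nothing is masked and q_len == kv_len.
import torch
from transformers.modeling_attn_mask_utils import AttentionMaskConverter

inputs_embeds = torch.randn(1, 4, 8)             # (batch_size, query_length, hidden_size)
full_mask = torch.ones(1, 4, dtype=torch.long)   # no padding anywhere

print(
    AttentionMaskConverter._ignore_causal_mask_sdpa(
        full_mask, inputs_embeds, past_key_values_length=0
    )
)  # True -> the caller may pass attn_mask=None and let SDPA apply is_causal
```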
293
+
294
+
295
+ def _prepare_4d_causal_attention_mask(
296
+ attention_mask: Optional[torch.Tensor],
297
+ input_shape: Union[torch.Size, Tuple, List],
298
+ inputs_embeds: torch.Tensor,
299
+ past_key_values_length: int,
300
+ sliding_window: Optional[int] = None,
301
+ ):
302
+ """
303
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
304
+ `(batch_size, key_value_length)`
305
+
306
+ Args:
307
+ attention_mask (`torch.Tensor` or `None`):
308
+ A 2D attention mask of shape `(batch_size, key_value_length)`
309
+ input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
310
+ The input shape should be a tuple that defines `(batch_size, query_length)`.
311
+ inputs_embeds (`torch.Tensor`):
312
+ The embedded inputs as a torch Tensor.
313
+ past_key_values_length (`int`):
314
+ The length of the key value cache.
315
+ sliding_window (`int`, *optional*):
316
+ If the model uses windowed attention, a sliding window should be passed.
317
+ """
318
+ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
319
+
320
+ key_value_length = input_shape[-1] + past_key_values_length
321
+
322
+ # 4d mask is passed through the layers
323
+ if attention_mask is not None and len(attention_mask.shape) == 2:
324
+ attention_mask = attn_mask_converter.to_4d(
325
+ attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype
326
+ )
327
+ elif attention_mask is not None and len(attention_mask.shape) == 4:
328
+ expected_shape = (input_shape[0], 1, input_shape[1], key_value_length)
329
+ if tuple(attention_mask.shape) != expected_shape:
330
+ raise ValueError(
331
+ f"Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}."
332
+ )
333
+ else:
334
+ # if the 4D mask has correct shape - invert it and fill with negative infinity
335
+ inverted_mask = 1.0 - attention_mask
336
+ attention_mask = inverted_mask.masked_fill(
337
+ inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min
338
+ )
339
+ else:
340
+ attention_mask = attn_mask_converter.to_causal_4d(
341
+ input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
342
+ )
343
+
344
+ return attention_mask
345
+
346
+
347
+ # Adapted from _prepare_4d_causal_attention_mask
348
+ def _prepare_4d_causal_attention_mask_for_sdpa(
349
+ attention_mask: Optional[torch.Tensor],
350
+ input_shape: Union[torch.Size, Tuple, List],
351
+ inputs_embeds: torch.Tensor,
352
+ past_key_values_length: int,
353
+ sliding_window: Optional[int] = None,
354
+ ):
355
+ """
356
+ Prepares the correct `attn_mask` argument to be used by `torch.nn.functional.scaled_dot_product_attention`.
357
+
358
+ In case no token is masked in the `attention_mask` argument, we simply set it to `None` for the cases `query_length == 1` and
359
+ `key_value_length == query_length`, and rely instead on SDPA `is_causal` argument to use causal/non-causal masks,
360
+ allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed).
361
+ """
362
+ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
363
+
364
+ key_value_length = input_shape[-1] + past_key_values_length
365
+
366
+ # torch.jit.trace, symbolic_trace and torchdynamo with fullgraph=True are unable to capture the controlflow `is_causal=attention_mask is None and q_len > 1`
367
+ # used as an SDPA argument. We keep compatibility with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing.
368
+ # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).
369
+ is_tracing = (
370
+ torch.jit.is_tracing()
371
+ or isinstance(inputs_embeds, torch.fx.Proxy)
372
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
373
+ )
374
+
375
+ ignore_causal_mask = AttentionMaskConverter._ignore_causal_mask_sdpa(
376
+ attention_mask=attention_mask,
377
+ inputs_embeds=inputs_embeds,
378
+ past_key_values_length=past_key_values_length,
379
+ sliding_window=sliding_window,
380
+ )
381
+
382
+ if ignore_causal_mask:
383
+ expanded_4d_mask = None
384
+ elif attention_mask is None:
385
+ expanded_4d_mask = attn_mask_converter.to_causal_4d(
386
+ input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
387
+ )
388
+ else:
389
+ expanded_4d_mask = attn_mask_converter.to_4d(
390
+ attention_mask,
391
+ input_shape[-1],
392
+ dtype=inputs_embeds.dtype,
393
+ key_value_length=key_value_length,
394
+ )
395
+
396
+ # Attend to all tokens in masked rows from the causal_mask, for example the relevant first rows when
397
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
398
+ # Details: https://github.com/pytorch/pytorch/issues/110213
399
+ if not is_tracing and expanded_4d_mask.device.type == "cuda":
400
+ expanded_4d_mask = AttentionMaskConverter._unmask_unattended(
401
+ expanded_4d_mask, min_dtype=torch.finfo(inputs_embeds.dtype).min
402
+ )
403
+
404
+ return expanded_4d_mask
405
+
406
+
407
+ def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
408
+ """
409
+ Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
410
+ `(batch_size, key_value_length)`
411
+
412
+ Args:
413
+ mask (`torch.Tensor` or `None`):
414
+ A 2D attention mask of shape `(batch_size, key_value_length)`
415
+ dtype (`torch.dtype`):
416
+ The torch dtype the created mask shall have.
417
+ tgt_len (`int`):
418
+ The target length or query length the created mask shall have.
419
+ """
420
+ return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
421
+
422
+
423
+ def _prepare_4d_attention_mask_for_sdpa(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
424
+ """
425
+ Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
426
+ `(batch_size, key_value_length)`
427
+
428
+ Args:
429
+ mask (`torch.Tensor` or `None`):
430
+ A 2D attention mask of shape `(batch_size, key_value_length)`
431
+ dtype (`torch.dtype`):
432
+ The torch dtype the created mask shall have.
433
+ tgt_len (`int`):
434
+ The target length or query length the created mask shall have.
435
+ """
436
+ batch_size, key_value_length = mask.shape
437
+ tgt_len = tgt_len if tgt_len is not None else key_value_length
438
+
439
+ # torch.jit.trace, symbolic_trace and torchdynamo with fullgraph=True are unable to capture the controlflow `is_causal=attention_mask is None and q_len > 1`
440
+ # used as an SDPA argument. We keep compatibility with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing.
441
+ # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).
442
+ is_tracing = (
443
+ torch.jit.is_tracing()
444
+ or isinstance(mask, torch.fx.Proxy)
445
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
446
+ )
447
+
448
+ if torch.all(mask == 1):
449
+ if is_tracing:
450
+ pass
451
+ elif tgt_len == 1:
452
+ # For query_length == 1, causal attention and bi-directional attention are the same.
453
+ return None
454
+ elif key_value_length == tgt_len:
455
+ return None
456
+ else:
457
+ # Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot generally ignore the attention mask, as SDPA causal mask generation
458
+ # may be wrong. We will set is_causal=False in SDPA and rely on Transformers attention_mask instead, hence not setting it to None here.
459
+ # Reference: https://github.com/pytorch/pytorch/issues/108108
460
+ return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
461
+ else:
462
+ return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
463
+
464
+
465
+ def _create_4d_causal_attention_mask(
466
+ input_shape: Union[torch.Size, Tuple, List],
467
+ dtype: torch.dtype,
468
+ device: torch.device,
469
+ past_key_values_length: int = 0,
470
+ sliding_window: Optional[int] = None,
471
+ ) -> Optional[torch.Tensor]:
472
+ """
473
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)`
474
+
475
+ Args:
476
+ input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
477
+ The input shape should be a tuple that defines `(batch_size, query_length)`.
478
+ dtype (`torch.dtype`):
479
+ The torch dtype the created mask shall have.
480
+ device (`int`):
481
+ The torch device the created mask shall have.
482
+ sliding_window (`int`, *optional*):
483
+ If the model uses windowed attention, a sliding window should be passed.
484
+ """
485
+ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
486
+
487
+ key_value_length = past_key_values_length + input_shape[-1]
488
+ attention_mask = attn_mask_converter.to_causal_4d(
489
+ input_shape[0], input_shape[-1], key_value_length, dtype=dtype, device=device
490
+ )
491
+
492
+ return attention_mask
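The module-level helpers above all funnel into `AttentionMaskConverter`; a short, hedged usage sketch of the eager-attention entry point (shapes and values are illustrative):

```python
# Sketch: turning a 2D padding mask into the 4D additive causal mask used by eager attention.
import torch
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask

batch_size, query_length, hidden_size = 2, 5, 8
inputs_embeds = torch.randn(batch_size, query_length, hidden_size)
attention_mask_2d = torch.tensor([[1, 1, 1, 1, 1],
                                  [0, 0, 1, 1, 1]])  # second sequence is left-padded

mask_4d = _prepare_4d_causal_attention_mask(
    attention_mask_2d, (batch_size, query_length), inputs_embeds, past_key_values_length=0
)
print(mask_4d.shape)  # torch.Size([2, 1, 5, 5]); blocked positions hold torch.finfo(dtype).min
```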
llmeval-env/lib/python3.10/site-packages/transformers/modeling_flax_outputs.py ADDED
@@ -0,0 +1,700 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Dict, Optional, Tuple
15
+
16
+ import flax
17
+ import jax.numpy as jnp
18
+
19
+ from .utils import ModelOutput
20
+
21
+
22
+ @flax.struct.dataclass
23
+ class FlaxBaseModelOutput(ModelOutput):
24
+ """
25
+ Base class for model's outputs, with potential hidden states and attentions.
26
+
27
+ Args:
28
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
29
+ Sequence of hidden-states at the output of the last layer of the model.
30
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
31
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
32
+ `(batch_size, sequence_length, hidden_size)`.
33
+
34
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
35
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
36
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
37
+ sequence_length)`.
38
+
39
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
40
+ heads.
41
+ """
42
+
43
+ last_hidden_state: jnp.ndarray = None
44
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
45
+ attentions: Optional[Tuple[jnp.ndarray]] = None
46
+
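Like every `ModelOutput`, the Flax output dataclasses can be accessed both by attribute and by index. A minimal, hedged sketch (requires `jax` and `flax` to be installed; the shapes are illustrative):

```python
# Sketch: Flax output classes double as dataclasses and tuples.
import jax.numpy as jnp
from transformers.modeling_flax_outputs import FlaxBaseModelOutput

out = FlaxBaseModelOutput(last_hidden_state=jnp.zeros((1, 5, 8)))
print(out.last_hidden_state.shape)  # (1, 5, 8) -- attribute access
print(out[0].shape)                 # same tensor via ModelOutput's tuple-style indexing
```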
47
+
48
+ @flax.struct.dataclass
49
+ class FlaxBaseModelOutputWithNoAttention(ModelOutput):
50
+ """
51
+ Base class for model's outputs, with potential hidden states.
52
+
53
+ Args:
54
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
55
+ Sequence of hidden-states at the output of the last layer of the model.
56
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
57
+ Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
58
+ for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the
59
+ model at the output of each layer plus the optional initial embedding outputs.
60
+ """
61
+
62
+ last_hidden_state: jnp.ndarray = None
63
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
64
+
65
+
66
+ @flax.struct.dataclass
67
+ class FlaxBaseModelOutputWithPoolingAndNoAttention(ModelOutput):
68
+ """
69
+ Base class for model's outputs that also contains a pooling of the last hidden states.
70
+
71
+ Args:
72
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
73
+ Sequence of hidden-states at the output of the last layer of the model.
74
+ pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
75
+ Last layer hidden-state after a pooling operation on the spatial dimensions.
76
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
77
+ Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
78
+ for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the
79
+ model at the output of each layer plus the optional initial embedding outputs.
80
+ """
81
+
82
+ last_hidden_state: jnp.ndarray = None
83
+ pooler_output: jnp.ndarray = None
84
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
85
+
86
+
87
+ @flax.struct.dataclass
88
+ class FlaxImageClassifierOutputWithNoAttention(ModelOutput):
89
+ """
90
+ Base class for outputs of image classification models.
91
+
92
+ Args:
93
+ logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):
94
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
95
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when
96
+ `config.output_hidden_states=True`):
97
+ Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
98
+ for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
99
+ called feature maps) of the model at the output of each stage.
100
+ """
101
+
102
+ logits: jnp.ndarray = None
103
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
104
+
105
+
106
+ @flax.struct.dataclass
107
+ class FlaxBaseModelOutputWithPast(ModelOutput):
108
+ """
109
+ Base class for model's outputs, with potential hidden states and attentions.
110
+
111
+ Args:
112
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
113
+ Sequence of hidden-states at the output of the last layer of the model.
114
+ past_key_values (`Dict[str, jnp.ndarray]`):
115
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
116
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
117
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
118
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
119
+ `(batch_size, sequence_length, hidden_size)`.
120
+
121
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
122
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
123
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
124
+ sequence_length)`.
125
+
126
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
127
+ heads.
128
+ """
129
+
130
+ last_hidden_state: jnp.ndarray = None
131
+ past_key_values: Optional[Dict[str, jnp.ndarray]] = None
132
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
133
+ attentions: Optional[Tuple[jnp.ndarray]] = None
134
+
135
+
136
+ @flax.struct.dataclass
137
+ class FlaxBaseModelOutputWithPooling(ModelOutput):
138
+ """
139
+ Base class for model's outputs that also contains a pooling of the last hidden states.
140
+
141
+ Args:
142
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
143
+ Sequence of hidden-states at the output of the last layer of the model.
144
+ pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
145
+ Last layer hidden-state of the first token of the sequence (classification token) further processed by a
146
+ Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
147
+ prediction (classification) objective during pretraining.
148
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
149
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
150
+ `(batch_size, sequence_length, hidden_size)`.
151
+
152
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
153
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
154
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
155
+ sequence_length)`.
156
+
157
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
158
+ heads.
159
+ """
160
+
161
+ last_hidden_state: jnp.ndarray = None
162
+ pooler_output: jnp.ndarray = None
163
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
164
+ attentions: Optional[Tuple[jnp.ndarray]] = None
165
+
166
+
167
+ @flax.struct.dataclass
168
+ class FlaxBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
169
+ """
170
+ Base class for model's outputs that also contains a pooling of the last hidden states.
171
+
172
+ Args:
173
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
174
+ Sequence of hidden-states at the output of the last layer of the model.
175
+ pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
176
+ Last layer hidden-state of the first token of the sequence (classification token) after further processing
177
+ through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
178
+ the classification token after processing through a linear layer and a tanh activation function. The linear
179
+ layer weights are trained from the next sentence prediction (classification) objective during pretraining.
180
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
181
+ Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
182
+ for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
183
+
184
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
185
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
186
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
187
+ sequence_length)`.
188
+
189
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
190
+ heads.
191
+ cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
192
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
193
+ sequence_length)`.
194
+
195
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
196
+ weighted average in the cross-attention heads.
197
+ past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
198
+ Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
199
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
200
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
201
+ encoder_sequence_length, embed_size_per_head)`.
202
+
203
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
204
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
205
+ input) to speed up sequential decoding.
206
+ """
207
+
208
+ last_hidden_state: jnp.ndarray = None
209
+ pooler_output: jnp.ndarray = None
210
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
211
+ past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
212
+ attentions: Optional[Tuple[jnp.ndarray]] = None
213
+ cross_attentions: Optional[Tuple[jnp.ndarray]] = None
214
+
215
+
216
+ @flax.struct.dataclass
217
+ class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput):
218
+ """
219
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
220
+
221
+ Args:
222
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
223
+ Sequence of hidden-states at the output of the last layer of the model.
224
+
225
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
226
+ hidden_size)` is output.
227
+ past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
228
+ Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
229
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
230
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
231
+ encoder_sequence_length, embed_size_per_head)`.
232
+
233
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
234
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
235
+ input) to speed up sequential decoding.
236
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
237
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
238
+ `(batch_size, sequence_length, hidden_size)`.
239
+
240
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
241
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
242
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
243
+ sequence_length)`.
244
+
245
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
246
+ heads.
247
+ cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
248
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
249
+ sequence_length)`.
250
+
251
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
252
+ weighted average in the cross-attention heads.
253
+ """
254
+
255
+ last_hidden_state: jnp.ndarray = None
256
+ past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
257
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
258
+ attentions: Optional[Tuple[jnp.ndarray]] = None
259
+ cross_attentions: Optional[Tuple[jnp.ndarray]] = None
260
+
261
+
262
+ @flax.struct.dataclass
263
+ class FlaxSeq2SeqModelOutput(ModelOutput):
264
+ """
265
+ Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
266
+ decoding.
267
+
268
+ Args:
269
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
270
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
271
+
272
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
273
+ hidden_size)` is output.
274
+ past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
275
+ Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
276
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
277
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
278
+
279
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
280
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
281
+ decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
282
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
283
+ `(batch_size, sequence_length, hidden_size)`.
284
+
285
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
286
+ decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
287
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
288
+ sequence_length)`.
289
+
290
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
291
+ self-attention heads.
292
+ cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
293
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
294
+ sequence_length)`.
295
+
296
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
297
+ weighted average in the cross-attention heads.
298
+ encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
299
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
300
+ encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
301
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
302
+ `(batch_size, sequence_length, hidden_size)`.
303
+
304
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
305
+ encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
306
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
307
+ sequence_length)`.
308
+
309
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
310
+ self-attention heads.
311
+ """
312
+
313
+ last_hidden_state: jnp.ndarray = None
314
+ past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
315
+ decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
316
+ decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
317
+ cross_attentions: Optional[Tuple[jnp.ndarray]] = None
318
+ encoder_last_hidden_state: Optional[jnp.ndarray] = None
319
+ encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
320
+ encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
321
+
322
+
323
+ @flax.struct.dataclass
324
+ class FlaxCausalLMOutputWithCrossAttentions(ModelOutput):
325
+ """
326
+ Base class for causal language model (or autoregressive) outputs.
327
+
328
+ Args:
329
+ logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
330
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
331
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
332
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
333
+ `(batch_size, sequence_length, hidden_size)`.
334
+
335
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
336
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
337
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
338
+ sequence_length)`.
339
+
340
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
341
+ heads.
342
+ cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
343
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
344
+ sequence_length)`.
345
+
346
+ Cross attentions weights after the attention softmax, used to compute the weighted average in the
347
+ cross-attention heads.
348
+ past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
349
+ Tuple of `jnp.ndarray` tuples of length `config.n_layers`, with each tuple containing the cached key, value
350
+ states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting.
351
+ Only relevant if `config.is_decoder = True`.
352
+
353
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
354
+ `past_key_values` input) to speed up sequential decoding.
355
+ """
356
+
357
+ logits: jnp.ndarray = None
358
+ past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
359
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
360
+ attentions: Optional[Tuple[jnp.ndarray]] = None
361
+ cross_attentions: Optional[Tuple[jnp.ndarray]] = None
362
+
363
+
364
+ @flax.struct.dataclass
365
+ class FlaxMaskedLMOutput(ModelOutput):
366
+ """
367
+ Base class for masked language models outputs.
368
+
369
+ Args:
370
+ logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
371
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
372
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
373
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
374
+ `(batch_size, sequence_length, hidden_size)`.
375
+
376
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
377
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
378
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
379
+ sequence_length)`.
380
+
381
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
382
+ heads.
383
+ """
384
+
385
+ logits: jnp.ndarray = None
386
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
387
+ attentions: Optional[Tuple[jnp.ndarray]] = None
388
+
389
+
390
+ FlaxCausalLMOutput = FlaxMaskedLMOutput
391
+
392
+
393
+ @flax.struct.dataclass
394
+ class FlaxSeq2SeqLMOutput(ModelOutput):
395
+ """
396
+ Base class for sequence-to-sequence language models outputs.
397
+
398
+ Args:
399
+ logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
400
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
401
+ past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
402
+ Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
403
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
404
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
405
+
406
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
407
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
408
+ decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
409
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
410
+ `(batch_size, sequence_length, hidden_size)`.
411
+
412
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
413
+ decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
414
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
415
+ sequence_length)`.
416
+
417
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
418
+ self-attention heads.
419
+ cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
420
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
421
+ sequence_length)`.
422
+
423
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
424
+ weighted average in the cross-attention heads.
425
+ encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
426
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
427
+ encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
428
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
429
+ `(batch_size, sequence_length, hidden_size)`.
430
+
431
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
432
+ encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
433
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
434
+ sequence_length)`.
435
+
436
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
437
+ self-attention heads.
438
+ """
439
+
440
+ logits: jnp.ndarray = None
441
+ past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
442
+ decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
443
+ decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
444
+ cross_attentions: Optional[Tuple[jnp.ndarray]] = None
445
+ encoder_last_hidden_state: Optional[jnp.ndarray] = None
446
+ encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
447
+ encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
448
+
449
+
450
+ @flax.struct.dataclass
451
+ class FlaxNextSentencePredictorOutput(ModelOutput):
452
+ """
453
+ Base class for outputs of models predicting if two sentences are consecutive or not.
454
+
455
+ Args:
456
+ logits (`jnp.ndarray` of shape `(batch_size, 2)`):
457
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
458
+ before SoftMax).
459
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
460
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
461
+ `(batch_size, sequence_length, hidden_size)`.
462
+
463
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
464
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
465
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
466
+ sequence_length)`.
467
+
468
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
469
+ heads.
470
+ """
471
+
472
+ logits: jnp.ndarray = None
473
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
474
+ attentions: Optional[Tuple[jnp.ndarray]] = None
475
+
476
+
477
+ @flax.struct.dataclass
478
+ class FlaxSequenceClassifierOutput(ModelOutput):
479
+ """
480
+ Base class for outputs of sentence classification models.
481
+
482
+ Args:
483
+ logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):
484
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
485
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
486
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
487
+ `(batch_size, sequence_length, hidden_size)`.
488
+
489
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
490
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
491
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
492
+ sequence_length)`.
493
+
494
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
495
+ heads.
496
+ """
497
+
498
+ logits: jnp.ndarray = None
499
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
500
+ attentions: Optional[Tuple[jnp.ndarray]] = None
501
+
502
+
503
+ @flax.struct.dataclass
504
+ class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput):
505
+ """
506
+ Base class for outputs of sequence-to-sequence sentence classification models.
507
+
508
+ Args:
509
+ logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):
510
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
511
+ past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
512
+ Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
513
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
514
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
515
+
516
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
517
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
518
+ decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
519
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
520
+ `(batch_size, sequence_length, hidden_size)`.
521
+
522
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
523
+ decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
524
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
525
+ sequence_length)`.
526
+
527
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
528
+ self-attention heads.
529
+ cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
530
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
531
+ sequence_length)`.
532
+
533
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
534
+ weighted average in the cross-attention heads.
535
+ encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
536
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
537
+ encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
538
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
539
+ `(batch_size, sequence_length, hidden_size)`.
540
+
541
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
542
+ encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
543
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
544
+ sequence_length)`.
545
+
546
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
547
+ self-attention heads.
548
+ """
549
+
550
+ logits: jnp.ndarray = None
551
+ past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
552
+ decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
553
+ decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
554
+ cross_attentions: Optional[Tuple[jnp.ndarray]] = None
555
+ encoder_last_hidden_state: Optional[jnp.ndarray] = None
556
+ encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
557
+ encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
558
+
559
+
560
+ @flax.struct.dataclass
561
+ class FlaxMultipleChoiceModelOutput(ModelOutput):
562
+ """
563
+ Base class for outputs of multiple choice models.
564
+
565
+ Args:
566
+ logits (`jnp.ndarray` of shape `(batch_size, num_choices)`):
567
+ *num_choices* is the second dimension of the input tensors (see *input_ids* above).
568
+
569
+ Classification scores (before SoftMax).
570
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
571
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
572
+ `(batch_size, sequence_length, hidden_size)`.
573
+
574
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
575
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
576
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
577
+ sequence_length)`.
578
+
579
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
580
+ heads.
581
+ """
582
+
583
+ logits: jnp.ndarray = None
584
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
585
+ attentions: Optional[Tuple[jnp.ndarray]] = None
586
+
587
+
588
+ @flax.struct.dataclass
589
+ class FlaxTokenClassifierOutput(ModelOutput):
590
+ """
591
+ Base class for outputs of token classification models.
592
+
593
+ Args:
594
+ logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.num_labels)`):
595
+ Classification scores (before SoftMax).
596
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
597
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
598
+ `(batch_size, sequence_length, hidden_size)`.
599
+
600
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
601
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
602
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
603
+ sequence_length)`.
604
+
605
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
606
+ heads.
607
+ """
608
+
609
+ logits: jnp.ndarray = None
610
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
611
+ attentions: Optional[Tuple[jnp.ndarray]] = None
612
+
613
+
614
+ @flax.struct.dataclass
615
+ class FlaxQuestionAnsweringModelOutput(ModelOutput):
616
+ """
617
+ Base class for outputs of question answering models.
618
+
619
+ Args:
620
+ start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
621
+ Span-start scores (before SoftMax).
622
+ end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
623
+ Span-end scores (before SoftMax).
624
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
625
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
626
+ `(batch_size, sequence_length, hidden_size)`.
627
+
628
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
629
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
630
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
631
+ sequence_length)`.
632
+
633
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
634
+ heads.
635
+ """
636
+
637
+ start_logits: jnp.ndarray = None
638
+ end_logits: jnp.ndarray = None
639
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
640
+ attentions: Optional[Tuple[jnp.ndarray]] = None
641
+
642
+
643
+ @flax.struct.dataclass
644
+ class FlaxSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
645
+ """
646
+ Base class for outputs of sequence-to-sequence question answering models.
647
+
648
+ Args:
649
+ start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
650
+ Span-start scores (before SoftMax).
651
+ end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
652
+ Span-end scores (before SoftMax).
653
+ past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
654
+ Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
655
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
656
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
657
+
658
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
659
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
660
+ decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
661
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
662
+ `(batch_size, sequence_length, hidden_size)`.
663
+
664
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
665
+ decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
666
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
667
+ sequence_length)`.
668
+
669
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
670
+ self-attention heads.
671
+ cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
672
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
673
+ sequence_length)`.
674
+
675
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
676
+ weighted average in the cross-attention heads.
677
+ encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
678
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
679
+ encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
680
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
681
+ `(batch_size, sequence_length, hidden_size)`.
682
+
683
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
684
+ encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
685
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
686
+ sequence_length)`.
687
+
688
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
689
+ self-attention heads.
690
+ """
691
+
692
+ start_logits: jnp.ndarray = None
693
+ end_logits: jnp.ndarray = None
694
+ past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
695
+ decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
696
+ decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
697
+ cross_attentions: Optional[Tuple[jnp.ndarray]] = None
698
+ encoder_last_hidden_state: Optional[jnp.ndarray] = None
699
+ encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
700
+ encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
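The dataclasses above are what the Flax model heads return; fields are addressable by attribute and are `None` when the corresponding output was not requested. A minimal usage sketch (not part of the file above; the checkpoint name and printed shapes are illustrative only):

```python
# Hypothetical sketch: inspecting a pooled Flax base-model output.
from transformers import AutoTokenizer, FlaxBertModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")

inputs = tokenizer("Hello, Flax!", return_tensors="np")
outputs = model(**inputs, output_hidden_states=True, output_attentions=True)

print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
print(outputs.pooler_output.shape)      # (batch_size, hidden_size)
print(len(outputs.hidden_states))       # embeddings output + one entry per layer
print(outputs.attentions[0].shape)      # (batch_size, num_heads, seq_len, seq_len)
```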
llmeval-env/lib/python3.10/site-packages/transformers/modeling_flax_utils.py ADDED
@@ -0,0 +1,1288 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ import gc
18
+ import json
19
+ import os
20
+ import re
21
+ import warnings
22
+ from functools import partial
23
+ from pickle import UnpicklingError
24
+ from typing import Any, Dict, Optional, Set, Tuple, Union
25
+
26
+ import flax.linen as nn
27
+ import jax
28
+ import jax.numpy as jnp
29
+ import msgpack.exceptions
30
+ from flax.core.frozen_dict import FrozenDict, unfreeze
31
+ from flax.serialization import from_bytes, to_bytes
32
+ from flax.traverse_util import flatten_dict, unflatten_dict
33
+ from jax.random import PRNGKey
34
+
35
+ from .configuration_utils import PretrainedConfig
36
+ from .dynamic_module_utils import custom_object_save
37
+ from .generation import FlaxGenerationMixin, GenerationConfig
38
+ from .modeling_flax_pytorch_utils import load_pytorch_checkpoint_in_flax_state_dict
39
+ from .utils import (
40
+ FLAX_WEIGHTS_INDEX_NAME,
41
+ FLAX_WEIGHTS_NAME,
42
+ SAFE_WEIGHTS_INDEX_NAME,
43
+ SAFE_WEIGHTS_NAME,
44
+ WEIGHTS_INDEX_NAME,
45
+ WEIGHTS_NAME,
46
+ PushToHubMixin,
47
+ add_code_sample_docstrings,
48
+ add_start_docstrings_to_model_forward,
49
+ cached_file,
50
+ copy_func,
51
+ download_url,
52
+ has_file,
53
+ is_offline_mode,
54
+ is_remote_url,
55
+ logging,
56
+ replace_return_docstrings,
57
+ )
58
+ from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files
59
+ from .utils.import_utils import is_safetensors_available
60
+
61
+
62
+ if is_safetensors_available():
63
+ from safetensors import safe_open
64
+ from safetensors.flax import load_file as safe_load_file
65
+ from safetensors.flax import save_file as safe_save_file
66
+
67
+ logger = logging.get_logger(__name__)
68
+
69
+
70
+ def quick_gelu(x):
71
+ return x * jax.nn.sigmoid(1.702 * x)
72
+
73
+
74
+ ACT2FN = {
75
+ "gelu": partial(nn.gelu, approximate=False),
76
+ "relu": nn.relu,
77
+ "silu": nn.swish,
78
+ "swish": nn.swish,
79
+ "gelu_new": partial(nn.gelu, approximate=True),
80
+ "quick_gelu": quick_gelu,
81
+ "gelu_pytorch_tanh": partial(nn.gelu, approximate=True),
82
+ }
83
+
84
+
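`ACT2FN` maps activation names found in model configs to Flax callables. A small, hypothetical lookup sketch (array values invented for illustration):

```python
# Hypothetical sketch: resolving an activation function by its config name.
import jax.numpy as jnp

act_fn = ACT2FN["gelu_new"]      # tanh-approximated GELU
x = jnp.linspace(-3.0, 3.0, 7)
y = act_fn(x)                    # applied element-wise
print(y.shape)                   # (7,)
```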
85
+ def dtype_byte_size(dtype):
86
+ """
87
+ Returns the size (in bytes) occupied by one parameter of type `dtype`. Example:
88
+ ```py
89
+ >>> dtype_byte_size(np.float32)
90
+ 4
91
+ ```
92
+ """
93
+ if dtype == bool:
94
+ return 1 / 8
95
+ bit_search = re.search(r"[^\d](\d+)$", dtype.name)
96
+ if bit_search is None:
97
+ raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
98
+ bit_size = int(bit_search.groups()[0])
99
+ return bit_size // 8
100
+
101
+
102
+ def flax_shard_checkpoint(params, max_shard_size="10GB"):
103
+ """
104
+ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
105
+ given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so
106
+ there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For
107
+ example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as
108
+ [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
109
+
110
+ <Tip warning={true}>
111
+
112
+ If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will
113
+ have a size greater than `max_shard_size`.
114
+
115
+ </Tip>
116
+
117
+ Args:
118
+ params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters.
119
+ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
120
+ The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
121
+ (like `"5MB"`).
122
+ """
123
+ max_shard_size = convert_file_size_to_int(max_shard_size)
124
+
125
+ sharded_state_dicts = []
126
+ current_block = {}
127
+ current_block_size = 0
128
+ total_size = 0
129
+
130
+ # flatten the weights to chunk
131
+ weights = flatten_dict(params, sep="/")
132
+ for item in weights:
133
+ weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)
134
+
135
+ # If adding this weight would tip the current block over the maximum size, we split here.
136
+ if current_block_size + weight_size > max_shard_size:
137
+ sharded_state_dicts.append(current_block)
138
+ current_block = {}
139
+ current_block_size = 0
140
+
141
+ current_block[item] = weights[item]
142
+ current_block_size += weight_size
143
+ total_size += weight_size
144
+
145
+ # Add the last block
146
+ sharded_state_dicts.append(current_block)
147
+
148
+ # If we only have one shard, we return it
149
+ if len(sharded_state_dicts) == 1:
150
+ return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None
151
+
152
+ # Otherwise, let's build the index
153
+ weight_map = {}
154
+ shards = {}
155
+ for idx, shard in enumerate(sharded_state_dicts):
156
+ shard_file = FLAX_WEIGHTS_NAME.replace(".msgpack", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack")
157
+ shards[shard_file] = shard
158
+ for weight_name in shard.keys():
159
+ weight_map[weight_name] = shard_file
160
+
161
+ # Add the metadata
162
+ metadata = {"total_size": total_size}
163
+ index = {"metadata": metadata, "weight_map": weight_map}
164
+ return shards, index
165
+
166
+
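As a rough illustration of how `flax_shard_checkpoint` splits a parameter tree, the sketch below shards a toy PyTree of ~0.9MB float32 weights at a 2MB limit; the layer names and sizes are invented, and the exact shard boundaries depend on iteration order:

```python
# Hypothetical sketch: sharding a toy parameter tree.
import numpy as np
from transformers.modeling_flax_utils import flax_shard_checkpoint

params = {
    f"layer_{i}": {"kernel": np.ones((480, 480), dtype=np.float32)}  # ~0.9MB each
    for i in range(4)
}
shards, index = flax_shard_checkpoint(params, max_shard_size="2MB")
print(list(shards.keys()))                    # e.g. ['flax_model-00001-of-00002.msgpack', ...]
print(index["metadata"]["total_size"])        # total byte size of all weights
print(index["weight_map"]["layer_0/kernel"])  # shard file holding this weight
```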
167
+ class FlaxPreTrainedModel(PushToHubMixin, FlaxGenerationMixin):
168
+ r"""
169
+ Base class for all models.
170
+
171
+ [`FlaxPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
172
+ downloading and saving models.
173
+
174
+ Class attributes (overridden by derived classes):
175
+
176
+ - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
177
+ for this model architecture.
178
+ - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
179
+ classes of the same architecture adding modules on top of the base model.
180
+ - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
181
+ models, `pixel_values` for vision models and `input_values` for speech models).
182
+ """
183
+
184
+ config_class = None
185
+ base_model_prefix = ""
186
+ main_input_name = "input_ids"
187
+ _auto_class = None
188
+ _missing_keys = set()
189
+
190
+ def __init__(
191
+ self,
192
+ config: PretrainedConfig,
193
+ module: nn.Module,
194
+ input_shape: Tuple = (1, 1),
195
+ seed: int = 0,
196
+ dtype: jnp.dtype = jnp.float32,
197
+ _do_init: bool = True,
198
+ ):
199
+ if config is None:
200
+ raise ValueError("config cannot be None")
201
+
202
+ if module is None:
203
+ raise ValueError("module cannot be None")
204
+
205
+ # Those are private to be exposed as typed property on derived classes.
206
+ self._config = config
207
+ self._module = module
208
+
209
+ # Those are public as their type is generic to every derived classes.
210
+ self.key = PRNGKey(seed)
211
+ self.dtype = dtype
212
+ self.input_shape = input_shape
213
+ self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
214
+
215
+ # To check if the model was initialized automatically.
216
+ self._is_initialized = _do_init
217
+
218
+ if _do_init:
219
+ # randomly initialized parameters
220
+ random_params = self.init_weights(self.key, input_shape)
221
+ params_shape_tree = jax.eval_shape(lambda params: params, random_params)
222
+ else:
223
+ init_fn = partial(self.init_weights, input_shape=input_shape)
224
+ params_shape_tree = jax.eval_shape(init_fn, self.key)
225
+
226
+ logger.info(
227
+ "Model weights are not initialized as `_do_init` is set to `False`. "
228
+ f"Make sure to call `{self.__class__.__name__}.init_weights` manually to initialize the weights."
229
+ )
230
+
231
+ # get the shape of the parameters
232
+ self._params_shape_tree = params_shape_tree
233
+
234
+ # save required_params as set
235
+ self._required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())
236
+
237
+ # initialize the parameters
238
+ if _do_init:
239
+ self.params = random_params
240
+
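The `_do_init` flag above controls whether parameters are materialized when the model object is built. A hedged sketch of the deferred-initialization workflow (the checkpoint name is purely illustrative):

```python
# Hypothetical sketch: deferring parameter initialization with _do_init=False.
import jax
from transformers import FlaxBertModel

# from_pretrained returns the model and the loaded params separately;
# model.params stays unset and raises if accessed.
model, params = FlaxBertModel.from_pretrained("google-bert/bert-base-cased", _do_init=False)

# For a fresh random initialization instead, call init_weights manually:
rng = jax.random.PRNGKey(0)
random_params = model.init_weights(rng, model.input_shape)

# Params must then be passed explicitly at call time, e.g.:
# outputs = model(input_ids, params=params)
```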
241
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> Dict:
242
+ raise NotImplementedError(f"init method has to be implemented for {self}")
243
+
244
+ def enable_gradient_checkpointing(self):
245
+ raise NotImplementedError(f"gradient checkpointing method has to be implemented for {self}")
246
+
247
+ @classmethod
248
+ def _from_config(cls, config, **kwargs):
249
+ """
250
+ All context managers that the model should be initialized under go here.
251
+ """
252
+ return cls(config, **kwargs)
253
+
254
+ @property
255
+ def framework(self) -> str:
256
+ """
257
+ :str: Identifies that this is a Flax model.
258
+ """
259
+ return "flax"
260
+
261
+ @property
262
+ def config(self) -> PretrainedConfig:
263
+ return self._config
264
+
265
+ @property
266
+ def module(self) -> nn.Module:
267
+ return self._module
268
+
269
+ @property
270
+ def params(self) -> Union[Dict, FrozenDict]:
271
+ if not self._is_initialized:
272
+ raise ValueError(
273
+ "`params` cannot be accessed from model when the model is created with `_do_init=False`. "
274
+ "You must call `init_weights` manually and store the params outside of the model and "
275
+ "pass it explicitly where needed."
276
+ )
277
+ return self._params
278
+
279
+ @property
280
+ def required_params(self) -> Set:
281
+ return self._required_params
282
+
283
+ @property
284
+ def params_shape_tree(self) -> Dict:
285
+ return self._params_shape_tree
286
+
287
+ @params.setter
288
+ def params(self, params: Union[Dict, FrozenDict]):
289
+ # don't set params if the model is not initialized
290
+ if not self._is_initialized:
291
+ raise ValueError(
292
+ "`params` cannot be set from model when the model is created with `_do_init=False`. "
293
+ "You store the params outside of the model."
294
+ )
295
+
296
+ if isinstance(params, FrozenDict):
297
+ params = unfreeze(params)
298
+ param_keys = set(flatten_dict(params).keys())
299
+ if len(self.required_params - param_keys) > 0:
300
+ raise ValueError(
301
+ "Some parameters are missing. Make sure that `params` include the following "
302
+ f"parameters {self.required_params - param_keys}"
303
+ )
304
+ self._params = params
305
+
306
+ def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:
307
+ """
308
+ Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.
309
+ """
310
+
311
+ # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27
312
+ def conditional_cast(param):
313
+ if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):
314
+ param = param.astype(dtype)
315
+ return param
316
+
317
+ if mask is None:
318
+ return jax.tree_util.tree_map(conditional_cast, params)
319
+
320
+ flat_params = flatten_dict(params)
321
+ flat_mask, _ = jax.tree_util.tree_flatten(mask)
322
+
323
+ for masked, key in zip(flat_mask, sorted(flat_params.keys())):
324
+ if masked:
325
+ flat_params[key] = conditional_cast(flat_params[key])
326
+
327
+ return unflatten_dict(flat_params)
328
+
329
+ def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):
330
+ r"""
331
+ Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast
332
+ the `params` in place.
333
+
334
+ This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full
335
+ half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.
336
+
337
+ Arguments:
338
+ params (`Union[Dict, FrozenDict]`):
339
+ A `PyTree` of model parameters.
340
+ mask (`Union[Dict, FrozenDict]`):
341
+ A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params
342
+ you want to cast, and should be `False` for those you want to skip.
343
+
344
+ Examples:
345
+
346
+ ```python
347
+ >>> from transformers import FlaxBertModel
348
+
349
+ >>> # load model
350
+ >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
351
+ >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision
352
+ >>> model.params = model.to_bf16(model.params)
353
+ >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale)
354
+ >>> # then pass the mask as follows
355
+ >>> from flax import traverse_util
356
+
357
+ >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
358
+ >>> flat_params = traverse_util.flatten_dict(model.params)
359
+ >>> mask = {
360
+ ... path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
361
+ ... for path in flat_params
362
+ ... }
363
+ >>> mask = traverse_util.unflatten_dict(mask)
364
+ >>> model.params = model.to_bf16(model.params, mask)
365
+ ```"""
366
+ return self._cast_floating_to(params, jnp.bfloat16, mask)
367
+
368
+ def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):
369
+ r"""
370
+ Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the
371
+ model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.
372
+
373
+ Arguments:
374
+ params (`Union[Dict, FrozenDict]`):
375
+ A `PyTree` of model parameters.
376
+ mask (`Union[Dict, FrozenDict]`):
377
+ A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params
378
+ you want to cast, and should be `False` for those you want to skip
379
+
380
+ Examples:
381
+
382
+ ```python
383
+ >>> from transformers import FlaxBertModel
384
+
385
+ >>> # Download model and configuration from huggingface.co
386
+ >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
387
+ >>> # By default, the model params will be in fp32, to illustrate the use of this method,
388
+ >>> # we'll first cast to fp16 and back to fp32
389
+ >>> model.params = model.to_fp16(model.params)
390
+ >>> # now cast back to fp32
391
+ >>> model.params = model.to_fp32(model.params)
392
+ ```"""
393
+ return self._cast_floating_to(params, jnp.float32, mask)
394
+
395
+ def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):
396
+ r"""
397
+ Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the
398
+ `params` in place.
399
+
400
+ This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full
401
+ half-precision training or to save weights in float16 for inference in order to save memory and improve speed.
402
+
403
+ Arguments:
404
+ params (`Union[Dict, FrozenDict]`):
405
+ A `PyTree` of model parameters.
406
+ mask (`Union[Dict, FrozenDict]`):
407
+ A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params
408
+ you want to cast, and should be `False` for those you want to skip
409
+
410
+ Examples:
411
+
412
+ ```python
413
+ >>> from transformers import FlaxBertModel
414
+
415
+ >>> # load model
416
+ >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
417
+ >>> # By default, the model params will be in fp32, to cast these to float16
418
+ >>> model.params = model.to_fp16(model.params)
419
+ >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale)
420
+ >>> # then pass the mask as follows
421
+ >>> from flax import traverse_util
422
+
423
+ >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
424
+ >>> flat_params = traverse_util.flatten_dict(model.params)
425
+ >>> mask = {
426
+ ... path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
427
+ ... for path in flat_params
428
+ ... }
429
+ >>> mask = traverse_util.unflatten_dict(mask)
430
+ >>> model.params = model.to_fp16(model.params, mask)
431
+ ```"""
432
+ return self._cast_floating_to(params, jnp.float16, mask)
433
+
434
+ @classmethod
435
+ def load_flax_weights(cls, resolved_archive_file):
436
+ try:
437
+ if resolved_archive_file.endswith(".safetensors"):
438
+ state = safe_load_file(resolved_archive_file)
439
+ state = unflatten_dict(state, sep=".")
440
+ else:
441
+ with open(resolved_archive_file, "rb") as state_f:
442
+ state = from_bytes(cls, state_f.read())
443
+ except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
444
+ try:
445
+ with open(resolved_archive_file) as f:
446
+ if f.read().startswith("version"):
447
+ raise OSError(
448
+ "You seem to have cloned a repository without having git-lfs installed. Please"
449
+ " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
450
+ " folder you cloned."
451
+ )
452
+ else:
453
+ raise ValueError from e
454
+ except (UnicodeDecodeError, ValueError):
455
+ raise EnvironmentError(f"Unable to convert {resolved_archive_file} to Flax deserializable object. ")
456
+
457
+ return state
458
+
459
+ @classmethod
460
+ def load_flax_sharded_weights(cls, shard_files):
461
+ """
462
+ This is the same as [`flax.serialization.from_bytes`]
463
+ (https://flax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded checkpoint.
464
+
465
+ This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
466
+ loaded in the model.
467
+
468
+ Args:
469
+ shard_files (`List[str]`):
470
+ The list of shard files to load.
471
+
472
+ Returns:
473
+ `Dict`: A nested dictionary of the model parameters, in the expected format for flax models: `{'model':
474
+ {'params': {'...'}}}`.
475
+ """
476
+
477
+ # Load the index
478
+ state_sharded_dict = {}
479
+
480
+ for shard_file in shard_files:
481
+ # load using msgpack utils
482
+ try:
483
+ with open(shard_file, "rb") as state_f:
484
+ state = from_bytes(cls, state_f.read())
485
+ except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
486
+ with open(shard_file) as f:
487
+ if f.read().startswith("version"):
488
+ raise OSError(
489
+ "You seem to have cloned a repository without having git-lfs installed. Please"
490
+ " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
491
+ " folder you cloned."
492
+ )
493
+ else:
494
+ raise ValueError from e
495
+ except (UnicodeDecodeError, ValueError):
496
+ raise EnvironmentError(f"Unable to convert {shard_file} to Flax deserializable object. ")
497
+
498
+ state = flatten_dict(state, sep="/")
499
+ state_sharded_dict.update(state)
500
+ del state
501
+ gc.collect()
502
+
503
+ # the state dict is unflattened to match the format of model.params
504
+ return unflatten_dict(state_sharded_dict, sep="/")
505
+
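A hedged sketch of driving `load_flax_sharded_weights` by hand from a local checkpoint directory; the directory path is an assumption, and the index file name follows the usual `flax_model.msgpack.index.json` convention:

```python
# Hypothetical sketch: reassembling a sharded Flax checkpoint manually.
import json
import os

from transformers import FlaxBertModel

checkpoint_dir = "./my_sharded_checkpoint"  # assumed local directory
with open(os.path.join(checkpoint_dir, "flax_model.msgpack.index.json")) as f:
    index = json.load(f)

# The index maps flattened weight names to the shard file that stores them.
shard_files = sorted({os.path.join(checkpoint_dir, name) for name in index["weight_map"].values()})
state = FlaxBertModel.load_flax_sharded_weights(shard_files)
print(type(state))  # nested dict of weight arrays, ready to assign to model.params
```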
506
+ @classmethod
507
+ def can_generate(cls) -> bool:
508
+ """
509
+ Returns whether this model can generate sequences with `.generate()`. Returns:
510
+ `bool`: Whether this model can generate sequences with `.generate()`.
511
+ """
512
+ # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation.
513
+ # Alternatively, the model can also have a custom `generate` function.
514
+ if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate):
515
+ return False
516
+ return True
517
+
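Because `can_generate` only inspects whether the generation hooks were overridden, it can serve as a cheap class-level guard before calling `.generate()`; a small illustrative sketch:

```python
# Hypothetical sketch: guarding .generate() with can_generate().
from transformers import FlaxGPT2LMHeadModel

if FlaxGPT2LMHeadModel.can_generate():
    print("FlaxGPT2LMHeadModel supports .generate()")
```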
518
+ @classmethod
519
+ def from_pretrained(
520
+ cls,
521
+ pretrained_model_name_or_path: Union[str, os.PathLike],
522
+ dtype: jnp.dtype = jnp.float32,
523
+ *model_args,
524
+ config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
525
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
526
+ ignore_mismatched_sizes: bool = False,
527
+ force_download: bool = False,
528
+ local_files_only: bool = False,
529
+ token: Optional[Union[str, bool]] = None,
530
+ revision: str = "main",
531
+ **kwargs,
532
+ ):
533
+ r"""
534
+ Instantiate a pretrained flax model from a pre-trained model configuration.
535
+
536
+ The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
537
+ pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
538
+ task.
539
+
540
+ The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
541
+ weights are discarded.
542
+
543
+ Parameters:
544
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
545
+ Can be either:
546
+
547
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
548
+ - A path to a *directory* containing model weights saved using
549
+ [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
550
+ - A path or URL to a *pt index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In this case,
551
+ `from_pt` should be set to `True`.
552
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
553
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
554
+ `jax.numpy.bfloat16` (on TPUs).
555
+
556
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
557
+ specified all the computation will be performed with the given `dtype`.
558
+
559
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
560
+ parameters.**
561
+
562
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
563
+ [`~FlaxPreTrainedModel.to_bf16`].
564
+ model_args (sequence of positional arguments, *optional*):
565
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
566
+ config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*):
567
+ Can be either:
568
+
569
+ - an instance of a class derived from [`PretrainedConfig`],
570
+ - a string or path valid as input to [`~PretrainedConfig.from_pretrained`].
571
+
572
+ Configuration for the model to use instead of an automatically loaded configuration. Configuration can
573
+ be automatically loaded when:
574
+
575
+ - The model is a model provided by the library (loaded with the *model id* string of a pretrained
576
+ model).
577
+ - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
578
+ save directory.
579
+ - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
580
+ configuration JSON file named *config.json* is found in the directory.
581
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
582
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
583
+ standard cache should not be used.
584
+ from_pt (`bool`, *optional*, defaults to `False`):
585
+ Load the model weights from a PyTorch checkpoint save file (see docstring of
586
+ `pretrained_model_name_or_path` argument).
587
+ ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
588
+ Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
589
+ as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
590
+ checkpoint with 3 labels).
591
+ force_download (`bool`, *optional*, defaults to `False`):
592
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
593
+ cached versions if they exist.
594
+ resume_download (`bool`, *optional*, defaults to `False`):
595
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
596
+ file exists.
597
+ proxies (`Dict[str, str]`, *optional*):
598
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
599
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
600
+ local_files_only (`bool`, *optional*, defaults to `False`):
601
+ Whether or not to only look at local files (i.e., do not try to download the model).
602
+ token (`str` or `bool`, *optional*):
603
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
604
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
605
+ revision (`str`, *optional*, defaults to `"main"`):
606
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
607
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
608
+ identifier allowed by git.
609
+
610
+
611
+ <Tip>
612
+
613
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
614
+
615
+ </Tip>
616
+
617
+ subfolder (`str`, *optional*, defaults to `""`):
618
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
619
+ specify the folder name here.
620
+ kwargs (remaining dictionary of keyword arguments, *optional*):
621
+ Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
622
+ `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
623
+ automatically loaded:
624
+
625
+ - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
626
+ underlying model's `__init__` method (we assume all relevant updates to the configuration have
627
+ already been done)
628
+ - If a configuration is not provided, `kwargs` will be first passed to the configuration class
629
+ initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
630
+ corresponds to a configuration attribute will be used to override said attribute with the
631
+ supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
632
+ will be passed to the underlying model's `__init__` function.
633
+
634
+ Examples:
635
+
636
+ ```python
637
+ >>> from transformers import BertConfig, FlaxBertModel
638
+
639
+ >>> # Download model and configuration from huggingface.co and cache.
640
+ >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
641
+ >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
642
+ >>> model = FlaxBertModel.from_pretrained("./test/saved_model/")
643
+ >>> # Loading from a PyTorch checkpoint file instead of a Flax model (slower, for example purposes, not runnable).
644
+ >>> config = BertConfig.from_json_file("./pt_model/config.json")
645
+ >>> model = FlaxBertModel.from_pretrained("./pt_model/pytorch_model.bin", from_pt=True, config=config)
646
+ ```"""
647
+ from_pt = kwargs.pop("from_pt", False)
648
+ resume_download = kwargs.pop("resume_download", False)
649
+ proxies = kwargs.pop("proxies", None)
650
+ use_auth_token = kwargs.pop("use_auth_token", None)
651
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
652
+ from_pipeline = kwargs.pop("_from_pipeline", None)
653
+ from_auto_class = kwargs.pop("_from_auto", False)
654
+ _do_init = kwargs.pop("_do_init", True)
655
+ subfolder = kwargs.pop("subfolder", "")
656
+ commit_hash = kwargs.pop("_commit_hash", None)
657
+
658
+ # Not relevant for Flax Models
659
+ _ = kwargs.pop("adapter_kwargs", None)
660
+
661
+ if use_auth_token is not None:
662
+ warnings.warn(
663
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
664
+ FutureWarning,
665
+ )
666
+ if token is not None:
667
+ raise ValueError(
668
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
669
+ )
670
+ token = use_auth_token
671
+
672
+ if trust_remote_code is True:
673
+ logger.warning(
674
+ "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
675
+ " ignored."
676
+ )
677
+
678
+ user_agent = {"file_type": "model", "framework": "flax", "from_auto_class": from_auto_class}
679
+ if from_pipeline is not None:
680
+ user_agent["using_pipeline"] = from_pipeline
681
+
682
+ if is_offline_mode() and not local_files_only:
683
+ logger.info("Offline mode: forcing local_files_only=True")
684
+ local_files_only = True
685
+
686
+ # Load config if we don't provide a configuration
687
+ if not isinstance(config, PretrainedConfig):
688
+ config_path = config if config is not None else pretrained_model_name_or_path
689
+ config, model_kwargs = cls.config_class.from_pretrained(
690
+ config_path,
691
+ cache_dir=cache_dir,
692
+ return_unused_kwargs=True,
693
+ force_download=force_download,
694
+ resume_download=resume_download,
695
+ proxies=proxies,
696
+ local_files_only=local_files_only,
697
+ token=token,
698
+ revision=revision,
699
+ subfolder=subfolder,
700
+ _from_auto=from_auto_class,
701
+ _from_pipeline=from_pipeline,
702
+ _commit_hash=commit_hash,
703
+ **kwargs,
704
+ )
705
+ else:
706
+ model_kwargs = kwargs.copy()
707
+
708
+ if commit_hash is None:
709
+ commit_hash = getattr(config, "_commit_hash", None)
710
+
711
+ # Add the dtype to model_kwargs
712
+ model_kwargs["dtype"] = dtype
713
+
714
+ # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the
715
+ # index of the files.
716
+ is_sharded = False
717
+
718
+ # Load model
719
+ if pretrained_model_name_or_path is not None:
720
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
721
+ is_local = os.path.isdir(pretrained_model_name_or_path)
722
+ if os.path.isdir(pretrained_model_name_or_path):
723
+ if os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)):
724
+ # Load from a Flax checkpoint
725
+ archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
726
+ elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)):
727
+ # Load from a sharded Flax checkpoint
728
+ archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)
729
+ is_sharded = True
730
+ elif is_safetensors_available() and os.path.isfile(
731
+ os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)
732
+ ):
733
+ # Load from a safetensors checkpoint
734
+ archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)
735
+ elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)):
736
+ # Load from a PyTorch checkpoint
737
+ archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)
738
+ elif from_pt and os.path.isfile(
739
+ os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME)
740
+ ):
741
+ # Load from a sharded pytorch checkpoint
742
+ archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME)
743
+ is_sharded = True
744
+ # At this stage we don't have a weight file so we will raise an error.
745
+ elif is_safetensors_available() and os.path.isfile(
746
+ os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)
747
+ ):
748
+ # Load from a sharded safetensors checkpoint
749
+ archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)
750
+ is_sharded = True
751
+ raise NotImplementedError("Support for sharded checkpoints using safetensors is coming soon!")
752
+ elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)):
753
+ raise EnvironmentError(
754
+ f"Error no file named {FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} "
755
+ "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those "
756
+ "weights."
757
+ )
758
+ else:
759
+ raise EnvironmentError(
760
+ f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory "
761
+ f"{pretrained_model_name_or_path}."
762
+ )
763
+ elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
764
+ archive_file = pretrained_model_name_or_path
765
+ is_local = True
766
+ elif is_remote_url(pretrained_model_name_or_path):
767
+ filename = pretrained_model_name_or_path
768
+ resolved_archive_file = download_url(pretrained_model_name_or_path)
769
+ else:
770
+ if from_pt:
771
+ filename = WEIGHTS_NAME
772
+ else:
773
+ filename = FLAX_WEIGHTS_NAME
774
+
775
+ try:
776
+ # Load from URL or cache if already cached
777
+ cached_file_kwargs = {
778
+ "cache_dir": cache_dir,
779
+ "force_download": force_download,
780
+ "proxies": proxies,
781
+ "resume_download": resume_download,
782
+ "local_files_only": local_files_only,
783
+ "token": token,
784
+ "user_agent": user_agent,
785
+ "revision": revision,
786
+ "subfolder": subfolder,
787
+ "_raise_exceptions_for_gated_repo": False,
788
+ "_raise_exceptions_for_missing_entries": False,
789
+ "_commit_hash": commit_hash,
790
+ }
791
+ resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
792
+
793
+ # Maybe the checkpoint is sharded, we try to grab the index name in this case.
794
+ if resolved_archive_file is None and filename == FLAX_WEIGHTS_NAME:
795
+ resolved_archive_file = cached_file(
796
+ pretrained_model_name_or_path, FLAX_WEIGHTS_INDEX_NAME, **cached_file_kwargs
797
+ )
798
+ if resolved_archive_file is not None:
799
+ is_sharded = True
800
+
801
+ # Maybe the checkpoint is pytorch sharded, we try to grab the pytorch index name in this case.
802
+ if resolved_archive_file is None and from_pt:
803
+ resolved_archive_file = cached_file(
804
+ pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs
805
+ )
806
+ if resolved_archive_file is not None:
807
+ is_sharded = True
808
+
809
+ # If we still haven't found anything, look for `safetensors`.
810
+ if resolved_archive_file is None:
811
+ # No support for sharded safetensors yet, so we'll raise an error if that's all we find.
812
+ filename = SAFE_WEIGHTS_NAME
813
+ resolved_archive_file = cached_file(
814
+ pretrained_model_name_or_path, SAFE_WEIGHTS_NAME, **cached_file_kwargs
815
+ )
816
+
817
+ # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
818
+ # result when internet is up, the repo and revision exist, but the file does not.
819
+ if resolved_archive_file is None:
820
+ # Otherwise, maybe there is a TF or Torch model file. We try those to give a helpful error
821
+ # message.
822
+ has_file_kwargs = {
823
+ "revision": revision,
824
+ "proxies": proxies,
825
+ "token": token,
826
+ }
827
+ if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs):
828
+ is_sharded = True
829
+ raise NotImplementedError(
830
+ "Support for sharded checkpoints using safetensors is coming soon!"
831
+ )
832
+ elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):
833
+ raise EnvironmentError(
834
+ f"{pretrained_model_name_or_path} does not appear to have a file named"
835
+ f" {FLAX_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to"
836
+ " load this model from those weights."
837
+ )
838
+ elif has_file(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **has_file_kwargs):
839
+ raise EnvironmentError(
840
+ f"{pretrained_model_name_or_path} does not appear to have a file named"
841
+ f" {FLAX_WEIGHTS_INDEX_NAME} but there is a sharded file for PyTorch weights. Use"
842
+ " `from_pt=True` to load this model from those weights."
843
+ )
844
+ else:
845
+ raise EnvironmentError(
846
+ f"{pretrained_model_name_or_path} does not appear to have a file named"
847
+ f" {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
848
+ )
849
+ except EnvironmentError:
850
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
851
+ # to the original exception.
852
+ raise
853
+ except Exception:
854
+ # For any other exception, we throw a generic error.
855
+ raise EnvironmentError(
856
+ f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it"
857
+ " from 'https://huggingface.co/models', make sure you don't have a local directory with the"
858
+ f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
859
+ f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
860
+ )
861
+
862
+ if is_local:
863
+ logger.info(f"loading weights file {archive_file}")
864
+ resolved_archive_file = archive_file
865
+ filename = resolved_archive_file.split(os.path.sep)[-1]
866
+ else:
867
+ logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}")
868
+ else:
869
+ resolved_archive_file = None
870
+
871
+ # We'll need to download and cache each checkpoint shard if the checkpoint is sharded.
872
+ if is_sharded:
873
+ # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case.
874
+ resolved_archive_file, _ = get_checkpoint_shard_files(
875
+ pretrained_model_name_or_path,
876
+ resolved_archive_file,
877
+ cache_dir=cache_dir,
878
+ force_download=force_download,
879
+ proxies=proxies,
880
+ resume_download=resume_download,
881
+ local_files_only=local_files_only,
882
+ token=token,
883
+ user_agent=user_agent,
884
+ revision=revision,
885
+ subfolder=subfolder,
886
+ _commit_hash=commit_hash,
887
+ )
888
+
889
+ safetensors_from_pt = False
890
+ if filename == SAFE_WEIGHTS_NAME:
891
+ with safe_open(resolved_archive_file, framework="flax") as f:
892
+ safetensors_metadata = f.metadata()
893
+ if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax"]:
894
+ raise OSError(
895
+ f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata."
896
+ " Make sure you save your model with the `save_pretrained` method."
897
+ )
898
+ safetensors_from_pt = safetensors_metadata.get("format") == "pt"
899
+
900
+ # init random models
901
+ model = cls(config, *model_args, _do_init=_do_init, **model_kwargs)
902
+
903
+ if from_pt or safetensors_from_pt:
904
+ state = load_pytorch_checkpoint_in_flax_state_dict(model, resolved_archive_file, is_sharded)
905
+ else:
906
+ if is_sharded:
907
+ state = cls.load_flax_sharded_weights(resolved_archive_file)
908
+ else:
909
+ state = cls.load_flax_weights(resolved_archive_file)
910
+ # make sure all arrays are stored as jnp.arrays
911
+ # NOTE: This is to prevent a bug that will be fixed in Flax >= v0.3.4:
912
+ # https://github.com/google/flax/issues/1261
913
+ if _do_init:
914
+ state = jax.tree_util.tree_map(jnp.array, state)
915
+ else:
916
+ # keep the params on CPU if we don't want to initialize
917
+ state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state)
918
+
919
+ if "batch_stats" in state: # if flax model contains batch norm layers
920
+ # if model is base model only use model_prefix key
921
+ if (
922
+ cls.base_model_prefix not in dict(model.params_shape_tree["params"])
923
+ and cls.base_model_prefix in state["params"]
924
+ ):
925
+ state["params"] = state["params"][cls.base_model_prefix]
926
+ state["batch_stats"] = state["batch_stats"][cls.base_model_prefix]
927
+
928
+ # if model is head model and we are loading weights from base model
929
+ # we initialize new params dict with base_model_prefix
930
+ if (
931
+ cls.base_model_prefix in dict(model.params_shape_tree["params"])
932
+ and cls.base_model_prefix not in state["params"]
933
+ ):
934
+ state = {
935
+ "params": {cls.base_model_prefix: state["params"]},
936
+ "batch_stats": {cls.base_model_prefix: state["batch_stats"]},
937
+ }
938
+
939
+ else:
940
+ # if model is base model only use model_prefix key
941
+ if cls.base_model_prefix not in dict(model.params_shape_tree) and cls.base_model_prefix in state:
942
+ state = state[cls.base_model_prefix]
943
+
944
+ # if model is head model and we are loading weights from base model
945
+ # we initialize new params dict with base_model_prefix
946
+ if cls.base_model_prefix in dict(model.params_shape_tree) and cls.base_model_prefix not in state:
947
+ state = {cls.base_model_prefix: state}
948
+
949
+ # flatten dicts
950
+ state = flatten_dict(state)
951
+
952
+ random_state = flatten_dict(unfreeze(model.params if _do_init else model.params_shape_tree))
953
+
954
+ missing_keys = model.required_params - set(state.keys())
955
+ unexpected_keys = set(state.keys()) - model.required_params
956
+
957
+ # Disable the warning when porting PyTorch weights to Flax, as Flax does not use num_batches_tracked
958
+ for unexpected_key in unexpected_keys.copy():
959
+ if "num_batches_tracked" in unexpected_key[-1]:
960
+ unexpected_keys.remove(unexpected_key)
961
+
962
+ if missing_keys and not _do_init:
963
+ logger.warning(
964
+ f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. "
965
+ "Make sure to call model.init_weights to initialize the missing weights."
966
+ )
967
+ cls._missing_keys = missing_keys
968
+
969
+ # Mismatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not
970
+ # matching the weights in the model.
971
+ mismatched_keys = []
972
+ for key in state.keys():
973
+ if key in random_state and state[key].shape != random_state[key].shape:
974
+ if ignore_mismatched_sizes:
975
+ mismatched_keys.append((key, state[key].shape, random_state[key].shape))
976
+ state[key] = random_state[key]
977
+ else:
978
+ raise ValueError(
979
+ f"Trying to load the pretrained weight for {key} failed: checkpoint has shape "
980
+ f"{state[key].shape} which is incompatible with the model shape {random_state[key].shape}. "
981
+ "Using `ignore_mismatched_sizes=True` if you really want to load this checkpoint inside this "
982
+ "model."
983
+ )
984
+
985
+ # add missing keys as random parameters if we are initializing
986
+ if missing_keys and _do_init:
987
+ for missing_key in missing_keys:
988
+ state[missing_key] = random_state[missing_key]
989
+
990
+ # remove unexpected keys to not be saved again
991
+ for unexpected_key in unexpected_keys:
992
+ del state[unexpected_key]
993
+
994
+ if len(unexpected_keys) > 0:
995
+ logger.warning(
996
+ f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
997
+ f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
998
+ f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
999
+ " with another architecture (e.g. initializing a BertForSequenceClassification model from a"
1000
+ " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
1001
+ f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
1002
+ " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
1003
+ )
1004
+ else:
1005
+ logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
1006
+
1007
+ if len(missing_keys) > 0:
1008
+ logger.warning(
1009
+ f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
1010
+ f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
1011
+ " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
1012
+ )
1013
+ elif len(mismatched_keys) == 0:
1014
+ logger.info(
1015
+ f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
1016
+ f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
1017
+ f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
1018
+ " training."
1019
+ )
1020
+ if len(mismatched_keys) > 0:
1021
+ mismatched_warning = "\n".join(
1022
+ [
1023
+ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
1024
+ for key, shape1, shape2 in mismatched_keys
1025
+ ]
1026
+ )
1027
+ logger.warning(
1028
+ f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
1029
+ f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not"
1030
+ f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able"
1031
+ " to use it for predictions and inference."
1032
+ )
1033
+
1034
+ # dictionary of key: dtypes for the model params
1035
+ param_dtypes = jax.tree_util.tree_map(lambda x: x.dtype, state)
1036
+ # extract keys of parameters not in jnp.float32
1037
+ fp16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.float16]
1038
+ bf16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.bfloat16]
1039
+
1040
+ # raise a warning if any of the parameters are not in jnp.float32
1041
+ if len(fp16_params) > 0:
1042
+ logger.warning(
1043
+ f"Some of the weights of {model.__class__.__name__} were initialized in float16 precision from "
1044
+ f"the model checkpoint at {pretrained_model_name_or_path}:\n{fp16_params}\n"
1045
+ "You should probably UPCAST the model weights to float32 if this was not intended. "
1046
+ "See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this."
1047
+ )
1048
+
1049
+ if len(bf16_params) > 0:
1050
+ logger.warning(
1051
+ f"Some of the weights of {model.__class__.__name__} were initialized in bfloat16 precision from "
1052
+ f"the model checkpoint at {pretrained_model_name_or_path}:\n{bf16_params}\n"
1053
+ "You should probably UPCAST the model weights to float32 if this was not intended. "
1054
+ "See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this."
1055
+ )
1056
+
1057
+ # If it is a model with generation capabilities, attempt to load the generation config
1058
+ if model.can_generate():
1059
+ try:
1060
+ model.generation_config = GenerationConfig.from_pretrained(
1061
+ pretrained_model_name_or_path,
1062
+ cache_dir=cache_dir,
1063
+ force_download=force_download,
1064
+ resume_download=resume_download,
1065
+ proxies=proxies,
1066
+ local_files_only=local_files_only,
1067
+ token=token,
1068
+ revision=revision,
1069
+ subfolder=subfolder,
1070
+ _from_auto=from_auto_class,
1071
+ _from_pipeline=from_pipeline,
1072
+ **kwargs,
1073
+ )
1074
+ except OSError:
1075
+ logger.info(
1076
+ "Generation config file not found, using a generation config created from the model config."
1077
+ )
1078
+ pass
1079
+
1080
+ if _do_init:
1081
+ # set correct parameters
1082
+ model.params = unflatten_dict(state)
1083
+ return model
1084
+ else:
1085
+ return model, unflatten_dict(state)
1086
+
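To make the two return shapes above concrete, here is a hedged usage sketch (it assumes network access to huggingface.co and that `flax` is installed): with the default `_do_init=True` the parameters are materialized on the model, while `_do_init=False` keeps them on CPU and returns them separately.

```python
from transformers import FlaxBertModel

# Default: parameters are initialized/materialized and attached to the model.
model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")

# Lazy variant: parameters stay on CPU and are returned alongside the model;
# assign (or shard) them yourself before running the model.
model, params = FlaxBertModel.from_pretrained("google-bert/bert-base-cased", _do_init=False)
```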
1087
+ def save_pretrained(
1088
+ self,
1089
+ save_directory: Union[str, os.PathLike],
1090
+ params=None,
1091
+ push_to_hub=False,
1092
+ max_shard_size="10GB",
1093
+ token: Optional[Union[str, bool]] = None,
1094
+ safe_serialization: bool = False,
1095
+ **kwargs,
1096
+ ):
1097
+ """
1098
+ Save a model and its configuration file to a directory, so that it can be re-loaded using the
1099
+ [`~FlaxPreTrainedModel.from_pretrained`] class method.
1100
+
1101
+ Arguments:
1102
+ save_directory (`str` or `os.PathLike`):
1103
+ Directory to which to save. Will be created if it doesn't exist.
1104
+ push_to_hub (`bool`, *optional*, defaults to `False`):
1105
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
1106
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
1107
+ namespace).
1108
+ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
1109
+ The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size
1110
+ lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).
1111
+
1112
+ <Tip warning={true}>
1113
+
1114
+ If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
1115
+ which will be bigger than `max_shard_size`.
1116
+
1117
+ </Tip>
1118
+
1119
+ token (`str` or `bool`, *optional*):
1120
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
1121
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
1122
+ kwargs (`Dict[str, Any]`, *optional*):
1123
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
1124
+ safe_serialization (`bool`, *optional*, defaults to `False`):
1125
+ Whether to save the model using `safetensors` or through msgpack.
1126
+ """
1127
+ use_auth_token = kwargs.pop("use_auth_token", None)
1128
+
1129
+ if use_auth_token is not None:
1130
+ warnings.warn(
1131
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
1132
+ FutureWarning,
1133
+ )
1134
+ if token is not None:
1135
+ raise ValueError(
1136
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
1137
+ )
1138
+ token = use_auth_token
1139
+
1140
+ if token is not None:
1141
+ kwargs["token"] = token
1142
+
1143
+ if os.path.isfile(save_directory):
1144
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
1145
+ return
1146
+
1147
+ os.makedirs(save_directory, exist_ok=True)
1148
+
1149
+ if push_to_hub:
1150
+ commit_message = kwargs.pop("commit_message", None)
1151
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
1152
+ repo_id = self._create_repo(repo_id, **kwargs)
1153
+ files_timestamps = self._get_files_timestamps(save_directory)
1154
+
1155
+ # get abs dir
1156
+ save_directory = os.path.abspath(save_directory)
1157
+ # save config as well
1158
+ self.config.architectures = [self.__class__.__name__[4:]]
1159
+
1160
+ # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
1161
+ # loaded from the Hub.
1162
+ if self._auto_class is not None:
1163
+ custom_object_save(self, save_directory, config=self.config)
1164
+
1165
+ self.config.save_pretrained(save_directory)
1166
+ if self.can_generate():
1167
+ self.generation_config.save_pretrained(save_directory)
1168
+
1169
+ # save model
1170
+ weights_name = SAFE_WEIGHTS_NAME if safe_serialization else FLAX_WEIGHTS_NAME
1171
+ output_model_file = os.path.join(save_directory, weights_name)
1172
+
1173
+ shards, index = flax_shard_checkpoint(params if params is not None else self.params, max_shard_size)
1174
+ # Clean the folder from a previous save
1175
+ for filename in os.listdir(save_directory):
1176
+ full_filename = os.path.join(save_directory, filename)
1177
+ weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")
1178
+ if (
1179
+ filename.startswith(weights_no_suffix)
1180
+ and os.path.isfile(full_filename)
1181
+ and filename not in shards.keys()
1182
+ ):
1183
+ os.remove(full_filename)
1184
+
1185
+ if index is None:
1186
+ if safe_serialization:
1187
+ params = params if params is not None else self.params
1188
+ flat_dict = flatten_dict(params, sep=".")
1189
+ safe_save_file(flat_dict, output_model_file, metadata={"format": "flax"})
1190
+ else:
1191
+ with open(output_model_file, "wb") as f:
1192
+ params = params if params is not None else self.params
1193
+ model_bytes = to_bytes(params)
1194
+ f.write(model_bytes)
1195
+
1196
+ else:
1197
+ save_index_file = os.path.join(save_directory, FLAX_WEIGHTS_INDEX_NAME)
1198
+ # Save the index as well
1199
+ with open(save_index_file, "w", encoding="utf-8") as f:
1200
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
1201
+ f.write(content)
1202
+ logger.info(
1203
+ f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
1204
+ f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
1205
+ f"index located at {save_index_file}."
1206
+ )
1207
+ for shard_file, shard in shards.items():
1208
+ # the shard items are flattened; to save them we need to unflatten them again
1209
+ with open(os.path.join(save_directory, shard_file), mode="wb") as f:
1210
+ params = unflatten_dict(shard, sep="/")
1211
+ shard_bytes = to_bytes(params)
1212
+ f.write(shard_bytes)
1213
+
1214
+ logger.info(f"Model weights saved in {output_model_file}")
1215
+
1216
+ if push_to_hub:
1217
+ self._upload_modified_files(
1218
+ save_directory,
1219
+ repo_id,
1220
+ files_timestamps,
1221
+ commit_message=commit_message,
1222
+ token=token,
1223
+ )
1224
+
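A short usage sketch of the saving paths described above (hedged: it assumes a local `FlaxBertModel` instance and, for the safetensors branch, that `safetensors` is installed):

```python
from transformers import FlaxBertModel

model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")

# msgpack serialization; shards plus an index file are written once the params
# exceed max_shard_size.
model.save_pretrained("./bert-flax", max_shard_size="2GB")

# safetensors serialization instead of msgpack (single-file only in this version).
model.save_pretrained("./bert-flax-safetensors", safe_serialization=True)

# Either directory can be reloaded with from_pretrained.
reloaded = FlaxBertModel.from_pretrained("./bert-flax")
```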
1225
+ @classmethod
1226
+ def register_for_auto_class(cls, auto_class="FlaxAutoModel"):
1227
+ """
1228
+ Register this class with a given auto class. This should only be used for custom models as the ones in the
1229
+ library are already mapped with an auto class.
1230
+
1231
+ <Tip warning={true}>
1232
+
1233
+ This API is experimental and may have some slight breaking changes in the next releases.
1234
+
1235
+ </Tip>
1236
+
1237
+ Args:
1238
+ auto_class (`str` or `type`, *optional*, defaults to `"FlaxAutoModel"`):
1239
+ The auto class to register this new model with.
1240
+ """
1241
+ if not isinstance(auto_class, str):
1242
+ auto_class = auto_class.__name__
1243
+
1244
+ import transformers.models.auto as auto_module
1245
+
1246
+ if not hasattr(auto_module, auto_class):
1247
+ raise ValueError(f"{auto_class} is not a valid auto class.")
1248
+
1249
+ cls._auto_class = auto_class
1250
+
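A minimal sketch of the registration call (hedged: `FlaxBertModel` is only a stand-in here; in practice this is meant for your own custom model class so that `FlaxAutoModel` can resolve it when the model is loaded from the Hub with `trust_remote_code=True`):

```python
from transformers import FlaxBertModel

# Registering sets the private _auto_class attribute checked in save_pretrained,
# which then copies the defining module next to the saved weights.
FlaxBertModel.register_for_auto_class("FlaxAutoModel")
```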
1251
+
1252
+ # To update the docstring, we need to copy the method, otherwise we change the original docstring.
1253
+ FlaxPreTrainedModel.push_to_hub = copy_func(FlaxPreTrainedModel.push_to_hub)
1254
+ if FlaxPreTrainedModel.push_to_hub.__doc__ is not None:
1255
+ FlaxPreTrainedModel.push_to_hub.__doc__ = FlaxPreTrainedModel.push_to_hub.__doc__.format(
1256
+ object="model", object_class="FlaxAutoModel", object_files="model checkpoint"
1257
+ )
1258
+
1259
+
1260
+ def overwrite_call_docstring(model_class, docstring):
1261
+ # copy __call__ function to be sure docstring is changed only for this function
1262
+ model_class.__call__ = copy_func(model_class.__call__)
1263
+ # delete existing docstring
1264
+ model_class.__call__.__doc__ = None
1265
+ # set correct docstring
1266
+ model_class.__call__ = add_start_docstrings_to_model_forward(docstring)(model_class.__call__)
1267
+
1268
+
1269
+ def append_call_sample_docstring(
1270
+ model_class, checkpoint, output_type, config_class, mask=None, revision=None, real_checkpoint=None
1271
+ ):
1272
+ model_class.__call__ = copy_func(model_class.__call__)
1273
+ model_class.__call__ = add_code_sample_docstrings(
1274
+ checkpoint=checkpoint,
1275
+ output_type=output_type,
1276
+ config_class=config_class,
1277
+ model_cls=model_class.__name__,
1278
+ revision=revision,
1279
+ real_checkpoint=real_checkpoint,
1280
+ )(model_class.__call__)
1281
+
1282
+
1283
+ def append_replace_return_docstrings(model_class, output_type, config_class):
1284
+ model_class.__call__ = copy_func(model_class.__call__)
1285
+ model_class.__call__ = replace_return_docstrings(
1286
+ output_type=output_type,
1287
+ config_class=config_class,
1288
+ )(model_class.__call__)
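These three helpers are applied at import time in the individual model files; the sketch below (hedged: the checkpoint name, config string, and docstring text are illustrative stand-ins, not the library's own constants) shows how they would typically be chained on a model class.

```python
from transformers import FlaxBertModel
from transformers.modeling_flax_outputs import FlaxBaseModelOutputWithPooling
from transformers.modeling_flax_utils import (
    append_call_sample_docstring,
    overwrite_call_docstring,
)

# Illustrative stand-ins; real model files define these as module-level constants.
_CHECKPOINT_FOR_DOC = "google-bert/bert-base-cased"
_CONFIG_FOR_DOC = "BertConfig"

TOY_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
"""

# Replace the generic __call__ docstring with a model-specific one...
overwrite_call_docstring(FlaxBertModel, TOY_INPUTS_DOCSTRING)
# ...then append an auto-generated usage example for the documented checkpoint.
append_call_sample_docstring(
    FlaxBertModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC
)
```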
llmeval-env/lib/python3.10/site-packages/transformers/modeling_tf_outputs.py ADDED
@@ -0,0 +1,991 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import warnings
18
+ from dataclasses import dataclass
19
+ from typing import List, Optional, Tuple
20
+
21
+ import tensorflow as tf
22
+
23
+ from .utils import ModelOutput
24
+
25
+
26
+ @dataclass
27
+ class TFBaseModelOutput(ModelOutput):
28
+ """
29
+ Base class for model's outputs, with potential hidden states and attentions.
30
+
31
+ Args:
32
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
33
+ Sequence of hidden-states at the output of the last layer of the model.
34
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
35
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
36
+ `(batch_size, sequence_length, hidden_size)`.
37
+
38
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
39
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
40
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
41
+ sequence_length)`.
42
+
43
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
44
+ heads.
45
+ """
46
+
47
+ last_hidden_state: tf.Tensor = None
48
+ hidden_states: Tuple[tf.Tensor] | None = None
49
+ attentions: Tuple[tf.Tensor] | None = None
50
+
51
+
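All of these output classes derive from `ModelOutput`, so they can be indexed like a tuple or a dict in addition to attribute access, and `None` fields are skipped in the tuple view. A small sketch (assuming TensorFlow is installed):

```python
import tensorflow as tf

from transformers.modeling_tf_outputs import TFBaseModelOutput

out = TFBaseModelOutput(last_hidden_state=tf.zeros((1, 4, 8)))

print(out.last_hidden_state.shape)     # attribute access -> (1, 4, 8)
print(out["last_hidden_state"].shape)  # dict-style access
print(out[0].shape)                    # tuple-style access; None fields are dropped
print(out.to_tuple())                  # plain tuple, e.g. for legacy return_dict=False code
```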
52
+ @dataclass
53
+ class TFBaseModelOutputWithNoAttention(ModelOutput):
54
+ """
55
+ Base class for model's outputs, with potential hidden states.
56
+
57
+ Args:
58
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
59
+ Sequence of hidden-states at the output of the last layer of the model.
60
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
61
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
62
+ the output of each layer) of shape `(batch_size, num_channels, height, width)`.
63
+
64
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
65
+ """
66
+
67
+ last_hidden_state: tf.Tensor = None
68
+ hidden_states: Optional[Tuple[tf.Tensor, ...]] = None
69
+
70
+
71
+ @dataclass
72
+ class TFBaseModelOutputWithPooling(ModelOutput):
73
+ """
74
+ Base class for model's outputs that also contains a pooling of the last hidden states.
75
+
76
+ Args:
77
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
78
+ Sequence of hidden-states at the output of the last layer of the model.
79
+ pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
80
+ Last layer hidden-state of the first token of the sequence (classification token) further processed by a
81
+ Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
82
+ prediction (classification) objective during pretraining.
83
+
84
+ This output is usually *not* a good summary of the semantic content of the input; you're often better off
85
+ averaging or pooling the sequence of hidden-states for the whole input sequence.
86
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
87
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
88
+ `(batch_size, sequence_length, hidden_size)`.
89
+
90
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
91
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
92
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
93
+ sequence_length)`.
94
+
95
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
96
+ heads.
97
+ """
98
+
99
+ last_hidden_state: tf.Tensor = None
100
+ pooler_output: tf.Tensor = None
101
+ hidden_states: Tuple[tf.Tensor] | None = None
102
+ attentions: Tuple[tf.Tensor] | None = None
103
+
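Since the docstring above recommends averaging the hidden states over `pooler_output` for many use cases, here is a hedged sketch of masked mean pooling (the function name and shapes are illustrative, not part of the library):

```python
import tensorflow as tf


def masked_mean_pool(last_hidden_state: tf.Tensor, attention_mask: tf.Tensor) -> tf.Tensor:
    # last_hidden_state: (batch, seq_len, hidden); attention_mask: (batch, seq_len)
    mask = tf.cast(attention_mask[..., tf.newaxis], last_hidden_state.dtype)
    summed = tf.reduce_sum(last_hidden_state * mask, axis=1)
    counts = tf.maximum(tf.reduce_sum(mask, axis=1), 1e-9)
    return summed / counts


pooled = masked_mean_pool(tf.random.normal((2, 5, 8)), tf.ones((2, 5), dtype=tf.int32))
print(pooled.shape)  # (2, 8)
```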
104
+
105
+ @dataclass
106
+ class TFBaseModelOutputWithPoolingAndNoAttention(ModelOutput):
107
+ """
108
+ Base class for model's outputs that also contains a pooling of the last hidden states.
109
+
110
+ Args:
111
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
112
+ Sequence of hidden-states at the output of the last layer of the model.
113
+ pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
114
+ Last layer hidden-state after a pooling operation on the spatial dimensions.
115
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
116
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
117
+ the output of each layer) of shape `(batch_size, num_channels, height, width)`.
118
+
119
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
120
+ """
121
+
122
+ last_hidden_state: tf.Tensor = None
123
+ pooler_output: tf.Tensor = None
124
+ hidden_states: Optional[Tuple[tf.Tensor, ...]] = None
125
+
126
+
127
+ @dataclass
128
+ class TFBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
129
+ """
130
+ Base class for model's outputs that also contains a pooling of the last hidden states.
131
+
132
+ Args:
133
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
134
+ Sequence of hidden-states at the output of the last layer of the model.
135
+ pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
136
+ Last layer hidden-state of the first token of the sequence (classification token) further processed by a
137
+ Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
138
+ prediction (classification) objective during pretraining.
139
+
140
+ This output is usually *not* a good summary of the semantic content of the input; you're often better off
141
+ averaging or pooling the sequence of hidden-states for the whole input sequence.
142
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
143
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
144
+ sequence_length, embed_size_per_head)`.
145
+
146
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
147
+ `past_key_values` input) to speed up sequential decoding.
148
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
149
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
150
+ `(batch_size, sequence_length, hidden_size)`.
151
+
152
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
153
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
154
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
155
+ sequence_length)`.
156
+
157
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
158
+ heads.
159
+ cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
160
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
161
+ sequence_length)`.
162
+
163
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
164
+ weighted average in the cross-attention heads.
165
+ """
166
+
167
+ last_hidden_state: tf.Tensor = None
168
+ pooler_output: tf.Tensor = None
169
+ past_key_values: List[tf.Tensor] | None = None
170
+ hidden_states: Tuple[tf.Tensor] | None = None
171
+ attentions: Tuple[tf.Tensor] | None = None
172
+ cross_attentions: Tuple[tf.Tensor] | None = None
173
+
174
+
175
+ @dataclass
176
+ class TFBaseModelOutputWithPast(ModelOutput):
177
+ """
178
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
179
+
180
+ Args:
181
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
182
+ Sequence of hidden-states at the output of the last layer of the model.
183
+
184
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
185
+ hidden_size)` is output.
186
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
187
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
188
+ sequence_length, embed_size_per_head)`.
189
+
190
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
191
+ `past_key_values` input) to speed up sequential decoding.
192
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
193
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
194
+ `(batch_size, sequence_length, hidden_size)`.
195
+
196
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
197
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
198
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
199
+ sequence_length)`.
200
+
201
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
202
+ heads.
203
+ """
204
+
205
+ last_hidden_state: tf.Tensor = None
206
+ past_key_values: List[tf.Tensor] | None = None
207
+ hidden_states: Tuple[tf.Tensor] | None = None
208
+ attentions: Tuple[tf.Tensor] | None = None
209
+
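To illustrate how the `past_key_values` documented here are meant to be reused, below is a hedged sketch of one step of cached decoding with a TF causal LM (it assumes network access and TensorFlow; GPT-2's concrete output class is a causal-LM variant, but its `past_key_values` field behaves exactly as described above):

```python
import tensorflow as tf

from transformers import AutoTokenizer, TFGPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = TFGPT2LMHeadModel.from_pretrained("gpt2")

inputs = tokenizer("Hello, my", return_tensors="tf")
out = model(**inputs, use_cache=True)

# Next step: feed only the newly chosen token together with the cached key/values,
# instead of re-running the whole prefix.
next_token = tf.argmax(out.logits[:, -1:, :], axis=-1)
out = model(input_ids=next_token, past_key_values=out.past_key_values, use_cache=True)
```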
210
+
211
+ @dataclass
212
+ class TFBaseModelOutputWithCrossAttentions(ModelOutput):
213
+ """
214
+ Base class for model's outputs, with potential hidden states and attentions.
215
+
216
+ Args:
217
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
218
+ Sequence of hidden-states at the output of the last layer of the model.
219
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
220
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
221
+ `(batch_size, sequence_length, hidden_size)`.
222
+
223
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
224
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
225
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
226
+ sequence_length)`.
227
+
228
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
229
+ heads.
230
+ cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
231
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
232
+ sequence_length)`.
233
+
234
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
235
+ weighted average in the cross-attention heads.
236
+ """
237
+
238
+ last_hidden_state: tf.Tensor = None
239
+ hidden_states: Tuple[tf.Tensor] | None = None
240
+ attentions: Tuple[tf.Tensor] | None = None
241
+ cross_attentions: Tuple[tf.Tensor] | None = None
242
+
243
+
244
+ @dataclass
245
+ class TFBaseModelOutputWithPastAndCrossAttentions(ModelOutput):
246
+ """
247
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
248
+
249
+ Args:
250
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
251
+ Sequence of hidden-states at the output of the last layer of the model.
252
+
253
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
254
+ hidden_size)` is output.
255
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
256
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
257
+ sequence_length, embed_size_per_head)`.
258
+
259
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
260
+ `past_key_values` input) to speed up sequential decoding.
261
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
262
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
263
+ `(batch_size, sequence_length, hidden_size)`.
264
+
265
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
266
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
267
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
268
+ sequence_length)`.
269
+
270
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
271
+ heads.
272
+ cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
273
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
274
+ sequence_length)`.
275
+
276
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
277
+ weighted average in the cross-attention heads.
278
+ """
279
+
280
+ last_hidden_state: tf.Tensor = None
281
+ past_key_values: List[tf.Tensor] | None = None
282
+ hidden_states: Tuple[tf.Tensor] | None = None
283
+ attentions: Tuple[tf.Tensor] | None = None
284
+ cross_attentions: Tuple[tf.Tensor] | None = None
285
+
286
+
287
+ @dataclass
288
+ class TFSeq2SeqModelOutput(ModelOutput):
289
+ """
290
+ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential
291
+ decoding.
292
+
293
+ Args:
294
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
295
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
296
+
297
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
298
+ hidden_size)` is output.
299
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
300
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
301
+ sequence_length, embed_size_per_head)`.
302
+
303
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
304
+ used (see `past_key_values` input) to speed up sequential decoding.
305
+ decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
306
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
307
+ `(batch_size, sequence_length, hidden_size)`.
308
+
309
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
310
+ decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
311
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
312
+ sequence_length)`.
313
+
314
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
315
+ self-attention heads.
316
+ cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
317
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
318
+ sequence_length)`.
319
+
320
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
321
+ weighted average in the cross-attention heads.
322
+ encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
323
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
324
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
325
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
326
+ `(batch_size, sequence_length, hidden_size)`.
327
+
328
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
329
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
330
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
331
+ sequence_length)`.
332
+
333
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
334
+ self-attention heads.
335
+ """
336
+
337
+ last_hidden_state: tf.Tensor = None
338
+ past_key_values: List[tf.Tensor] | None = None
339
+ decoder_hidden_states: Tuple[tf.Tensor] | None = None
340
+ decoder_attentions: Tuple[tf.Tensor] | None = None
341
+ cross_attentions: Tuple[tf.Tensor] | None = None
342
+ encoder_last_hidden_state: tf.Tensor | None = None
343
+ encoder_hidden_states: Tuple[tf.Tensor] | None = None
344
+ encoder_attentions: Tuple[tf.Tensor] | None = None
345
+
346
+
347
+ @dataclass
348
+ class TFCausalLMOutput(ModelOutput):
349
+ """
350
+ Base class for causal language model (or autoregressive) outputs.
351
+
352
+ Args:
353
+ loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided):
354
+ Language modeling loss (for next-token prediction).
355
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
356
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
357
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
358
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
359
+ `(batch_size, sequence_length, hidden_size)`.
360
+
361
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
362
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
363
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
364
+ sequence_length)`.
365
+
366
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
367
+ heads.
368
+ """
369
+
370
+ loss: tf.Tensor | None = None
371
+ logits: tf.Tensor = None
372
+ hidden_states: Tuple[tf.Tensor] | None = None
373
+ attentions: Tuple[tf.Tensor] | None = None
374
+
375
+
376
+ @dataclass
377
+ class TFCausalLMOutputWithPast(ModelOutput):
378
+ """
379
+ Base class for causal language model (or autoregressive) outputs.
380
+
381
+ Args:
382
+ loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided):
383
+ Language modeling loss (for next-token prediction).
384
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
385
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
386
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
387
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
388
+ sequence_length, embed_size_per_head)`.
389
+
390
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
391
+ `past_key_values` input) to speed up sequential decoding.
392
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
393
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
394
+ `(batch_size, sequence_length, hidden_size)`.
395
+
396
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
397
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
398
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
399
+ sequence_length)`.
400
+
401
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
402
+ heads.
403
+ """
404
+
405
+ loss: tf.Tensor | None = None
406
+ logits: tf.Tensor = None
407
+ past_key_values: List[tf.Tensor] | None = None
408
+ hidden_states: Tuple[tf.Tensor] | None = None
409
+ attentions: Tuple[tf.Tensor] | None = None
410
+
411
+
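The `past_key_values` field above is what enables incremental decoding: each forward pass returns the cached keys/values, and the next call only needs the newly generated token. A minimal sketch (the `gpt2` checkpoint and greedy token selection are assumptions for illustration):

    import tensorflow as tf
    from transformers import AutoTokenizer, TFGPT2LMHeadModel

    tokenizer = AutoTokenizer.from_pretrained("gpt2")       # illustrative checkpoint
    model = TFGPT2LMHeadModel.from_pretrained("gpt2")

    inputs = tokenizer("The capital of France is", return_tensors="tf")
    out = model(inputs.input_ids, use_cache=True)            # TFCausalLMOutputWithPast

    # Greedily pick the next token, then feed only that token plus the cache.
    next_token = tf.argmax(out.logits[:, -1, :], axis=-1, output_type=tf.int32)[:, None]
    out = model(next_token, past_key_values=out.past_key_values, use_cache=True)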
412
+ @dataclass
413
+ class TFCausalLMOutputWithCrossAttentions(ModelOutput):
414
+ """
415
+ Base class for causal language model (or autoregressive) outputs.
416
+
417
+ Args:
418
+ loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided):
419
+ Language modeling loss (for next-token prediction).
420
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
421
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
422
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
423
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
424
+ `(batch_size, sequence_length, hidden_size)`.
425
+
426
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
427
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
428
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
429
+ sequence_length)`.
430
+
431
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
432
+ heads.
433
+ cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
434
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
435
+ sequence_length)`.
436
+
437
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
438
+ weighted average in the cross-attention heads.
439
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
440
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
441
+ sequence_length, embed_size_per_head)`.
442
+
443
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
444
+ `past_key_values` input) to speed up sequential decoding.
445
+ """
446
+
447
+ loss: tf.Tensor | None = None
448
+ logits: tf.Tensor = None
449
+ past_key_values: List[tf.Tensor] | None = None
450
+ hidden_states: Tuple[tf.Tensor] | None = None
451
+ attentions: Tuple[tf.Tensor] | None = None
452
+ cross_attentions: Tuple[tf.Tensor] | None = None
453
+
454
+
455
+ @dataclass
456
+ class TFMaskedLMOutput(ModelOutput):
457
+ """
458
+ Base class for masked language models outputs.
459
+
460
+ Args:
461
+ loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided):
462
+ Masked language modeling (MLM) loss.
463
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
464
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
465
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
466
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
467
+ `(batch_size, sequence_length, hidden_size)`.
468
+
469
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
470
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
471
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
472
+ sequence_length)`.
473
+
474
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
475
+ heads.
476
+ """
477
+
478
+ loss: tf.Tensor | None = None
479
+ logits: tf.Tensor = None
480
+ hidden_states: Tuple[tf.Tensor] | None = None
481
+ attentions: Tuple[tf.Tensor] | None = None
482
+
483
+
484
+ @dataclass
485
+ class TFSeq2SeqLMOutput(ModelOutput):
486
+ """
487
+ Base class for sequence-to-sequence language models outputs.
488
+
489
+ Args:
490
+ loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided):
491
+ Language modeling loss.
492
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
493
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
494
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
495
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
496
+ sequence_length, embed_size_per_head)`.
497
+
498
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
499
+ used (see `past_key_values` input) to speed up sequential decoding.
500
+ decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
501
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
502
+ `(batch_size, sequence_length, hidden_size)`.
503
+
504
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
505
+ decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
506
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
507
+ sequence_length)`.
508
+
509
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
510
+ self-attention heads.
511
+ cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
512
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
513
+ sequence_length)`.
514
+
515
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
516
+ weighted average in the cross-attention heads.
517
+ encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
518
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
519
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
520
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
521
+ `(batch_size, sequence_length, hidden_size)`.
522
+
523
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
524
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
525
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
526
+ sequence_length)`.
527
+
528
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
529
+ self-attention heads.
530
+ """
531
+
532
+ loss: tf.Tensor | None = None
533
+ logits: tf.Tensor = None
534
+ past_key_values: List[tf.Tensor] | None = None
535
+ decoder_hidden_states: Tuple[tf.Tensor] | None = None
536
+ decoder_attentions: Tuple[tf.Tensor] | None = None
537
+ cross_attentions: Tuple[tf.Tensor] | None = None
538
+ encoder_last_hidden_state: tf.Tensor | None = None
539
+ encoder_hidden_states: Tuple[tf.Tensor] | None = None
540
+ encoder_attentions: Tuple[tf.Tensor] | None = None
541
+
542
+
543
+ @dataclass
544
+ class TFNextSentencePredictorOutput(ModelOutput):
545
+ """
546
+ Base class for outputs of models predicting if two sentences are consecutive or not.
547
+
548
+ Args:
549
+ loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `next_sentence_label` is provided):
550
+ Next sentence prediction loss.
551
+ logits (`tf.Tensor` of shape `(batch_size, 2)`):
552
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
553
+ before SoftMax).
554
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
555
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
556
+ `(batch_size, sequence_length, hidden_size)`.
557
+
558
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
559
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
560
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
561
+ sequence_length)`.
562
+
563
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
564
+ heads.
565
+ """
566
+
567
+ loss: tf.Tensor | None = None
568
+ logits: tf.Tensor = None
569
+ hidden_states: Tuple[tf.Tensor] | None = None
570
+ attentions: Tuple[tf.Tensor] | None = None
571
+
572
+
573
+ @dataclass
574
+ class TFSequenceClassifierOutput(ModelOutput):
575
+ """
576
+ Base class for outputs of sentence classification models.
577
+
578
+ Args:
579
+ loss (`tf.Tensor` of shape `(batch_size, )`, *optional*, returned when `labels` is provided):
580
+ Classification (or regression if config.num_labels==1) loss.
581
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
582
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
583
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
584
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
585
+ `(batch_size, sequence_length, hidden_size)`.
586
+
587
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
588
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
589
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
590
+ sequence_length)`.
591
+
592
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
593
+ heads.
594
+ """
595
+
596
+ loss: tf.Tensor | None = None
597
+ logits: tf.Tensor = None
598
+ hidden_states: Tuple[tf.Tensor] | None = None
599
+ attentions: Tuple[tf.Tensor] | None = None
600
+
601
+
602
+ @dataclass
603
+ class TFSeq2SeqSequenceClassifierOutput(ModelOutput):
604
+ """
605
+ Base class for outputs of sequence-to-sequence sentence classification models.
606
+
607
+ Args:
608
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `label` is provided):
609
+ Classification (or regression if config.num_labels==1) loss.
610
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
611
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
612
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
613
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
614
+ sequence_length, embed_size_per_head)`.
615
+
616
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
617
+ used (see `past_key_values` input) to speed up sequential decoding.
618
+ decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
619
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
620
+ `(batch_size, sequence_length, hidden_size)`.
621
+
622
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
623
+ decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
624
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
625
+ sequence_length)`.
626
+
627
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
628
+ self-attention heads.
629
+ cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
630
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
631
+ sequence_length)`.
632
+ encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
633
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
634
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
635
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
636
+ `(batch_size, sequence_length, hidden_size)`.
637
+
638
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
639
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
640
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
641
+ sequence_length)`.
642
+
643
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
644
+ self-attention heads.
645
+ """
646
+
647
+ loss: tf.Tensor | None = None
648
+ logits: tf.Tensor = None
649
+ past_key_values: List[tf.Tensor] | None = None
650
+ decoder_hidden_states: Tuple[tf.Tensor] | None = None
651
+ decoder_attentions: Tuple[tf.Tensor] | None = None
652
+ cross_attentions: Tuple[tf.Tensor] | None = None
653
+ encoder_last_hidden_state: tf.Tensor | None = None
654
+ encoder_hidden_states: Tuple[tf.Tensor] | None = None
655
+ encoder_attentions: Tuple[tf.Tensor] | None = None
656
+
657
+
658
+ @dataclass
659
+ class TFSemanticSegmenterOutput(ModelOutput):
660
+ """
661
+ Base class for outputs of semantic segmentation models.
662
+
663
+ Args:
664
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
665
+ Classification (or regression if config.num_labels==1) loss.
666
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
667
+ Classification scores for each pixel.
668
+
669
+ <Tip warning={true}>
670
+
671
+ The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
672
+ to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the
673
+ original image size as post-processing. You should always check your logits shape and resize as needed.
674
+
675
+ </Tip>
676
+
677
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
678
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
679
+ the output of each layer) of shape `(batch_size, patch_size, hidden_size)`.
680
+
681
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
682
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
683
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`.
684
+
685
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
686
+ heads.
687
+ """
688
+
689
+ loss: tf.Tensor | None = None
690
+ logits: tf.Tensor = None
691
+ hidden_states: Tuple[tf.Tensor] | None = None
692
+ attentions: Tuple[tf.Tensor] | None = None
693
+
694
+
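The Tip above implies a post-processing step. A hedged sketch of it (checkpoint name, input size, and bilinear resizing are assumptions for illustration): upsample the coarse logits back to the input resolution, then take a per-pixel argmax.

    import tensorflow as tf
    from transformers import TFSegformerForSemanticSegmentation

    ckpt = "nvidia/segformer-b0-finetuned-ade-512-512"       # illustrative checkpoint
    model = TFSegformerForSemanticSegmentation.from_pretrained(ckpt)

    pixel_values = tf.random.uniform((1, 3, 512, 512))       # dummy (batch, channels, H, W) input
    logits = model(pixel_values).logits                      # (batch, num_labels, H/4, W/4)

    # tf.image.resize expects channels-last, so transpose around the resize.
    logits = tf.transpose(logits, [0, 2, 3, 1])
    logits = tf.image.resize(logits, size=(512, 512), method="bilinear")
    segmentation = tf.argmax(logits, axis=-1)                # (batch, 512, 512) class ids per pixel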
695
+ @dataclass
696
+ class TFSemanticSegmenterOutputWithNoAttention(ModelOutput):
697
+ """
698
+ Base class for outputs of semantic segmentation models that do not output attention scores.
699
+
700
+ Args:
701
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
702
+ Classification (or regression if config.num_labels==1) loss.
703
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
704
+ Classification scores for each pixel.
705
+
706
+ <Tip warning={true}>
707
+
708
+ The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
709
+ to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the
710
+ original image size as post-processing. You should always check your logits shape and resize as needed.
711
+
712
+ </Tip>
713
+
714
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
715
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
716
+ the output of each layer) of shape `(batch_size, patch_size, hidden_size)`.
717
+
718
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
719
+ """
720
+
721
+ loss: tf.Tensor | None = None
722
+ logits: tf.Tensor = None
723
+ hidden_states: Tuple[tf.Tensor] | None = None
724
+
725
+
726
+ @dataclass
727
+ class TFImageClassifierOutput(ModelOutput):
728
+ """
729
+ Base class for outputs of image classification models.
730
+
731
+ Args:
732
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
733
+ Classification (or regression if config.num_labels==1) loss.
734
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
735
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
736
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
737
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
738
+ the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called
739
+ feature maps) of the model at the output of each stage.
740
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
741
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`.
742
+
743
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
744
+ heads.
745
+ """
746
+
747
+ loss: tf.Tensor | None = None
748
+ logits: tf.Tensor = None
749
+ hidden_states: Tuple[tf.Tensor] | None = None
750
+ attentions: Tuple[tf.Tensor] | None = None
751
+
752
+
753
+ @dataclass
754
+ class TFMultipleChoiceModelOutput(ModelOutput):
755
+ """
756
+ Base class for outputs of multiple choice models.
757
+
758
+ Args:
759
+ loss (`tf.Tensor` of shape *(batch_size, )*, *optional*, returned when `labels` is provided):
760
+ Classification loss.
761
+ logits (`tf.Tensor` of shape `(batch_size, num_choices)`):
762
+ *num_choices* is the second dimension of the input tensors. (see *input_ids* above).
763
+
764
+ Classification scores (before SoftMax).
765
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
766
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
767
+ `(batch_size, sequence_length, hidden_size)`.
768
+
769
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
770
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
771
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
772
+ sequence_length)`.
773
+
774
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
775
+ heads.
776
+ """
777
+
778
+ loss: tf.Tensor | None = None
779
+ logits: tf.Tensor = None
780
+ hidden_states: Tuple[tf.Tensor] | None = None
781
+ attentions: Tuple[tf.Tensor] | None = None
782
+
783
+
784
+ @dataclass
785
+ class TFTokenClassifierOutput(ModelOutput):
786
+ """
787
+ Base class for outputs of token classification models.
788
+
789
+ Args:
790
+ loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of unmasked labels, returned when `labels` is provided):
791
+ Classification loss.
792
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):
793
+ Classification scores (before SoftMax).
794
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
795
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
796
+ `(batch_size, sequence_length, hidden_size)`.
797
+
798
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
799
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
800
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
801
+ sequence_length)`.
802
+
803
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
804
+ heads.
805
+ """
806
+
807
+ loss: tf.Tensor | None = None
808
+ logits: tf.Tensor = None
809
+ hidden_states: Tuple[tf.Tensor] | None = None
810
+ attentions: Tuple[tf.Tensor] | None = None
811
+
812
+
813
+ @dataclass
814
+ class TFQuestionAnsweringModelOutput(ModelOutput):
815
+ """
816
+ Base class for outputs of question answering models.
817
+
818
+ Args:
819
+ loss (`tf.Tensor` of shape `(batch_size, )`, *optional*, returned when `start_positions` and `end_positions` are provided):
820
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
821
+ start_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
822
+ Span-start scores (before SoftMax).
823
+ end_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
824
+ Span-end scores (before SoftMax).
825
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
826
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
827
+ `(batch_size, sequence_length, hidden_size)`.
828
+
829
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
830
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
831
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
832
+ sequence_length)`.
833
+
834
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
835
+ heads.
836
+ """
837
+
838
+ loss: tf.Tensor | None = None
839
+ start_logits: tf.Tensor = None
840
+ end_logits: tf.Tensor = None
841
+ hidden_states: Tuple[tf.Tensor] | None = None
842
+ attentions: Tuple[tf.Tensor] | None = None
843
+
844
+
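As a sketch of how `start_logits`/`end_logits` are typically consumed (the SQuAD-finetuned checkpoint and single-example greedy decoding are assumptions for illustration):

    import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering

    ckpt = "distilbert-base-cased-distilled-squad"            # illustrative checkpoint
    tokenizer = AutoTokenizer.from_pretrained(ckpt)
    model = TFAutoModelForQuestionAnswering.from_pretrained(ckpt)

    inputs = tokenizer("Who wrote it?", "It was written by Jane.", return_tensors="tf")
    outputs = model(**inputs)                                 # TFQuestionAnsweringModelOutput

    # Pick the most likely start/end indices and decode the tokens in between.
    start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
    end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
    answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])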
845
+ @dataclass
846
+ class TFSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
847
+ """
848
+ Base class for outputs of sequence-to-sequence question answering models.
849
+
850
+ Args:
851
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
852
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
853
+ start_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
854
+ Span-start scores (before SoftMax).
855
+ end_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
856
+ Span-end scores (before SoftMax).
857
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
858
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
859
+ sequence_length, embed_size_per_head)`.
860
+
861
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
862
+ used (see `past_key_values` input) to speed up sequential decoding.
863
+ decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
864
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
865
+ `(batch_size, sequence_length, hidden_size)`.
866
+
867
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
868
+ decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
869
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
870
+ sequence_length)`.
871
+
872
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
873
+ self-attention heads.
874
+ encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
875
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
876
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
877
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
878
+ `(batch_size, sequence_length, hidden_size)`.
879
+
880
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
881
+ encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
882
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
883
+ sequence_length)`.
884
+
885
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
886
+ self-attention heads.
887
+ """
888
+
889
+ loss: tf.Tensor | None = None
890
+ start_logits: tf.Tensor = None
891
+ end_logits: tf.Tensor = None
892
+ past_key_values: List[tf.Tensor] | None = None
893
+ decoder_hidden_states: Tuple[tf.Tensor] | None = None
894
+ decoder_attentions: Tuple[tf.Tensor] | None = None
895
+ encoder_last_hidden_state: tf.Tensor | None = None
896
+ encoder_hidden_states: Tuple[tf.Tensor] | None = None
897
+ encoder_attentions: Tuple[tf.Tensor] | None = None
898
+
899
+
900
+ @dataclass
901
+ class TFSequenceClassifierOutputWithPast(ModelOutput):
902
+ """
903
+ Base class for outputs of sentence classification models.
904
+
905
+ Args:
906
+ loss (`tf.Tensor` of shape `(batch_size, )`, *optional*, returned when `labels` is provided):
907
+ Classification (or regression if config.num_labels==1) loss.
908
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
909
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
910
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
911
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
912
+ sequence_length, embed_size_per_head)`.
913
+
914
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
915
+ `past_key_values` input) to speed up sequential decoding.
916
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
917
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
918
+ `(batch_size, sequence_length, hidden_size)`.
919
+
920
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
921
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
922
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
923
+ sequence_length)`.
924
+
925
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
926
+ heads.
927
+ """
928
+
929
+ loss: tf.Tensor | None = None
930
+ logits: tf.Tensor = None
931
+ past_key_values: List[tf.Tensor] | None = None
932
+ hidden_states: Tuple[tf.Tensor] | None = None
933
+ attentions: Tuple[tf.Tensor] | None = None
934
+
935
+
936
+ @dataclass
937
+ class TFImageClassifierOutputWithNoAttention(ModelOutput):
938
+ """
939
+ Base class for outputs of image classification models.
940
+
941
+ Args:
942
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
943
+ Classification (or regression if config.num_labels==1) loss.
944
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
945
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
946
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
947
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
948
+ the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called
949
+ feature maps) of the model at the output of each stage.
950
+ """
951
+
952
+ loss: tf.Tensor | None = None
953
+ logits: tf.Tensor = None
954
+ hidden_states: Optional[Tuple[tf.Tensor, ...]] = None
955
+
956
+
957
+ @dataclass
958
+ class TFMaskedImageModelingOutput(ModelOutput):
959
+ """
960
+ Base class for outputs of masked image completion / in-painting models.
961
+
962
+ Args:
963
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
964
+ Reconstruction loss.
965
+ reconstruction (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
966
+ Reconstructed / completed images.
967
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when
968
+ `config.output_hidden_states=True`):
969
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
970
+ the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called
971
+ feature maps) of the model at the output of each stage.
972
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when
973
+ `config.output_attentions=True`):
974
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`.
975
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
976
+ heads.
977
+ """
978
+
979
+ loss: tf.Tensor | None = None
980
+ reconstruction: tf.Tensor = None
981
+ hidden_states: Tuple[tf.Tensor] | None = None
982
+ attentions: Tuple[tf.Tensor] | None = None
983
+
984
+ @property
985
+ def logits(self):
986
+ warnings.warn(
987
+ "logits attribute is deprecated and will be removed in version 5 of Transformers."
988
+ " Please use the reconstruction attribute to retrieve the final output instead.",
989
+ FutureWarning,
990
+ )
991
+ return self.reconstruction
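The deprecated `logits` property above keeps old call sites working while steering new code toward `reconstruction`; a short sketch (the checkpoint name and dummy input are assumptions for illustration):

    import tensorflow as tf
    from transformers import TFViTForMaskedImageModeling

    # Illustrative checkpoint; any ViT masked-image-modeling checkpoint with TF weights works.
    model = TFViTForMaskedImageModeling.from_pretrained("google/vit-base-patch16-224-in21k")
    pixel_values = tf.random.uniform((1, 3, 224, 224))        # dummy image batch

    output = model(pixel_values)
    reconstruction = output.reconstruction                    # preferred accessor
    legacy = output.logits                                    # same tensor, but emits a FutureWarning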
llmeval-env/lib/python3.10/site-packages/transformers/modeling_tf_utils.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/processing_utils.py ADDED
@@ -0,0 +1,524 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processing saving/loading class for common processors.
17
+ """
18
+
19
+ import copy
20
+ import inspect
21
+ import json
22
+ import os
23
+ import warnings
24
+ from pathlib import Path
25
+ from typing import Any, Dict, Optional, Tuple, Union
26
+
27
+ from .dynamic_module_utils import custom_object_save
28
+ from .tokenization_utils_base import PreTrainedTokenizerBase
29
+ from .utils import (
30
+ PROCESSOR_NAME,
31
+ PushToHubMixin,
32
+ add_model_info_to_auto_map,
33
+ cached_file,
34
+ copy_func,
35
+ direct_transformers_import,
36
+ download_url,
37
+ is_offline_mode,
38
+ is_remote_url,
39
+ logging,
40
+ )
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ # Dynamically import the Transformers module to grab the attribute classes of the processor from their names.
46
+ transformers_module = direct_transformers_import(Path(__file__).parent)
47
+
48
+
49
+ AUTO_TO_BASE_CLASS_MAPPING = {
50
+ "AutoTokenizer": "PreTrainedTokenizerBase",
51
+ "AutoFeatureExtractor": "FeatureExtractionMixin",
52
+ "AutoImageProcessor": "ImageProcessingMixin",
53
+ }
54
+
55
+
56
+ class ProcessorMixin(PushToHubMixin):
57
+ """
58
+ This is a mixin used to provide saving/loading functionality for all processor classes.
59
+ """
60
+
61
+ attributes = ["feature_extractor", "tokenizer"]
62
+ # Names need to be attr_class for attr in attributes
63
+ feature_extractor_class = None
64
+ tokenizer_class = None
65
+ _auto_class = None
66
+
67
+ # args have to match the attributes class attribute
68
+ def __init__(self, *args, **kwargs):
69
+ # Sanitize args and kwargs
70
+ for key in kwargs:
71
+ if key not in self.attributes:
72
+ raise TypeError(f"Unexpected keyword argument {key}.")
73
+ for arg, attribute_name in zip(args, self.attributes):
74
+ if attribute_name in kwargs:
75
+ raise TypeError(f"Got multiple values for argument {attribute_name}.")
76
+ else:
77
+ kwargs[attribute_name] = arg
78
+
79
+ if len(kwargs) != len(self.attributes):
80
+ raise ValueError(
81
+ f"This processor requires {len(self.attributes)} arguments: {', '.join(self.attributes)}. Got "
82
+ f"{len(args)} arguments instead."
83
+ )
84
+
85
+ # Check each arg is of the proper class (this will also catch a user initializing in the wrong order)
86
+ for attribute_name, arg in kwargs.items():
87
+ class_name = getattr(self, f"{attribute_name}_class")
88
+ # Nothing is ever going to be an instance of "AutoXxx", in that case we check the base class.
89
+ class_name = AUTO_TO_BASE_CLASS_MAPPING.get(class_name, class_name)
90
+ if isinstance(class_name, tuple):
91
+ proper_class = tuple(getattr(transformers_module, n) for n in class_name if n is not None)
92
+ else:
93
+ proper_class = getattr(transformers_module, class_name)
94
+
95
+ if not isinstance(arg, proper_class):
96
+ raise ValueError(
97
+ f"Received a {type(arg).__name__} for argument {attribute_name}, but a {class_name} was expected."
98
+ )
99
+
100
+ setattr(self, attribute_name, arg)
101
+
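To make the `attributes` / `*_class` convention checked above concrete, here is a hedged sketch of a custom processor; `MyProcessor` and its `__call__` body are hypothetical, not part of this file:

    from transformers import ProcessorMixin

    class MyProcessor(ProcessorMixin):
        # Each name in `attributes` must have a matching `<name>_class` attribute;
        # __init__ above validates the constructor arguments against these declarations.
        attributes = ["feature_extractor", "tokenizer"]
        feature_extractor_class = "AutoFeatureExtractor"
        tokenizer_class = "AutoTokenizer"

        def __call__(self, audio=None, text=None, **kwargs):
            # Hypothetical merge of the two components' outputs.
            encoded = dict(self.feature_extractor(audio, **kwargs)) if audio is not None else {}
            if text is not None:
                encoded.update(self.tokenizer(text, **kwargs))
            return encoded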
102
+ def to_dict(self) -> Dict[str, Any]:
103
+ """
104
+ Serializes this instance to a Python dictionary.
105
+
106
+ Returns:
107
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this processor instance.
108
+ """
109
+ output = copy.deepcopy(self.__dict__)
110
+
111
+ # Get the kwargs in `__init__`.
112
+ sig = inspect.signature(self.__init__)
113
+ # Only save the attributes that are presented in the kwargs of `__init__`.
114
+ attrs_to_save = sig.parameters
115
+ # Don't save attributes like `tokenizer`, `image processor` etc.
116
+ attrs_to_save = [x for x in attrs_to_save if x not in self.__class__.attributes]
117
+ # extra attributes to be kept
118
+ attrs_to_save += ["auto_map"]
119
+
120
+ output = {k: v for k, v in output.items() if k in attrs_to_save}
121
+
122
+ output["processor_class"] = self.__class__.__name__
123
+
124
+ if "tokenizer" in output:
125
+ del output["tokenizer"]
126
+ if "image_processor" in output:
127
+ del output["image_processor"]
128
+ if "feature_extractor" in output:
129
+ del output["feature_extractor"]
130
+
131
+ # Some attributes have different names but contain objects that are not simple strings
132
+ output = {
133
+ k: v
134
+ for k, v in output.items()
135
+ if not (isinstance(v, PushToHubMixin) or v.__class__.__name__ == "BeamSearchDecoderCTC")
136
+ }
137
+
138
+ return output
139
+
140
+ def to_json_string(self) -> str:
141
+ """
142
+ Serializes this instance to a JSON string.
143
+
144
+ Returns:
145
+ `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
146
+ """
147
+ dictionary = self.to_dict()
148
+
149
+ return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"
150
+
151
+ def to_json_file(self, json_file_path: Union[str, os.PathLike]):
152
+ """
153
+ Save this instance to a JSON file.
154
+
155
+ Args:
156
+ json_file_path (`str` or `os.PathLike`):
157
+ Path to the JSON file in which this processor instance's parameters will be saved.
158
+ """
159
+ with open(json_file_path, "w", encoding="utf-8") as writer:
160
+ writer.write(self.to_json_string())
161
+
162
+ def __repr__(self):
163
+ attributes_repr = [f"- {name}: {repr(getattr(self, name))}" for name in self.attributes]
164
+ attributes_repr = "\n".join(attributes_repr)
165
+ return f"{self.__class__.__name__}:\n{attributes_repr}\n\n{self.to_json_string()}"
166
+
167
+ def save_pretrained(self, save_directory, push_to_hub: bool = False, **kwargs):
168
+ """
169
+ Saves the attributes of this processor (feature extractor, tokenizer...) in the specified directory so that it
170
+ can be reloaded using the [`~ProcessorMixin.from_pretrained`] method.
171
+
172
+ <Tip>
173
+
174
+ This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and
175
+ [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`]. Please refer to the docstrings of the
176
+ methods above for more information.
177
+
178
+ </Tip>
179
+
180
+ Args:
181
+ save_directory (`str` or `os.PathLike`):
182
+ Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will
183
+ be created if it does not exist).
184
+ push_to_hub (`bool`, *optional*, defaults to `False`):
185
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
186
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
187
+ namespace).
188
+ kwargs (`Dict[str, Any]`, *optional*):
189
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
190
+ """
191
+ use_auth_token = kwargs.pop("use_auth_token", None)
192
+
193
+ if use_auth_token is not None:
194
+ warnings.warn(
195
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
196
+ FutureWarning,
197
+ )
198
+ if kwargs.get("token", None) is not None:
199
+ raise ValueError(
200
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
201
+ )
202
+ kwargs["token"] = use_auth_token
203
+
204
+ os.makedirs(save_directory, exist_ok=True)
205
+
206
+ if push_to_hub:
207
+ commit_message = kwargs.pop("commit_message", None)
208
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
209
+ repo_id = self._create_repo(repo_id, **kwargs)
210
+ files_timestamps = self._get_files_timestamps(save_directory)
211
+ # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
212
+ # loaded from the Hub.
213
+ if self._auto_class is not None:
214
+ attrs = [getattr(self, attribute_name) for attribute_name in self.attributes]
215
+ configs = [(a.init_kwargs if isinstance(a, PreTrainedTokenizerBase) else a) for a in attrs]
216
+ configs.append(self)
217
+ custom_object_save(self, save_directory, config=configs)
218
+
219
+ for attribute_name in self.attributes:
220
+ attribute = getattr(self, attribute_name)
221
+ # Include the processor class in the attribute config so this processor can then be reloaded with the
222
+ # `AutoProcessor` API.
223
+ if hasattr(attribute, "_set_processor_class"):
224
+ attribute._set_processor_class(self.__class__.__name__)
225
+ attribute.save_pretrained(save_directory)
226
+
227
+ if self._auto_class is not None:
228
+ # We added an attribute to the init_kwargs of the tokenizers, which needs to be cleaned up.
229
+ for attribute_name in self.attributes:
230
+ attribute = getattr(self, attribute_name)
231
+ if isinstance(attribute, PreTrainedTokenizerBase):
232
+ del attribute.init_kwargs["auto_map"]
233
+
234
+ # If we save using the predefined names, we can load using `from_pretrained`
235
+ output_processor_file = os.path.join(save_directory, PROCESSOR_NAME)
236
+
237
+ # For now, let's not save to `processor_config.json` if the processor doesn't have extra attributes and
238
+ # `auto_map` is not specified.
239
+ if set(self.to_dict().keys()) != {"processor_class"}:
240
+ self.to_json_file(output_processor_file)
241
+ logger.info(f"processor saved in {output_processor_file}")
242
+
243
+ if push_to_hub:
244
+ self._upload_modified_files(
245
+ save_directory,
246
+ repo_id,
247
+ files_timestamps,
248
+ commit_message=commit_message,
249
+ token=kwargs.get("token"),
250
+ )
251
+
252
+ if set(self.to_dict().keys()) == {"processor_class"}:
253
+ return []
254
+ return [output_processor_file]
255
+
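A short usage sketch of `save_pretrained` (the checkpoint name and target path are illustrative): the wrapped tokenizer/feature-extractor files are always written, and `processor_config.json` only when the processor has extra attributes or an `auto_map`, as implemented above.

    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("openai/whisper-tiny")   # illustrative checkpoint
    saved_files = processor.save_pretrained("./my-processor")          # path is illustrative
    # Reload later with: AutoProcessor.from_pretrained("./my-processor")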
256
+ @classmethod
257
+ def get_processor_dict(
258
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
259
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
260
+ """
261
+ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
262
+ processor of type [`~processing_utils.ProcessingMixin`] using `from_args_and_dict`.
263
+
264
+ Parameters:
265
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
266
+ The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
267
+ subfolder (`str`, *optional*, defaults to `""`):
268
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
269
+ specify the folder name here.
270
+
271
+ Returns:
272
+ `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the processor object.
273
+ """
274
+ cache_dir = kwargs.pop("cache_dir", None)
275
+ force_download = kwargs.pop("force_download", False)
276
+ resume_download = kwargs.pop("resume_download", False)
277
+ proxies = kwargs.pop("proxies", None)
278
+ token = kwargs.pop("token", None)
279
+ local_files_only = kwargs.pop("local_files_only", False)
280
+ revision = kwargs.pop("revision", None)
281
+ subfolder = kwargs.pop("subfolder", "")
282
+
283
+ from_pipeline = kwargs.pop("_from_pipeline", None)
284
+ from_auto_class = kwargs.pop("_from_auto", False)
285
+
286
+ user_agent = {"file_type": "processor", "from_auto_class": from_auto_class}
287
+ if from_pipeline is not None:
288
+ user_agent["using_pipeline"] = from_pipeline
289
+
290
+ if is_offline_mode() and not local_files_only:
291
+ logger.info("Offline mode: forcing local_files_only=True")
292
+ local_files_only = True
293
+
294
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
295
+ is_local = os.path.isdir(pretrained_model_name_or_path)
296
+ if os.path.isdir(pretrained_model_name_or_path):
297
+ processor_file = os.path.join(pretrained_model_name_or_path, PROCESSOR_NAME)
298
+ if os.path.isfile(pretrained_model_name_or_path):
299
+ resolved_processor_file = pretrained_model_name_or_path
300
+ is_local = True
301
+ elif is_remote_url(pretrained_model_name_or_path):
302
+ processor_file = pretrained_model_name_or_path
303
+ resolved_processor_file = download_url(pretrained_model_name_or_path)
304
+ else:
305
+ processor_file = PROCESSOR_NAME
306
+ try:
307
+ # Load from local folder or from cache or download from model Hub and cache
308
+ resolved_processor_file = cached_file(
309
+ pretrained_model_name_or_path,
310
+ processor_file,
311
+ cache_dir=cache_dir,
312
+ force_download=force_download,
313
+ proxies=proxies,
314
+ resume_download=resume_download,
315
+ local_files_only=local_files_only,
316
+ token=token,
317
+ user_agent=user_agent,
318
+ revision=revision,
319
+ subfolder=subfolder,
320
+ _raise_exceptions_for_missing_entries=False,
321
+ )
322
+ except EnvironmentError:
323
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
324
+ # the original exception.
325
+ raise
326
+ except Exception:
327
+ # For any other exception, we throw a generic error.
328
+ raise EnvironmentError(
329
+ f"Can't load processor for '{pretrained_model_name_or_path}'. If you were trying to load"
330
+ " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
331
+ f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
332
+ f" directory containing a {PROCESSOR_NAME} file"
333
+ )
334
+
335
+ # Existing processors on the Hub created before #27761 being merged don't have `processor_config.json` (if not
336
+ # updated afterward), and we need to keep `from_pretrained` working. So here it falls back to an empty dict.
337
+ # (`cached_file` called using `_raise_exceptions_for_missing_entries=False` to avoid exception)
338
+ # However, for models added in the future, we won't get the expected error if this file is missing.
339
+ if resolved_processor_file is None:
340
+ return {}, kwargs
341
+
342
+ try:
343
+ # Load processor dict
344
+ with open(resolved_processor_file, "r", encoding="utf-8") as reader:
345
+ text = reader.read()
346
+ processor_dict = json.loads(text)
347
+
348
+ except json.JSONDecodeError:
349
+ raise EnvironmentError(
350
+ f"It looks like the config file at '{resolved_processor_file}' is not a valid JSON file."
351
+ )
352
+
353
+ if is_local:
354
+ logger.info(f"loading configuration file {resolved_processor_file}")
355
+ else:
356
+ logger.info(f"loading configuration file {processor_file} from cache at {resolved_processor_file}")
357
+
358
+ if "auto_map" in processor_dict and not is_local:
359
+ processor_dict["auto_map"] = add_model_info_to_auto_map(
360
+ processor_dict["auto_map"], pretrained_model_name_or_path
361
+ )
362
+
363
+ return processor_dict, kwargs
364
+
365
+ @classmethod
366
+ def from_args_and_dict(cls, args, processor_dict: Dict[str, Any], **kwargs):
367
+ """
368
+ Instantiates a type of [`~processing_utils.ProcessingMixin`] from a Python dictionary of parameters.
369
+
370
+ Args:
371
+ processor_dict (`Dict[str, Any]`):
372
+ Dictionary that will be used to instantiate the processor object. Such a dictionary can be
373
+ retrieved from a pretrained checkpoint by leveraging the
374
+ [`~processing_utils.ProcessingMixin.to_dict`] method.
375
+ kwargs (`Dict[str, Any]`):
376
+ Additional parameters from which to initialize the processor object.
377
+
378
+ Returns:
379
+ [`~processing_utils.ProcessingMixin`]: The processor object instantiated from those
380
+ parameters.
381
+ """
382
+ processor_dict = processor_dict.copy()
383
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
384
+
385
+ # Unlike image processors or feature extractors, whose `__init__` accepts `kwargs`, processors don't have `kwargs`.
386
+ # We have to pop some unused (but specific) arguments to make it work.
387
+ if "processor_class" in processor_dict:
388
+ del processor_dict["processor_class"]
389
+
390
+ if "auto_map" in processor_dict:
391
+ del processor_dict["auto_map"]
392
+
393
+ processor = cls(*args, **processor_dict)
394
+
395
+ # Update processor with kwargs if needed
396
+ for key in set(kwargs.keys()):
397
+ if hasattr(processor, key):
398
+ setattr(processor, key, kwargs.pop(key))
399
+
400
+ logger.info(f"Processor {processor}")
401
+ if return_unused_kwargs:
402
+ return processor, kwargs
403
+ else:
404
+ return processor
405
+
406
+ @classmethod
407
+ def from_pretrained(
408
+ cls,
409
+ pretrained_model_name_or_path: Union[str, os.PathLike],
410
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
411
+ force_download: bool = False,
412
+ local_files_only: bool = False,
413
+ token: Optional[Union[str, bool]] = None,
414
+ revision: str = "main",
415
+ **kwargs,
416
+ ):
417
+ r"""
418
+ Instantiate a processor associated with a pretrained model.
419
+
420
+ <Tip>
421
+
422
+ This class method is simply calling the feature extractor
423
+ [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`], image processor
424
+ [`~image_processing_utils.ImageProcessingMixin`] and the tokenizer
425
+ [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] methods. Please refer to the docstrings of the
426
+ methods above for more information.
427
+
428
+ </Tip>
429
+
430
+ Args:
431
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
432
+ This can be either:
433
+
434
+ - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
435
+ huggingface.co.
436
+ - a path to a *directory* containing a feature extractor file saved using the
437
+ [`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`.
438
+ - a path or url to a saved feature extractor JSON *file*, e.g.,
439
+ `./my_model_directory/preprocessor_config.json`.
440
+ **kwargs
441
+ Additional keyword arguments passed along to both
442
+ [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] and
443
+ [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`].
444
+ """
445
+ kwargs["cache_dir"] = cache_dir
446
+ kwargs["force_download"] = force_download
447
+ kwargs["local_files_only"] = local_files_only
448
+ kwargs["revision"] = revision
449
+
450
+ use_auth_token = kwargs.pop("use_auth_token", None)
451
+ if use_auth_token is not None:
452
+ warnings.warn(
453
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
454
+ FutureWarning,
455
+ )
456
+ if token is not None:
457
+ raise ValueError(
458
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
459
+ )
460
+ token = use_auth_token
461
+
462
+ if token is not None:
463
+ kwargs["token"] = token
464
+
465
+ args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
466
+ processor_dict, kwargs = cls.get_processor_dict(pretrained_model_name_or_path, **kwargs)
467
+
468
+ return cls.from_args_and_dict(args, processor_dict, **kwargs)
469
+
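Sketch of the call path implemented above (the checkpoint name is illustrative): `_get_arguments_from_pretrained` loads each declared attribute, `get_processor_dict` reads any `processor_config.json`, and the remaining kwargs are applied in `from_args_and_dict`.

    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained(
        "openai/whisper-tiny",   # illustrative checkpoint
        revision="main",         # forwarded to every sub-component's from_pretrained
    )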
470
+ @classmethod
471
+ def register_for_auto_class(cls, auto_class="AutoProcessor"):
472
+ """
473
+ Register this class with a given auto class. This should only be used for custom feature extractors as the ones
474
+ in the library are already mapped with `AutoProcessor`.
475
+
476
+ <Tip warning={true}>
477
+
478
+ This API is experimental and may have some slight breaking changes in the next releases.
479
+
480
+ </Tip>
481
+
482
+ Args:
483
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoProcessor"`):
484
+ The auto class to register this new feature extractor with.
485
+ """
486
+ if not isinstance(auto_class, str):
487
+ auto_class = auto_class.__name__
488
+
489
+ import transformers.models.auto as auto_module
490
+
491
+ if not hasattr(auto_module, auto_class):
492
+ raise ValueError(f"{auto_class} is not a valid auto class.")
493
+
494
+ cls._auto_class = auto_class
495
+
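Usage sketch for `register_for_auto_class` (the class name is hypothetical): custom processors shipped as remote code register themselves so that `AutoProcessor` can resolve them when loaded with `trust_remote_code=True`.

    from transformers import ProcessorMixin

    class MyCustomProcessor(ProcessorMixin):
        attributes = ["tokenizer"]
        tokenizer_class = "AutoTokenizer"

    # Associates the custom class with the AutoProcessor machinery.
    MyCustomProcessor.register_for_auto_class("AutoProcessor")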
496
+ @classmethod
497
+ def _get_arguments_from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
498
+ args = []
499
+ for attribute_name in cls.attributes:
500
+ class_name = getattr(cls, f"{attribute_name}_class")
501
+ if isinstance(class_name, tuple):
502
+ classes = tuple(getattr(transformers_module, n) if n is not None else None for n in class_name)
503
+ use_fast = kwargs.get("use_fast", True)
504
+ if use_fast and classes[1] is not None:
505
+ attribute_class = classes[1]
506
+ else:
507
+ attribute_class = classes[0]
508
+ else:
509
+ attribute_class = getattr(transformers_module, class_name)
510
+
511
+ args.append(attribute_class.from_pretrained(pretrained_model_name_or_path, **kwargs))
512
+ return args
513
+
514
+ @property
515
+ def model_input_names(self):
516
+ first_attribute = getattr(self, self.attributes[0])
517
+ return getattr(first_attribute, "model_input_names", None)
518
+
519
+
520
+ ProcessorMixin.push_to_hub = copy_func(ProcessorMixin.push_to_hub)
521
+ if ProcessorMixin.push_to_hub.__doc__ is not None:
522
+ ProcessorMixin.push_to_hub.__doc__ = ProcessorMixin.push_to_hub.__doc__.format(
523
+ object="processor", object_class="AutoProcessor", object_files="processor files"
524
+ )
llmeval-env/lib/python3.10/site-packages/transformers/pytorch_utils.py ADDED
@@ -0,0 +1,296 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ from typing import Callable, List, Optional, Set, Tuple, Union
16
+
17
+ import torch
18
+ from packaging import version
19
+ from safetensors.torch import storage_ptr, storage_size
20
+ from torch import nn
21
+
22
+ from .utils import is_torch_xla_available, logging
23
+
24
+
25
+ ALL_LAYERNORM_LAYERS = [nn.LayerNorm]
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version)
30
+
31
+ is_torch_greater_or_equal_than_2_2 = parsed_torch_version_base >= version.parse("2.2")
32
+ is_torch_greater_or_equal_than_2_1 = parsed_torch_version_base >= version.parse("2.1")
33
+ is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0")
34
+ is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13")
35
+ is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12")
36
+
37
+
38
+ def softmax_backward_data(parent, grad_output, output, dim, self):
39
+ """
40
+ A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according
41
+ to the torch version detected.
42
+ """
43
+
44
+ from torch import _softmax_backward_data
45
+
46
+ return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)
47
+
48
+
49
+ def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
50
+ """
51
+ Prune a linear layer to keep only entries in index.
52
+
53
+ Used to remove heads.
54
+
55
+ Args:
56
+ layer (`torch.nn.Linear`): The layer to prune.
57
+ index (`torch.LongTensor`): The indices to keep in the layer.
58
+ dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices.
59
+
60
+ Returns:
61
+ `torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`.
62
+ """
63
+ index = index.to(layer.weight.device)
64
+ W = layer.weight.index_select(dim, index).clone().detach()
65
+ if layer.bias is not None:
66
+ if dim == 1:
67
+ b = layer.bias.clone().detach()
68
+ else:
69
+ b = layer.bias[index].clone().detach()
70
+ new_size = list(layer.weight.size())
71
+ new_size[dim] = len(index)
72
+ new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
73
+ new_layer.weight.requires_grad = False
74
+ new_layer.weight.copy_(W.contiguous())
75
+ new_layer.weight.requires_grad = True
76
+ if layer.bias is not None:
77
+ new_layer.bias.requires_grad = False
78
+ new_layer.bias.copy_(b.contiguous())
79
+ new_layer.bias.requires_grad = True
80
+ return new_layer
81
+
82
+
83
+ class Conv1D(nn.Module):
84
+ """
85
+ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
86
+
87
+ Basically works like a linear layer but the weights are transposed.
88
+
89
+ Args:
90
+ nf (`int`): The number of output features.
91
+ nx (`int`): The number of input features.
92
+ """
93
+
94
+ def __init__(self, nf, nx):
95
+ super().__init__()
96
+ self.nf = nf
97
+ self.weight = nn.Parameter(torch.empty(nx, nf))
98
+ self.bias = nn.Parameter(torch.zeros(nf))
99
+ nn.init.normal_(self.weight, std=0.02)
100
+
101
+ def forward(self, x):
102
+ size_out = x.size()[:-1] + (self.nf,)
103
+ x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
104
+ x = x.view(size_out)
105
+ return x
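A shape sanity check for `Conv1D`, using arbitrary illustrative sizes: it maps the last dimension from `nx` to `nf` like `nn.Linear(nx, nf)`, but stores the weight transposed.

import torch

from transformers.pytorch_utils import Conv1D

conv = Conv1D(nf=12, nx=4)
x = torch.randn(2, 7, 4)              # (batch, seq_len, nx)

print(conv(x).shape)                  # torch.Size([2, 7, 12])
print(conv.weight.shape)              # torch.Size([4, 12]), i.e. (nx, nf) rather than (nf, nx)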
106
+
107
+
108
+ def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
109
+ """
110
+ Prune a Conv1D layer to keep only entries in index. A Conv1D works like a Linear layer (see e.g. BERT) but the weights
111
+ are transposed.
112
+
113
+ Used to remove heads.
114
+
115
+ Args:
116
+ layer ([`~pytorch_utils.Conv1D`]): The layer to prune.
117
+ index (`torch.LongTensor`): The indices to keep in the layer.
118
+ dim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices.
119
+
120
+ Returns:
121
+ [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
122
+ """
123
+ index = index.to(layer.weight.device)
124
+ W = layer.weight.index_select(dim, index).clone().detach()
125
+ if dim == 0:
126
+ b = layer.bias.clone().detach()
127
+ else:
128
+ b = layer.bias[index].clone().detach()
129
+ new_size = list(layer.weight.size())
130
+ new_size[dim] = len(index)
131
+ new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
132
+ new_layer.weight.requires_grad = False
133
+ new_layer.weight.copy_(W.contiguous())
134
+ new_layer.weight.requires_grad = True
135
+ new_layer.bias.requires_grad = False
136
+ new_layer.bias.copy_(b.contiguous())
137
+ new_layer.bias.requires_grad = True
138
+ return new_layer
139
+
140
+
141
+ def prune_layer(
142
+ layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
143
+ ) -> Union[nn.Linear, Conv1D]:
144
+ """
145
+ Prune a Conv1D or linear layer to keep only entries in index.
146
+
147
+ Used to remove heads.
148
+
149
+ Args:
150
+ layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
151
+ index (`torch.LongTensor`): The indices to keep in the layer.
152
+ dim (`int`, *optional*): The dimension on which to keep the indices.
153
+
154
+ Returns:
155
+ `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
156
+ """
157
+ if isinstance(layer, nn.Linear):
158
+ return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
159
+ elif isinstance(layer, Conv1D):
160
+ return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
161
+ else:
162
+ raise ValueError(f"Can't prune layer of class {layer.__class__}")
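The dispatch above can be exercised directly; note the different default pruning dimension for the two layer types (sizes are illustrative).

import torch
from torch import nn

from transformers.pytorch_utils import Conv1D, prune_layer

index = torch.tensor([1, 3])

print(prune_layer(nn.Linear(6, 5), index).weight.shape)     # torch.Size([2, 6]) -- rows pruned (dim=0)
print(prune_layer(Conv1D(nf=5, nx=6), index).weight.shape)  # torch.Size([6, 2]) -- columns pruned (dim=1)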
163
+
164
+
165
+ def apply_chunking_to_forward(
166
+ forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
167
+ ) -> torch.Tensor:
168
+ """
169
+ This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension
170
+ `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory.
171
+
172
+ If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly
173
+ applying `forward_fn` to `input_tensors`.
174
+
175
+ Args:
176
+ forward_fn (`Callable[..., torch.Tensor]`):
177
+ The forward function of the model.
178
+ chunk_size (`int`):
179
+ The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
180
+ chunk_dim (`int`):
181
+ The dimension over which the `input_tensors` should be chunked.
182
+ input_tensors (`Tuple[torch.Tensor]`):
183
+ The input tensors of `forward_fn` which will be chunked
184
+
185
+ Returns:
186
+ `torch.Tensor`: A tensor with the same shape as the one `forward_fn` would have given if applied directly.
187
+
188
+
189
+ Examples:
190
+
191
+ ```python
192
+ # rename the usual forward() fn to forward_chunk()
193
+ def forward_chunk(self, hidden_states):
194
+ hidden_states = self.decoder(hidden_states)
195
+ return hidden_states
196
+
197
+
198
+ # implement a chunked forward function
199
+ def forward(self, hidden_states):
200
+ return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
201
+ ```"""
202
+
203
+ assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"
204
+
205
+ # inspect.signature exists since python 3.5 and is a python method -> no problem with backward compatibility
206
+ num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
207
+ if num_args_in_forward_chunk_fn != len(input_tensors):
208
+ raise ValueError(
209
+ f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
210
+ "tensors are given"
211
+ )
212
+
213
+ if chunk_size > 0:
214
+ tensor_shape = input_tensors[0].shape[chunk_dim]
215
+ for input_tensor in input_tensors:
216
+ if input_tensor.shape[chunk_dim] != tensor_shape:
217
+ raise ValueError(
218
+ f"All input tenors have to be of the same shape: {tensor_shape}, "
219
+ f"found shape {input_tensor.shape[chunk_dim]}"
220
+ )
221
+
222
+ if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
223
+ raise ValueError(
224
+ f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
225
+ f"size {chunk_size}"
226
+ )
227
+
228
+ num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
229
+
230
+ # chunk input tensor into tuples
231
+ input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
232
+ # apply forward fn to every tuple
233
+ output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
234
+ # concatenate output at same dimension
235
+ return torch.cat(output_chunks, dim=chunk_dim)
236
+
237
+ return forward_fn(*input_tensors)
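A minimal runnable check of the equivalence stated above, using a per-position projection so the forward pass is independent along the chunked dimension (all sizes are illustrative).

import torch
from torch import nn

from transformers.pytorch_utils import apply_chunking_to_forward

proj = nn.Linear(4, 4)
hidden_states = torch.randn(2, 8, 4)   # (batch, seq_len, hidden)


def forward_chunk(chunk):
    return proj(chunk)


full = forward_chunk(hidden_states)
chunked = apply_chunking_to_forward(forward_chunk, 2, 1, hidden_states)  # chunk_size=2 over dim 1

print(torch.allclose(full, chunked))    # True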
238
+
239
+
240
+ def find_pruneable_heads_and_indices(
241
+ heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
242
+ ) -> Tuple[Set[int], torch.LongTensor]:
243
+ """
244
+ Finds the heads and their indices taking `already_pruned_heads` into account.
245
+
246
+ Args:
247
+ heads (`List[int]`): List of the indices of heads to prune.
248
+ n_heads (`int`): The number of heads in the model.
249
+ head_size (`int`): The size of each head.
250
+ already_pruned_heads (`Set[int]`): A set of already pruned heads.
251
+
252
+ Returns:
253
+ `Tuple[Set[int], torch.LongTensor]`: A tuple with the indices of heads to prune taking `already_pruned_heads`
254
+ into account and the indices of rows/columns to keep in the layer weight.
255
+ """
256
+ mask = torch.ones(n_heads, head_size)
257
+ heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads
258
+ for head in heads:
259
+ # Compute how many pruned heads are before the head and move the index accordingly
260
+ head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
261
+ mask[head] = 0
262
+ mask = mask.view(-1).contiguous().eq(1)
263
+ index: torch.LongTensor = torch.arange(len(mask))[mask].long()
264
+ return heads, index
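A concrete call with made-up head counts: head 1 was pruned earlier and we now ask to prune head 2; the returned index lists the flat rows/columns to keep in the attention projections.

import torch

from transformers.pytorch_utils import find_pruneable_heads_and_indices

heads, index = find_pruneable_heads_and_indices(
    heads=[2], n_heads=4, head_size=3, already_pruned_heads={1}
)

print(heads)   # {2}
print(index)   # tensor([0, 1, 2, 6, 7, 8, 9, 10, 11])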
265
+
266
+
267
+ def meshgrid(
268
+ *tensors: Union[torch.Tensor, List[torch.Tensor]], indexing: Optional[str] = None
269
+ ) -> Tuple[torch.Tensor, ...]:
270
+ """
271
+ Wrapper around torch.meshgrid to avoid warning messages about the introduced `indexing` argument.
272
+
273
+ Reference: https://pytorch.org/docs/1.13/generated/torch.meshgrid.html
274
+ """
275
+ return torch.meshgrid(*tensors, indexing=indexing)
276
+
277
+
278
+ def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
279
+ """
280
+ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
281
+ example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
282
+ guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
283
+ non-overlapping lifetimes may have the same id.
284
+ """
285
+ if tensor.device.type == "xla" and is_torch_xla_available():
286
+ # NOTE: xla tensors dont have storage
287
+ # use some other unique id to distinguish.
288
+ # this is a XLA tensor, it must be created using torch_xla's
289
+ # device. So the following import is safe:
290
+ import torch_xla
291
+
292
+ unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor)
293
+ else:
294
+ unique_id = storage_ptr(tensor)
295
+
296
+ return tensor.device, unique_id, storage_size(tensor)
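Two views over the same storage get the same identifier, while a copy does not; a small CPU-only illustration.

import torch

from transformers.pytorch_utils import id_tensor_storage

t = torch.arange(6.0)
view = t[2:]                            # shares storage with `t`

print(id_tensor_storage(t) == id_tensor_storage(view))       # True
print(id_tensor_storage(t) == id_tensor_storage(t.clone()))  # False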
llmeval-env/lib/python3.10/site-packages/transformers/safetensors_conversion.py ADDED
@@ -0,0 +1,111 @@
1
+ import json
2
+ import uuid
3
+ from typing import Optional
4
+
5
+ import requests
6
+ from huggingface_hub import Discussion, HfApi, get_repo_discussions
7
+
8
+ from .utils import cached_file, http_user_agent, logging
9
+
10
+
11
+ logger = logging.get_logger(__name__)
12
+
13
+
14
+ def previous_pr(api: HfApi, model_id: str, pr_title: str, token: str) -> Optional["Discussion"]:
15
+ main_commit = api.list_repo_commits(model_id, token=token)[0].commit_id
16
+ for discussion in get_repo_discussions(repo_id=model_id, token=token):
17
+ if discussion.title == pr_title and discussion.status == "open" and discussion.is_pull_request:
18
+ commits = api.list_repo_commits(model_id, revision=discussion.git_reference, token=token)
19
+
20
+ if main_commit == commits[1].commit_id:
21
+ return discussion
22
+ return None
23
+
24
+
25
+ def spawn_conversion(token: str, private: bool, model_id: str):
26
+ logger.info("Attempting to convert .bin model on the fly to safetensors.")
27
+
28
+ safetensors_convert_space_url = "https://safetensors-convert.hf.space"
29
+ sse_url = f"{safetensors_convert_space_url}/queue/join"
30
+ sse_data_url = f"{safetensors_convert_space_url}/queue/data"
31
+
32
+ # The `fn_index` is necessary to indicate to gradio that we will use the `run` method of the Space.
33
+ hash_data = {"fn_index": 1, "session_hash": str(uuid.uuid4())}
34
+
35
+ def start(_sse_connection, payload):
36
+ for line in _sse_connection.iter_lines():
37
+ line = line.decode()
38
+ if line.startswith("data:"):
39
+ resp = json.loads(line[5:])
40
+ logger.debug(f"Safetensors conversion status: {resp['msg']}")
41
+ if resp["msg"] == "queue_full":
42
+ raise ValueError("Queue is full! Please try again.")
43
+ elif resp["msg"] == "send_data":
44
+ event_id = resp["event_id"]
45
+ response = requests.post(
46
+ sse_data_url,
47
+ stream=True,
48
+ params=hash_data,
49
+ json={"event_id": event_id, **payload, **hash_data},
50
+ )
51
+ response.raise_for_status()
52
+ elif resp["msg"] == "process_completed":
53
+ return
54
+
55
+ with requests.get(sse_url, stream=True, params=hash_data) as sse_connection:
56
+ data = {"data": [model_id, private, token]}
57
+ try:
58
+ logger.debug("Spawning safetensors automatic conversion.")
59
+ start(sse_connection, data)
60
+ except Exception as e:
61
+ logger.warning(f"Error during conversion: {repr(e)}")
62
+
63
+
64
+ def get_conversion_pr_reference(api: HfApi, model_id: str, **kwargs):
65
+ private = api.model_info(model_id).private
66
+
67
+ logger.info("Attempting to create safetensors variant")
68
+ pr_title = "Adding `safetensors` variant of this model"
69
+ token = kwargs.get("token")
70
+
71
+ # This looks into the current repo's open PRs to see if a PR for safetensors was already open. If so, it
72
+ # returns it. It checks that the PR was opened by the bot and not by another user so as to prevent
73
+ # security breaches.
74
+ pr = previous_pr(api, model_id, pr_title, token=token)
75
+
76
+ if pr is None or (not private and pr.author != "SFConvertBot"):
77
+ spawn_conversion(token, private, model_id)
78
+ pr = previous_pr(api, model_id, pr_title, token=token)
79
+ else:
80
+ logger.info("Safetensors PR exists")
81
+
82
+ sha = f"refs/pr/{pr.num}"
83
+
84
+ return sha
85
+
86
+
87
+ def auto_conversion(pretrained_model_name_or_path: str, ignore_errors_during_conversion=False, **cached_file_kwargs):
88
+ try:
89
+ api = HfApi(token=cached_file_kwargs.get("token"), headers=http_user_agent())
90
+ sha = get_conversion_pr_reference(api, pretrained_model_name_or_path, **cached_file_kwargs)
91
+
92
+ if sha is None:
93
+ return None, None
94
+ cached_file_kwargs["revision"] = sha
95
+ del cached_file_kwargs["_commit_hash"]
96
+
97
+ # This is an additional HEAD call that could be removed if we could infer sharded/non-sharded from the PR
98
+ # description.
99
+ sharded = api.file_exists(
100
+ pretrained_model_name_or_path,
101
+ "model.safetensors.index.json",
102
+ revision=sha,
103
+ token=cached_file_kwargs.get("token"),
104
+ )
105
+ filename = "model.safetensors.index.json" if sharded else "model.safetensors"
106
+
107
+ resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
108
+ return resolved_archive_file, sha, sharded
109
+ except Exception as e:
110
+ if not ignore_errors_during_conversion:
111
+ raise e
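An illustrative call only: the repository id below is a placeholder, the function needs network access to the Hub and to the conversion Space, and the three-value unpacking assumes the conversion path succeeds.

from transformers.safetensors_conversion import auto_conversion

resolved_file, sha, sharded = auto_conversion(
    "some-org/bin-only-model",           # hypothetical repo that only has .bin weights
    ignore_errors_during_conversion=False,
    token=None,                          # or a Hub token for private repos
    _commit_hash=None,                   # popped internally before `cached_file` is called
)
print(resolved_file, sha, sharded)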
llmeval-env/lib/python3.10/site-packages/transformers/tokenization_utils.py ADDED
@@ -0,0 +1,1040 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Tokenization classes for python tokenizers. For fast tokenizers (provided by HuggingFace's tokenizers library) see
17
+ tokenization_utils_fast.py
18
+ """
19
+ import bisect
20
+ import itertools
21
+ import re
22
+ import unicodedata
23
+ from collections import OrderedDict
24
+ from typing import Any, Dict, List, Optional, Tuple, Union, overload
25
+
26
+ from .tokenization_utils_base import (
27
+ ENCODE_KWARGS_DOCSTRING,
28
+ ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
29
+ INIT_TOKENIZER_DOCSTRING,
30
+ AddedToken,
31
+ BatchEncoding,
32
+ EncodedInput,
33
+ EncodedInputPair,
34
+ PreTokenizedInput,
35
+ PreTokenizedInputPair,
36
+ PreTrainedTokenizerBase,
37
+ TextInput,
38
+ TextInputPair,
39
+ TruncationStrategy,
40
+ )
41
+ from .utils import PaddingStrategy, TensorType, add_end_docstrings, logging
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ # Slow tokenizers are saved in a vocabulary plus three separated files
47
+ SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
48
+ ADDED_TOKENS_FILE = "added_tokens.json"
49
+ TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
50
+
51
+
52
+ class Trie:
53
+ """
54
+ Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass.
55
+ Loose reference: https://en.wikipedia.org/wiki/Trie
56
+ """
57
+
58
+ def __init__(self):
59
+ self.data = {}
60
+ self._tokens = set()
61
+
62
+ def add(self, word: str):
63
+ """
64
+ Passes over every char (utf-8 char) on word and recursively adds it to the internal `data` trie representation.
65
+ The special key `""` is used to represent termination.
66
+
67
+ This function is idempotent: adding the same word twice will leave the trie unchanged.
68
+
69
+ Example:
70
+
71
+ ```python
72
+ >>> trie = Trie()
73
+ >>> trie.add("Hello 友達")
74
+ >>> trie.data
75
+ {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
76
+
77
+ >>> trie.add("Hello")
78
+ >>> trie.data
79
+ {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
80
+ ```
81
+ """
82
+ if not word:
83
+ # Prevent empty string
84
+ return
85
+
86
+ self._tokens.add(word)
87
+ ref = self.data
88
+ for char in word:
89
+ ref[char] = char in ref and ref[char] or {}
90
+ ref = ref[char]
91
+ ref[""] = 1
92
+
93
+ def split(self, text: str) -> List[str]:
94
+ """
95
+ Will look for the words added to the trie within `text`. Output is the original string split along the
96
+ boundaries of the words found.
97
+
98
+ This trie will match the longest possible word first!
99
+
100
+ Example:
101
+
102
+ ```python
103
+ >>> trie = Trie()
104
+ >>> trie.split("[CLS] This is a extra_id_100")
105
+ ["[CLS] This is a extra_id_100"]
106
+
107
+ >>> trie.add("[CLS]")
108
+ >>> trie.add("extra_id_1")
109
+ >>> trie.add("extra_id_100")
110
+ >>> trie.split("[CLS] This is a extra_id_100")
111
+ ["[CLS]", " This is a ", "extra_id_100"]
112
+ ```
113
+ """
114
+ # indexes are counted left of the chars index.
115
+ # "hello", index 0, is left of h, index 1 is between h and e.
116
+ # index 5 is right of the "o".
117
+
118
+ # States are going to capture every possible start (indexes as above)
119
+ # as keys, and have as values, a pointer to the position in the trie
120
+ # where we're at. This is a partial match for now.
121
+ # This enables to keep track of multiple matches while we're iterating
122
+ # the string
123
+ # If the trie contains, "blowing", and "lower" and we encounter the
124
+ # string "blower", we need to split into ["b", "lower"].
125
+ # This is where we need to keep track of multiple possible starts.
126
+ states = OrderedDict()
127
+
128
+ # This will contain every index where we need
129
+ # to cut.
130
+ # We force to cut at offset 0 and len(text) (added later)
131
+ offsets = [0]
132
+
133
+ # This is used by the lookahead which needs to skip over
134
+ # some text where the full match exceeded the place in the initial
135
+ # for loop
136
+ skip = 0
137
+ # Main loop, Giving this algorithm O(n) complexity
138
+ for current, current_char in enumerate(text):
139
+ if skip and current < skip:
140
+ # Prevents the lookahead for matching twice
141
+ # like extra_id_100 and id_100
142
+ continue
143
+
144
+ # This will track every state
145
+ # that stop matching, we need to stop tracking them.
146
+ # If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
147
+ # fail on "b", we need to remove 0 from the valid states.
148
+ to_remove = set()
149
+ # Whenever we found a match, we need to drop everything
150
+ # this is a greedy algorithm, it will match on the first found token
151
+ reset = False
152
+
153
+ # In this case, we already have partial matches (But unfinished)
154
+ for start, trie_pointer in states.items():
155
+ if "" in trie_pointer:
156
+ # This is a final match, we need to reset and
157
+ # store the results in `offsets`.
158
+
159
+ # Lookahead to match longest first
160
+ # Important in case of extra_id_1 vs extra_id_100
161
+ # Here we are also actively looking for other earlier partial
162
+ # matches
163
+ # "[CLS]", "L", we need to match CLS even if L is special
164
+ for lookstart, looktrie_pointer in states.items():
165
+ if lookstart > start:
166
+ # This partial match is later, we can stop looking
167
+ break
168
+ elif lookstart < start:
169
+ # This partial match is earlier, the trie pointer
170
+ # was already updated, so index is + 1
171
+ lookahead_index = current + 1
172
+ end = current + 1
173
+ else:
174
+ # Here lookstart == start and
175
+ # looktrie_pointer == trie_pointer
176
+ # It wasn't updated yet so indices are current ones
177
+ lookahead_index = current
178
+ end = current
179
+ next_char = text[lookahead_index] if lookahead_index < len(text) else None
180
+ if "" in looktrie_pointer:
181
+ start = lookstart
182
+ end = lookahead_index
183
+ skip = lookahead_index
184
+
185
+ while next_char in looktrie_pointer:
186
+ looktrie_pointer = looktrie_pointer[next_char]
187
+ lookahead_index += 1
188
+ if "" in looktrie_pointer:
189
+ start = lookstart
190
+ end = lookahead_index
191
+ skip = lookahead_index
192
+
193
+ if lookahead_index == len(text):
194
+ # End of string
195
+ break
196
+ next_char = text[lookahead_index]
197
+ # End lookahead
198
+
199
+ # Storing and resetting
200
+ offsets.append(start)
201
+ offsets.append(end)
202
+ reset = True
203
+ break
204
+ elif current_char in trie_pointer:
205
+ # The current character being looked at has a match within the trie
206
+ # update the pointer (it will be stored back into states later).
207
+ trie_pointer = trie_pointer[current_char]
208
+
209
+ # Storing back the new pointer into the states.
210
+ # Partial matches got longer by one.
211
+ states[start] = trie_pointer
212
+ else:
213
+ # The new character has no match in the trie, we need
214
+ # to stop keeping track of this partial match.
215
+ # We can't do it directly within the loop because of how
216
+ # python iteration works
217
+ to_remove.add(start)
218
+
219
+ # Either clearing the full start (we found a real match)
220
+ # Or clearing only the partial matches that didn't work.
221
+ if reset:
222
+ states = {}
223
+ else:
224
+ for start in to_remove:
225
+ del states[start]
226
+
227
+ # If this character is a starting character within the trie
228
+ # start keeping track of this partial match.
229
+ if current >= skip and current_char in self.data:
230
+ states[current] = self.data[current_char]
231
+
232
+ # We have a cut at the end with states.
233
+ for start, trie_pointer in states.items():
234
+ if "" in trie_pointer:
235
+ # This is a final match, we need to reset and
236
+ # store the results in `offsets`.
237
+ end = len(text)
238
+ offsets.append(start)
239
+ offsets.append(end)
240
+ # Longest cut is always the one with lower start so the first
241
+ # item so we need to break.
242
+ break
243
+
244
+ return self.cut_text(text, offsets)
245
+
246
+ def cut_text(self, text, offsets):
247
+ # We have all the offsets now, we just need to do the actual splitting.
248
+ # We need to eventually add the first part of the string and the eventual
249
+ # last part.
250
+ offsets.append(len(text))
251
+ tokens = []
252
+ start = 0
253
+ for end in offsets:
254
+ if start > end:
255
+ logger.error(
256
+ "There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it"
257
+ " anyway."
258
+ )
259
+ continue
260
+ elif start == end:
261
+ # This might happen if there's a match at index 0
262
+ # we're also preventing zero-width cuts in case of two
263
+ # consecutive matches
264
+ continue
265
+ tokens.append(text[start:end])
266
+ start = end
267
+
268
+ return tokens
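Putting the two doctest snippets above together into one runnable example: register the added tokens once, then split in a single pass (longest match wins).

from transformers.tokenization_utils import Trie

trie = Trie()
for tok in ["[CLS]", "extra_id_1", "extra_id_100"]:
    trie.add(tok)

print(trie.split("[CLS] This is a extra_id_100"))
# ['[CLS]', ' This is a ', 'extra_id_100']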
269
+
270
+
271
+ def _is_whitespace(char):
272
+ """Checks whether `char` is a whitespace character."""
273
+ # \t, \n, and \r are technically control characters but we treat them
274
+ # as whitespace since they are generally considered as such.
275
+ if char == " " or char == "\t" or char == "\n" or char == "\r":
276
+ return True
277
+ cat = unicodedata.category(char)
278
+ if cat == "Zs":
279
+ return True
280
+ return False
281
+
282
+
283
+ def _is_control(char):
284
+ """Checks whether `char` is a control character."""
285
+ # These are technically control characters but we count them as whitespace
286
+ # characters.
287
+ if char == "\t" or char == "\n" or char == "\r":
288
+ return False
289
+ cat = unicodedata.category(char)
290
+ if cat.startswith("C"):
291
+ return True
292
+ return False
293
+
294
+
295
+ def _is_punctuation(char):
296
+ """Checks whether `char` is a punctuation character."""
297
+ cp = ord(char)
298
+ # We treat all non-letter/number ASCII as punctuation.
299
+ # Characters such as "^", "$", and "`" are not in the Unicode
300
+ # Punctuation class but we treat them as punctuation anyways, for
301
+ # consistency.
302
+ if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
303
+ return True
304
+ cat = unicodedata.category(char)
305
+ if cat.startswith("P"):
306
+ return True
307
+ return False
308
+
309
+
310
+ def _is_end_of_word(text):
311
+ """Checks whether the last character in text is one of a punctuation, control or whitespace character."""
312
+ last_char = text[-1]
313
+ return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char))
314
+
315
+
316
+ def _is_start_of_word(text):
317
+ """Checks whether the first character in text is one of a punctuation, control or whitespace character."""
318
+ first_char = text[0]
319
+ return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char))
320
+
321
+
322
+ def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str):
323
+ """
324
+ Inserts one token into an ordered list if it does not already exist. Note: token_list must be sorted.
325
+ """
326
+ insertion_idx = bisect.bisect_left(token_list, new_token)
327
+ # Checks if new_token is already in the ordered token_list
328
+ if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token:
329
+ # new_token is in token_list, don't add
330
+ return
331
+ else:
332
+ token_list.insert(insertion_idx, new_token)
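A tiny check of the helper above: the list stays sorted and an existing token is not inserted twice.

from transformers.tokenization_utils import _insert_one_token_to_ordered_list

tokens = ["apple", "cherry"]
_insert_one_token_to_ordered_list(tokens, "banana")
_insert_one_token_to_ordered_list(tokens, "banana")   # already present: no-op

print(tokens)   # ['apple', 'banana', 'cherry']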
333
+
334
+
335
+ @add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
336
+ class PreTrainedTokenizer(PreTrainedTokenizerBase):
337
+ """
338
+ Base class for all slow tokenizers.
339
+
340
+ Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].
341
+
342
+ Handles all the shared methods for tokenization and special tokens, as well as the methods for downloading/caching/loading
343
+ pretrained tokenizers and for adding tokens to the vocabulary.
344
+
345
+ This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
346
+ specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
347
+ """
348
+
349
+ def __init__(self, **kwargs):
350
+ # 1. Init the parent class
351
+
352
+ self.tokens_trie = Trie()
353
+
354
+ # 2. init `_added_tokens_decoder` if child class did not
355
+ if not hasattr(self, "_added_tokens_decoder"):
356
+ self._added_tokens_decoder: Dict[int, AddedToken] = {}
357
+
358
+ # 3. if a `added_tokens_decoder` is passed, we are loading from a saved tokenizer, we overwrite
359
+ self._added_tokens_decoder.update(kwargs.pop("added_tokens_decoder", {}))
360
+ self._added_tokens_encoder: Dict[str, int] = {k.content: v for v, k in self._added_tokens_decoder.items()}
361
+
362
+ # 4 init the parent class
363
+ super().__init__(**kwargs)
364
+
365
+ # 4. If some of the special tokens are not part of the vocab, we add them, at the end.
366
+ # the order of addition is the same as self.SPECIAL_TOKENS_ATTRIBUTES following `tokenizers`
367
+ self._add_tokens(
368
+ [token for token in self.all_special_tokens_extended if token not in self._added_tokens_encoder],
369
+ special_tokens=True,
370
+ )
371
+
372
+ self._decode_use_source_tokenizer = False
373
+
374
+ @property
375
+ def is_fast(self) -> bool:
376
+ return False
377
+
378
+ @property
379
+ def vocab_size(self) -> int:
380
+ """
381
+ `int`: Size of the base vocabulary (without the added tokens).
382
+ """
383
+ raise NotImplementedError
384
+
385
+ @property
386
+ def added_tokens_encoder(self) -> Dict[str, int]:
387
+ """
388
+ Returns the sorted mapping from string to index. The added tokens encoder is cached for performance
389
+ optimisation in `self._added_tokens_encoder` for the slow tokenizers.
390
+ """
391
+ return {k.content: v for v, k in sorted(self._added_tokens_decoder.items(), key=lambda item: item[0])}
392
+
393
+ @property
394
+ def added_tokens_decoder(self) -> Dict[int, AddedToken]:
395
+ """
396
+ Returns the added tokens in the vocabulary as a dictionary of index to AddedToken.
397
+
398
+ Returns:
399
+ `Dict[int, AddedToken]`: The added tokens.
400
+ """
401
+ return dict(sorted(self._added_tokens_decoder.items(), key=lambda item: item[0]))
402
+
403
+ @added_tokens_decoder.setter
404
+ def added_tokens_decoder(self, value: Dict[int, Union[AddedToken, str]]) -> Dict[int, AddedToken]:
405
+ # Always raise an error if string because users should define the behavior
406
+ for index, token in value.items():
407
+ if not isinstance(token, (str, AddedToken)) or not isinstance(index, int):
408
+ raise ValueError(
409
+ f"The provided `added_tokens_decoder` has an element of type {index.__class__, token.__class__}, should be a dict of {int, Union[AddedToken, str]}"
410
+ )
411
+
412
+ self._added_tokens_decoder[index] = AddedToken(token) if isinstance(token, str) else token
413
+ self._added_tokens_encoder[str(token)] = index
414
+
415
+ def get_added_vocab(self) -> Dict[str, int]:
416
+ """
417
+ Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from
418
+ the fast call because for now we always add the tokens even if they are already in the vocabulary. This is
419
+ something we should change.
420
+
421
+ Returns:
422
+ `Dict[str, int]`: The added tokens.
423
+ """
424
+ return self._added_tokens_encoder
425
+
426
+ def __len__(self):
427
+ """
428
+ Size of the full vocabulary with the added tokens. Counts the `keys` and not the `values` because otherwise if
429
+ there is a hole in the vocab, we will add tokens at a wrong index.
430
+ """
431
+ return len(set(self.get_vocab().keys()))
432
+
433
+ def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
434
+ """
435
+ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
436
+ it with indices starting from the length of the current vocabulary. Special tokens are sometimes already in the
437
+ vocab which is why they have to be handled specifically.
438
+
439
+ Args:
440
+ new_tokens (`List[str]`or `List[tokenizers.AddedToken]`):
441
+ Token(s) to add in vocabulary. A token is counted as added if it's not already in the vocabulary
442
+ (tested by checking if the tokenizer assigns the index of the `unk_token` to them). If a token is part
443
+ of the vocabulary then we simply mark this token as an `AddedToken` which allows controlling the
444
+ stripping and normalization of this token. This is NOT possible in `tokenizers`.
445
+ special_tokens (`bool`, *optional*, defaults to `False`):
446
+ Whether or not the tokens should be added as special tokens.
447
+
448
+ Returns:
449
+ `int`: The number of tokens actually added to the vocabulary.
450
+
451
+ Examples:
452
+
453
+ ```python
454
+ # Let's see how to increase the vocabulary of Bert model and tokenizer
455
+ tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
456
+ model = BertModel.from_pretrained("google-bert/bert-base-uncased")
457
+
458
+ num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
459
+ print("We have added", num_added_toks, "tokens")
460
+ # Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
461
+ model.resize_token_embeddings(len(tokenizer))
462
+ ```"""
463
+ added_tokens = 0
464
+ if new_tokens is None:
465
+ return added_tokens
466
+ # TODO this is fairly slow to improve!
467
+ current_vocab = self.get_vocab().copy()
468
+ new_idx = len(current_vocab) # only call this once, len gives the last index + 1
469
+ for token in new_tokens:
470
+ if not isinstance(token, (str, AddedToken)):
471
+ raise TypeError(f"Token {token} is not a string but a {type(token)}.")
472
+ if str(token) == "":
473
+ continue
474
+ if isinstance(token, str):
475
+ if token in self._added_tokens_encoder:
476
+ continue
477
+ else:
478
+ # very important for fast and slow equivalence!
479
+ is_special = token in self.all_special_tokens or special_tokens
480
+ token = AddedToken(
481
+ token, rstrip=False, lstrip=False, normalized=not is_special, special=is_special
482
+ )
483
+ elif special_tokens:
484
+ # doing token.special=True changes the normalization! will fix in rust
485
+ # this is important and the only reason why the AddedTokens in each class are normalized by default
486
+ token.__setstate__({"special": True, "normalized": token.normalized})
487
+ if token in self._added_tokens_decoder:
488
+ continue
489
+ if not token.special and token.normalized and getattr(self, "do_lower_case", False):
490
+ # Normalize if requested
491
+ token.content = token.content.lower()
492
+ if token.content not in current_vocab:
493
+ token_index = new_idx + added_tokens
494
+ current_vocab[token.content] = token_index
495
+ added_tokens += 1
496
+ else:
497
+ token_index = current_vocab[token.content]
498
+
499
+ if token.special and str(token) not in self.all_special_tokens:
500
+ self._additional_special_tokens.append(token)
501
+ # the setter automatically updates the reverse map
502
+ self._added_tokens_decoder[token_index] = token
503
+ self._added_tokens_encoder[token.content] = token_index
504
+ if self.verbose:
505
+ logger.info(f"Adding {token} to the vocabulary")
506
+
507
+ self._update_trie()
508
+ return added_tokens
509
+
510
+ def _update_trie(self, unique_no_split_tokens: Optional[List[str]] = []):
511
+ for token in self._added_tokens_decoder.values():
512
+ if token not in self.tokens_trie._tokens:
513
+ self.tokens_trie.add(token.content)
514
+ for token in unique_no_split_tokens:
515
+ if token not in self.tokens_trie._tokens:
516
+ self.tokens_trie.add(token)
517
+
518
+ def num_special_tokens_to_add(self, pair: bool = False) -> int:
519
+ """
520
+ Returns the number of added tokens when encoding a sequence with special tokens.
521
+
522
+ <Tip>
523
+
524
+ This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put
525
+ this inside your training loop.
526
+
527
+ </Tip>
528
+
529
+ Args:
530
+ pair (`bool`, *optional*, defaults to `False`):
531
+ Whether the number of added tokens should be computed in the case of a sequence pair or a single
532
+ sequence.
533
+
534
+ Returns:
535
+ `int`: Number of special tokens added to sequences.
536
+ """
537
+ token_ids_0 = []
538
+ token_ids_1 = []
539
+ return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
540
+
541
+ def tokenize(self, text: TextInput, **kwargs) -> List[str]:
542
+ """
543
+ Converts a string into a sequence of tokens, using the tokenizer.
544
+
545
+ Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies
546
+ (BPE/SentencePieces/WordPieces). Takes care of added tokens.
547
+
548
+ Args:
549
+ text (`str`):
550
+ The sequence to be encoded.
551
+ **kwargs (additional keyword arguments):
552
+ Passed along to the model-specific `prepare_for_tokenization` preprocessing method.
553
+
554
+ Returns:
555
+ `List[str]`: The list of tokens.
556
+ """
557
+ split_special_tokens = kwargs.pop("split_special_tokens", self.split_special_tokens)
558
+
559
+ text, kwargs = self.prepare_for_tokenization(text, **kwargs)
560
+
561
+ if kwargs:
562
+ logger.warning(f"Keyword arguments {kwargs} not recognized.")
563
+
564
+ if hasattr(self, "do_lower_case") and self.do_lower_case:
565
+ # convert non-special tokens to lowercase. Might be super slow as well?
566
+ escaped_special_toks = [re.escape(s_tok) for s_tok in (self.all_special_tokens)]
567
+ escaped_special_toks += [
568
+ re.escape(s_tok.content)
569
+ for s_tok in (self._added_tokens_decoder.values())
570
+ if not s_tok.special and s_tok.normalized
571
+ ]
572
+ pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
573
+ text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)
574
+
575
+ if split_special_tokens:
576
+ no_split_token = []
577
+ tokens = [text]
578
+ else:
579
+ no_split_token = self._added_tokens_encoder.keys() # don't split on any of the added tokens
580
+ # "This is something<special_token_1> else"
581
+ tokens = self.tokens_trie.split(text)
582
+
583
+ # ["This is something", "<special_token_1>", " else"]
584
+ for i, token in enumerate(tokens):
585
+ if token in no_split_token:
586
+ tok_extended = self._added_tokens_decoder.get(self._added_tokens_encoder[token], None)
587
+ left = tokens[i - 1] if i > 0 else None
588
+ right = tokens[i + 1] if i < len(tokens) - 1 else None
589
+ if isinstance(tok_extended, AddedToken):
590
+ if tok_extended.rstrip and right:
591
+ # A bit counter-intuitive but we strip the left of the string
592
+ # since tok_extended.rstrip means the special token is eating all white spaces on its right
593
+ tokens[i + 1] = right.lstrip()
594
+ # Strip white spaces on the left
595
+ if tok_extended.lstrip and left:
596
+ tokens[i - 1] = left.rstrip() # Opposite here
597
+ if tok_extended.single_word and left and left[-1] != " ":
598
+ tokens[i - 1] += token
599
+ tokens[i] = ""
600
+ elif tok_extended.single_word and right and right[0] != " ":
601
+ tokens[i + 1] = token + tokens[i + 1]
602
+ tokens[i] = ""
603
+ else:
604
+ raise ValueError(
605
+ f"{tok_extended} cannot be tokenized because it was not properly added"
606
+ f" to the tokenizer. This means that it is not an `AddedToken` but a {type(tok_extended)}"
607
+ )
608
+ # ["This is something", "<special_token_1>", "else"]
609
+ tokenized_text = []
610
+ for token in tokens:
611
+ # Need to skip eventual empty (fully stripped) tokens
612
+ if not token:
613
+ continue
614
+ if token in no_split_token:
615
+ tokenized_text.append(token)
616
+ else:
617
+ tokenized_text.extend(self._tokenize(token))
618
+ # ["This", " is", " something", "<special_token_1>", "else"]
619
+ return tokenized_text
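A behavioural sketch of the method above, assuming the `google-bert/bert-base-uncased` files are available locally or can be downloaded: an added token is kept as one piece while the rest of the text goes through the usual WordPiece split.

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
tokenizer.add_tokens(["<special_token_1>"])

print(tokenizer.tokenize("This is something<special_token_1> else"))
# expected: ['this', 'is', 'something', '<special_token_1>', 'else']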
620
+
621
+ def _tokenize(self, text, **kwargs):
622
+ """
623
+ Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based
624
+ vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
625
+
626
+ Do NOT take care of added tokens.
627
+ """
628
+ raise NotImplementedError
629
+
630
+ def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
631
+ """
632
+ Converts a token string (or a sequence of tokens) into a single integer id (or a sequence of ids), using the
633
+ vocabulary.
634
+
635
+ Args:
636
+ tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s).
637
+
638
+ Returns:
639
+ `int` or `List[int]`: The token id or list of token ids.
640
+ """
641
+ if tokens is None:
642
+ return None
643
+
644
+ if isinstance(tokens, str):
645
+ return self._convert_token_to_id_with_added_voc(tokens)
646
+
647
+ ids = []
648
+ for token in tokens:
649
+ ids.append(self._convert_token_to_id_with_added_voc(token))
650
+ return ids
651
+
652
+ def _convert_token_to_id_with_added_voc(self, token):
653
+ if token is None:
654
+ return None
655
+
656
+ if token in self._added_tokens_encoder:
657
+ return self._added_tokens_encoder[token]
658
+ return self._convert_token_to_id(token)
659
+
660
+ def _convert_token_to_id(self, token):
661
+ raise NotImplementedError
662
+
663
+ def _encode_plus(
664
+ self,
665
+ text: Union[TextInput, PreTokenizedInput, EncodedInput],
666
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
667
+ add_special_tokens: bool = True,
668
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
669
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
670
+ max_length: Optional[int] = None,
671
+ stride: int = 0,
672
+ is_split_into_words: bool = False,
673
+ pad_to_multiple_of: Optional[int] = None,
674
+ return_tensors: Optional[Union[str, TensorType]] = None,
675
+ return_token_type_ids: Optional[bool] = None,
676
+ return_attention_mask: Optional[bool] = None,
677
+ return_overflowing_tokens: bool = False,
678
+ return_special_tokens_mask: bool = False,
679
+ return_offsets_mapping: bool = False,
680
+ return_length: bool = False,
681
+ verbose: bool = True,
682
+ **kwargs,
683
+ ) -> BatchEncoding:
684
+ def get_input_ids(text):
685
+ if isinstance(text, str):
686
+ tokens = self.tokenize(text, **kwargs)
687
+ return self.convert_tokens_to_ids(tokens)
688
+ elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
689
+ if is_split_into_words:
690
+ tokens = list(
691
+ itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text))
692
+ )
693
+ return self.convert_tokens_to_ids(tokens)
694
+ else:
695
+ return self.convert_tokens_to_ids(text)
696
+ elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
697
+ return text
698
+ else:
699
+ if is_split_into_words:
700
+ raise ValueError(
701
+ f"Input {text} is not valid. Should be a string or a list/tuple of strings when"
702
+ " `is_split_into_words=True`."
703
+ )
704
+ else:
705
+ raise ValueError(
706
+ f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of"
707
+ " integers."
708
+ )
709
+
710
+ if return_offsets_mapping:
711
+ raise NotImplementedError(
712
+ "return_offset_mapping is not available when using Python tokenizers. "
713
+ "To use this feature, change your tokenizer to one deriving from "
714
+ "transformers.PreTrainedTokenizerFast. "
715
+ "More information on available tokenizers at "
716
+ "https://github.com/huggingface/transformers/pull/2674"
717
+ )
718
+
719
+ first_ids = get_input_ids(text)
720
+ second_ids = get_input_ids(text_pair) if text_pair is not None else None
721
+
722
+ return self.prepare_for_model(
723
+ first_ids,
724
+ pair_ids=second_ids,
725
+ add_special_tokens=add_special_tokens,
726
+ padding=padding_strategy.value,
727
+ truncation=truncation_strategy.value,
728
+ max_length=max_length,
729
+ stride=stride,
730
+ pad_to_multiple_of=pad_to_multiple_of,
731
+ return_tensors=return_tensors,
732
+ prepend_batch_axis=True,
733
+ return_attention_mask=return_attention_mask,
734
+ return_token_type_ids=return_token_type_ids,
735
+ return_overflowing_tokens=return_overflowing_tokens,
736
+ return_special_tokens_mask=return_special_tokens_mask,
737
+ return_length=return_length,
738
+ verbose=verbose,
739
+ )
740
+
741
+ def _batch_encode_plus(
742
+ self,
743
+ batch_text_or_text_pairs: Union[
744
+ List[TextInput],
745
+ List[TextInputPair],
746
+ List[PreTokenizedInput],
747
+ List[PreTokenizedInputPair],
748
+ List[EncodedInput],
749
+ List[EncodedInputPair],
750
+ ],
751
+ add_special_tokens: bool = True,
752
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
753
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
754
+ max_length: Optional[int] = None,
755
+ stride: int = 0,
756
+ is_split_into_words: bool = False,
757
+ pad_to_multiple_of: Optional[int] = None,
758
+ return_tensors: Optional[Union[str, TensorType]] = None,
759
+ return_token_type_ids: Optional[bool] = None,
760
+ return_attention_mask: Optional[bool] = None,
761
+ return_overflowing_tokens: bool = False,
762
+ return_special_tokens_mask: bool = False,
763
+ return_offsets_mapping: bool = False,
764
+ return_length: bool = False,
765
+ verbose: bool = True,
766
+ **kwargs,
767
+ ) -> BatchEncoding:
768
+ def get_input_ids(text):
769
+ if isinstance(text, str):
770
+ tokens = self.tokenize(text, **kwargs)
771
+ return self.convert_tokens_to_ids(tokens)
772
+ elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
773
+ if is_split_into_words:
774
+ tokens = list(
775
+ itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text))
776
+ )
777
+ return self.convert_tokens_to_ids(tokens)
778
+ else:
779
+ return self.convert_tokens_to_ids(text)
780
+ elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
781
+ return text
782
+ else:
783
+ raise ValueError(
784
+ "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
785
+ )
786
+
787
+ if return_offsets_mapping:
788
+ raise NotImplementedError(
789
+ "return_offset_mapping is not available when using Python tokenizers. "
790
+ "To use this feature, change your tokenizer to one deriving from "
791
+ "transformers.PreTrainedTokenizerFast."
792
+ )
793
+
794
+ input_ids = []
795
+ for ids_or_pair_ids in batch_text_or_text_pairs:
796
+ if not isinstance(ids_or_pair_ids, (list, tuple)):
797
+ ids, pair_ids = ids_or_pair_ids, None
798
+ elif is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)):
799
+ ids, pair_ids = ids_or_pair_ids, None
800
+ else:
801
+ ids, pair_ids = ids_or_pair_ids
802
+
803
+ first_ids = get_input_ids(ids)
804
+ second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
805
+ input_ids.append((first_ids, second_ids))
806
+
807
+ batch_outputs = self._batch_prepare_for_model(
808
+ input_ids,
809
+ add_special_tokens=add_special_tokens,
810
+ padding_strategy=padding_strategy,
811
+ truncation_strategy=truncation_strategy,
812
+ max_length=max_length,
813
+ stride=stride,
814
+ pad_to_multiple_of=pad_to_multiple_of,
815
+ return_attention_mask=return_attention_mask,
816
+ return_token_type_ids=return_token_type_ids,
817
+ return_overflowing_tokens=return_overflowing_tokens,
818
+ return_special_tokens_mask=return_special_tokens_mask,
819
+ return_length=return_length,
820
+ return_tensors=return_tensors,
821
+ verbose=verbose,
822
+ )
823
+
824
+ return BatchEncoding(batch_outputs)
825
+
826
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
827
+ def _batch_prepare_for_model(
828
+ self,
829
+ batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
830
+ add_special_tokens: bool = True,
831
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
832
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
833
+ max_length: Optional[int] = None,
834
+ stride: int = 0,
835
+ pad_to_multiple_of: Optional[int] = None,
836
+ return_tensors: Optional[str] = None,
837
+ return_token_type_ids: Optional[bool] = None,
838
+ return_attention_mask: Optional[bool] = None,
839
+ return_overflowing_tokens: bool = False,
840
+ return_special_tokens_mask: bool = False,
841
+ return_length: bool = False,
842
+ verbose: bool = True,
843
+ ) -> BatchEncoding:
844
+ """
845
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
846
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
847
+ manages a moving window (with a user-defined stride) for overflowing tokens.
848
+
849
+ Args:
850
+ batch_ids_pairs: list of tokenized input ids or input ids pairs
851
+ """
852
+
853
+ batch_outputs = {}
854
+ for first_ids, second_ids in batch_ids_pairs:
855
+ outputs = self.prepare_for_model(
856
+ first_ids,
857
+ second_ids,
858
+ add_special_tokens=add_special_tokens,
859
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
860
+ truncation=truncation_strategy.value,
861
+ max_length=max_length,
862
+ stride=stride,
863
+ pad_to_multiple_of=None, # we pad in batch afterward
864
+ return_attention_mask=False, # we pad in batch afterward
865
+ return_token_type_ids=return_token_type_ids,
866
+ return_overflowing_tokens=return_overflowing_tokens,
867
+ return_special_tokens_mask=return_special_tokens_mask,
868
+ return_length=return_length,
869
+ return_tensors=None, # We convert the whole batch to tensors at the end
870
+ prepend_batch_axis=False,
871
+ verbose=verbose,
872
+ )
873
+
874
+ for key, value in outputs.items():
875
+ if key not in batch_outputs:
876
+ batch_outputs[key] = []
877
+ batch_outputs[key].append(value)
878
+
879
+ batch_outputs = self.pad(
880
+ batch_outputs,
881
+ padding=padding_strategy.value,
882
+ max_length=max_length,
883
+ pad_to_multiple_of=pad_to_multiple_of,
884
+ return_attention_mask=return_attention_mask,
885
+ )
886
+
887
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
888
+
889
+ return batch_outputs
890
+
891
+ def prepare_for_tokenization(
892
+ self, text: str, is_split_into_words: bool = False, **kwargs
893
+ ) -> Tuple[str, Dict[str, Any]]:
894
+ """
895
+ Performs any necessary transformations before tokenization.
896
+
897
+ This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
898
+ `kwargs` at the end of the encoding process to be sure all the arguments have been used.
899
+
900
+ Args:
901
+ text (`str`):
902
+ The text to prepare.
903
+ is_split_into_words (`bool`, *optional*, defaults to `False`):
904
+ Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
905
+ tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
906
+ which it will tokenize. This is useful for NER or token classification.
907
+ kwargs (`Dict[str, Any]`, *optional*):
908
+ Keyword arguments to use for the tokenization.
909
+
910
+ Returns:
911
+ `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
912
+ """
913
+ return (text, kwargs)
914
+
915
+ def get_special_tokens_mask(
916
+ self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
917
+ ) -> List[int]:
918
+ """
919
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
920
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
921
+
922
+ Args:
923
+ token_ids_0 (`List[int]`):
924
+ List of ids of the first sequence.
925
+ token_ids_1 (`List[int]`, *optional*):
926
+ List of ids of the second sequence.
927
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
928
+ Whether or not the token list is already formatted with special tokens for the model.
929
+
930
+ Returns:
931
+ A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
932
+ """
933
+ if already_has_special_tokens:
934
+ if token_ids_1 is not None:
935
+ raise ValueError(
936
+ "You should not supply a second sequence if the provided sequence of "
937
+ "ids is already formatted with special tokens for the model."
938
+ )
939
+
940
+ return super().get_special_tokens_mask(
941
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
942
+ )
943
+ return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
944
+
945
+ @overload
946
+ def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str:
947
+ ...
948
+
949
+ @overload
950
+ def convert_ids_to_tokens(self, ids: List[int], skip_special_tokens: bool = False) -> List[str]:
951
+ ...
952
+
953
+ def convert_ids_to_tokens(
954
+ self, ids: Union[int, List[int]], skip_special_tokens: bool = False
955
+ ) -> Union[str, List[str]]:
956
+ """
957
+ Converts a single index or a sequence of indices to a token or a sequence of tokens, using the vocabulary and
958
+ added tokens.
959
+
960
+ Args:
961
+ ids (`int` or `List[int]`):
962
+ The token id (or token ids) to convert to tokens.
963
+ skip_special_tokens (`bool`, *optional*, defaults to `False`):
964
+ Whether or not to remove special tokens in the decoding.
965
+
966
+ Returns:
967
+ `str` or `List[str]`: The decoded token(s).
968
+ """
969
+ if isinstance(ids, int):
970
+ if ids in self._added_tokens_decoder:
971
+ return self._added_tokens_decoder[ids].content
972
+ else:
973
+ return self._convert_id_to_token(ids)
974
+ tokens = []
975
+ for index in ids:
976
+ index = int(index)
977
+ if skip_special_tokens and index in self.all_special_ids:
978
+ continue
979
+ if index in self._added_tokens_decoder:
980
+ tokens.append(self._added_tokens_decoder[index].content)
981
+ else:
982
+ tokens.append(self._convert_id_to_token(index))
983
+ return tokens
984
+
985
+ def _convert_id_to_token(self, index: int) -> str:
986
+ raise NotImplementedError
987
+
988
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
989
+ return " ".join(tokens)
990
+
991
+ def _decode(
992
+ self,
993
+ token_ids: List[int],
994
+ skip_special_tokens: bool = False,
995
+ clean_up_tokenization_spaces: bool = None,
996
+ spaces_between_special_tokens: bool = True,
997
+ **kwargs,
998
+ ) -> str:
999
+ self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
1000
+
1001
+ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
1002
+ legacy_added_tokens = set(self._added_tokens_encoder.keys()) - set(self.all_special_tokens) | {
1003
+ token for token in self.additional_special_tokens if self.convert_tokens_to_ids(token) >= self.vocab_size
1004
+ }
1005
+ # To avoid mixing byte-level and unicode for byte-level BPE
1006
+ # we need to build string separately for added tokens and byte-level tokens
1007
+ # cf. https://github.com/huggingface/transformers/issues/1133
1008
+ sub_texts = []
1009
+ current_sub_text = []
1010
+ # TODO @ArthurZ in version 5, special tokens should be handled in convert_tokens_to_string, while _convert_tokens_to_string
1011
+ for token in filtered_tokens:
1012
+ if skip_special_tokens and token in self.all_special_ids:
1013
+ continue
1014
+ if token in legacy_added_tokens:
1015
+ if current_sub_text:
1016
+ string = self.convert_tokens_to_string(current_sub_text)
1017
+ if len(string) > 0:
1018
+ sub_texts.append(string)
1019
+ current_sub_text = []
1020
+ sub_texts.append(token)
1021
+ else:
1022
+ current_sub_text.append(token)
1023
+ if current_sub_text:
1024
+ sub_texts.append(self.convert_tokens_to_string(current_sub_text))
1025
+
1026
+ if spaces_between_special_tokens:
1027
+ text = " ".join(sub_texts)
1028
+ else:
1029
+ text = "".join(sub_texts)
1030
+
1031
+ clean_up_tokenization_spaces = (
1032
+ clean_up_tokenization_spaces
1033
+ if clean_up_tokenization_spaces is not None
1034
+ else self.clean_up_tokenization_spaces
1035
+ )
1036
+ if clean_up_tokenization_spaces:
1037
+ clean_text = self.clean_up_tokenization(text)
1038
+ return clean_text
1039
+ else:
1040
+ return text
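The `_decode` path above stitches `convert_ids_to_tokens` and `convert_tokens_to_string` together, handling added tokens separately from vocabulary tokens. A minimal sketch of that flow, assuming the `bert-base-uncased` checkpoint can be downloaded and the slow (Python) tokenizer is requested with `use_fast=False`:

```python
# Illustrative only: exercise the slow-tokenizer decode path shown above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)

ids = tokenizer.encode("Hello world")              # includes the [CLS] and [SEP] ids
tokens = tokenizer.convert_ids_to_tokens(ids)       # e.g. ['[CLS]', 'hello', 'world', '[SEP]']
text = tokenizer.convert_tokens_to_string(tokens)   # whitespace join by default

# decode() wraps both steps and can drop special tokens and clean up spaces.
print(tokenizer.decode(ids, skip_special_tokens=True))
```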
llmeval-env/lib/python3.10/site-packages/transformers/trainer.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/trainer_callback.py ADDED
@@ -0,0 +1,607 @@
1
+ # coding=utf-8
2
+ # Copyright 2020-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Callbacks to use with the Trainer class and customize the training loop.
17
+ """
18
+ import copy
19
+ import dataclasses
20
+ import json
21
+ from dataclasses import dataclass
22
+ from typing import Dict, List, Optional, Union
23
+
24
+ import numpy as np
25
+ from tqdm.auto import tqdm
26
+
27
+ from .trainer_utils import IntervalStrategy, has_length
28
+ from .training_args import TrainingArguments
29
+ from .utils import logging
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ @dataclass
36
+ class TrainerState:
37
+ """
38
+ A class containing the [`Trainer`] inner state that will be saved along with the model and optimizer when checkpointing
39
+ and passed to the [`TrainerCallback`].
40
+
41
+ <Tip>
42
+
43
+ Throughout this class, one step is to be understood as one update step. When using gradient accumulation, one update
44
+ step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update
45
+ step requires going through *n* batches.
46
+
47
+ </Tip>
48
+
49
+ Args:
50
+ epoch (`float`, *optional*):
51
+ Only set during training, will represent the epoch the training is at (the decimal part being the
52
+ percentage of the current epoch completed).
53
+ global_step (`int`, *optional*, defaults to 0):
54
+ During training, represents the number of update steps completed.
55
+ max_steps (`int`, *optional*, defaults to 0):
56
+ The number of update steps to do during the current training.
57
+ logging_steps (`int`, *optional*, defaults to 500):
58
+ Log every X update steps.
59
+ eval_steps (`int`, *optional*):
60
+ Run an evaluation every X steps.
61
+ save_steps (`int`, *optional*, defaults to 500):
62
+ Save a checkpoint every X update steps.
63
+ train_batch_size (`int`, *optional*):
64
+ The batch size for the training dataloader. Only needed when
65
+ `auto_find_batch_size` has been used.
66
+ num_input_tokens_seen (`int`, *optional*, defaults to 0):
67
+ The number of tokens seen during training (number of input tokens, not the number of prediction tokens).
68
+ total_flos (`float`, *optional*, defaults to 0):
69
+ The total number of floating-point operations done by the model since the beginning of training (stored as floats
70
+ to avoid overflow).
71
+ log_history (`List[Dict[str, float]]`, *optional*):
72
+ The list of logs done since the beginning of training.
73
+ best_metric (`float`, *optional*):
74
+ When tracking the best model, the value of the best metric encountered so far.
75
+ best_model_checkpoint (`str`, *optional*):
76
+ When tracking the best model, the name of the checkpoint for the best model encountered so
77
+ far.
78
+ is_local_process_zero (`bool`, *optional*, defaults to `True`):
79
+ Whether or not this process is the local main process (e.g., on one machine if training in a distributed fashion on
80
+ several machines).
81
+ is_world_process_zero (`bool`, *optional*, defaults to `True`):
82
+ Whether or not this process is the global main process (when training in a distributed fashion on several
83
+ machines, this is only going to be `True` for one process).
84
+ is_hyper_param_search (`bool`, *optional*, defaults to `False`):
85
+ Whether we are in the process of a hyperparameter search using Trainer.hyperparameter_search. This will
86
+ impact the way data will be logged in TensorBoard.
87
+ """
88
+
89
+ epoch: Optional[float] = None
90
+ global_step: int = 0
91
+ max_steps: int = 0
92
+ logging_steps: int = 500
93
+ eval_steps: int = 500
94
+ save_steps: int = 500
95
+ train_batch_size: int = None
96
+ num_train_epochs: int = 0
97
+ num_input_tokens_seen: int = 0
98
+ total_flos: float = 0
99
+ log_history: List[Dict[str, float]] = None
100
+ best_metric: Optional[float] = None
101
+ best_model_checkpoint: Optional[str] = None
102
+ is_local_process_zero: bool = True
103
+ is_world_process_zero: bool = True
104
+ is_hyper_param_search: bool = False
105
+ trial_name: str = None
106
+ trial_params: Dict[str, Union[str, float, int, bool]] = None
107
+
108
+ def __post_init__(self):
109
+ if self.log_history is None:
110
+ self.log_history = []
111
+
112
+ def save_to_json(self, json_path: str):
113
+ """Save the content of this instance in JSON format inside `json_path`."""
114
+ json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n"
115
+ with open(json_path, "w", encoding="utf-8") as f:
116
+ f.write(json_string)
117
+
118
+ @classmethod
119
+ def load_from_json(cls, json_path: str):
120
+ """Create an instance from the content of `json_path`."""
121
+ with open(json_path, "r", encoding="utf-8") as f:
122
+ text = f.read()
123
+ return cls(**json.loads(text))
124
+
125
+
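`TrainerState` is a plain dataclass, so the `save_to_json`/`load_from_json` pair above round-trips it losslessly; this is what the `Trainer` uses when it writes `trainer_state.json` inside a checkpoint. A minimal sketch (the values are made up):

```python
# Illustrative only: round-trip a TrainerState through JSON as checkpointing does.
import os
import tempfile

from transformers.trainer_callback import TrainerState

state = TrainerState(global_step=10, max_steps=100, log_history=[{"loss": 1.23, "step": 10}])

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "trainer_state.json")
    state.save_to_json(path)                      # dataclasses.asdict -> json.dumps
    restored = TrainerState.load_from_json(path)
    assert restored.global_step == 10
    assert restored.log_history[0]["loss"] == 1.23
```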
126
+ @dataclass
127
+ class TrainerControl:
128
+ """
129
+ A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some
130
+ switches in the training loop.
131
+
132
+ Args:
133
+ should_training_stop (`bool`, *optional*, defaults to `False`):
134
+ Whether or not the training should be interrupted.
135
+
136
+ If `True`, this variable will not be set back to `False`. The training will just stop.
137
+ should_epoch_stop (`bool`, *optional*, defaults to `False`):
138
+ Whether or not the current epoch should be interrupted.
139
+
140
+ If `True`, this variable will be set back to `False` at the beginning of the next epoch.
141
+ should_save (`bool`, *optional*, defaults to `False`):
142
+ Whether or not the model should be saved at this step.
143
+
144
+ If `True`, this variable will be set back to `False` at the beginning of the next step.
145
+ should_evaluate (`bool`, *optional*, defaults to `False`):
146
+ Whether or not the model should be evaluated at this step.
147
+
148
+ If `True`, this variable will be set back to `False` at the beginning of the next step.
149
+ should_log (`bool`, *optional*, defaults to `False`):
150
+ Whether or not the logs should be reported at this step.
151
+
152
+ If `True`, this variable will be set back to `False` at the beginning of the next step.
153
+ """
154
+
155
+ should_training_stop: bool = False
156
+ should_epoch_stop: bool = False
157
+ should_save: bool = False
158
+ should_evaluate: bool = False
159
+ should_log: bool = False
160
+
161
+ def _new_training(self):
162
+ """Internal method that resets the variable for a new training."""
163
+ self.should_training_stop = False
164
+
165
+ def _new_epoch(self):
166
+ """Internal method that resets the variable for a new epoch."""
167
+ self.should_epoch_stop = False
168
+
169
+ def _new_step(self):
170
+ """Internal method that resets the variable for a new step."""
171
+ self.should_save = False
172
+ self.should_evaluate = False
173
+ self.should_log = False
174
+
175
+
176
+ class TrainerCallback:
177
+ # no-format
178
+ """
179
+ A class for objects that will inspect the state of the training loop at some events and take some decisions. At
180
+ each of those events the following arguments are available:
181
+
182
+ Args:
183
+ args ([`TrainingArguments`]):
184
+ The training arguments used to instantiate the [`Trainer`].
185
+ state ([`TrainerState`]):
186
+ The current state of the [`Trainer`].
187
+ control ([`TrainerControl`]):
188
+ The object that is returned to the [`Trainer`] and can be used to make some decisions.
189
+ model ([`PreTrainedModel`] or `torch.nn.Module`):
190
+ The model being trained.
191
+ tokenizer ([`PreTrainedTokenizer`]):
192
+ The tokenizer used for encoding the data.
193
+ optimizer (`torch.optim.Optimizer`):
194
+ The optimizer used for the training steps.
195
+ lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`):
196
+ The scheduler used for setting the learning rate.
197
+ train_dataloader (`torch.utils.data.DataLoader`, *optional*):
198
+ The current dataloader used for training.
199
+ eval_dataloader (`torch.utils.data.DataLoader`, *optional*):
200
+ The current dataloader used for evaluation.
201
+ metrics (`Dict[str, float]`):
202
+ The metrics computed by the last evaluation phase.
203
+
204
+ Those are only accessible in the event `on_evaluate`.
205
+ logs (`Dict[str, float]`):
206
+ The values to log.
207
+
208
+ Those are only accessible in the event `on_log`.
209
+
210
+ The `control` object is the only one that can be changed by the callback, in which case the event that changes it
211
+ should return the modified version.
212
+
213
+ The arguments `args`, `state` and `control` are positional for all events; all the others are grouped in `kwargs`.
214
+ You can unpack the ones you need in the signature of the event handler. As an example, see the code of the
215
+ simple [`~transformers.PrinterCallback`].
216
+
217
+ Example:
218
+
219
+ ```python
220
+ class PrinterCallback(TrainerCallback):
221
+ def on_log(self, args, state, control, logs=None, **kwargs):
222
+ _ = logs.pop("total_flos", None)
223
+ if state.is_local_process_zero:
224
+ print(logs)
225
+ ```"""
226
+
227
+ def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
228
+ """
229
+ Event called at the end of the initialization of the [`Trainer`].
230
+ """
231
+ pass
232
+
233
+ def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
234
+ """
235
+ Event called at the beginning of training.
236
+ """
237
+ pass
238
+
239
+ def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
240
+ """
241
+ Event called at the end of training.
242
+ """
243
+ pass
244
+
245
+ def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
246
+ """
247
+ Event called at the beginning of an epoch.
248
+ """
249
+ pass
250
+
251
+ def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
252
+ """
253
+ Event called at the end of an epoch.
254
+ """
255
+ pass
256
+
257
+ def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
258
+ """
259
+ Event called at the beginning of a training step. If using gradient accumulation, one training step might take
260
+ several inputs.
261
+ """
262
+ pass
263
+
264
+ def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
265
+ """
266
+ Event called at the end of a substep during gradient accumulation.
267
+ """
268
+ pass
269
+
270
+ def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
271
+ """
272
+ Event called at the end of a training step. If using gradient accumulation, one training step might take
273
+ several inputs.
274
+ """
275
+ pass
276
+
277
+ def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
278
+ """
279
+ Event called after an evaluation phase.
280
+ """
281
+ pass
282
+
283
+ def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs):
284
+ """
285
+ Event called after a successful prediction.
286
+ """
287
+ pass
288
+
289
+ def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
290
+ """
291
+ Event called after a checkpoint save.
292
+ """
293
+ pass
294
+
295
+ def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
296
+ """
297
+ Event called after logging the last logs.
298
+ """
299
+ pass
300
+
301
+ def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
302
+ """
303
+ Event called after a prediction step.
304
+ """
305
+ pass
306
+
307
+
308
+ class CallbackHandler(TrainerCallback):
309
+ """Internal class that just calls the list of callbacks in order."""
310
+
311
+ def __init__(self, callbacks, model, tokenizer, optimizer, lr_scheduler):
312
+ self.callbacks = []
313
+ for cb in callbacks:
314
+ self.add_callback(cb)
315
+ self.model = model
316
+ self.tokenizer = tokenizer
317
+ self.optimizer = optimizer
318
+ self.lr_scheduler = lr_scheduler
319
+ self.train_dataloader = None
320
+ self.eval_dataloader = None
321
+
322
+ if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
323
+ logger.warning(
324
+ "The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
325
+ + "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of"
326
+ + "callbacks is\n:"
327
+ + self.callback_list
328
+ )
329
+
330
+ def add_callback(self, callback):
331
+ cb = callback() if isinstance(callback, type) else callback
332
+ cb_class = callback if isinstance(callback, type) else callback.__class__
333
+ if cb_class in [c.__class__ for c in self.callbacks]:
334
+ logger.warning(
335
+ f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
336
+ + "list of callbacks is\n:"
337
+ + self.callback_list
338
+ )
339
+ self.callbacks.append(cb)
340
+
341
+ def pop_callback(self, callback):
342
+ if isinstance(callback, type):
343
+ for cb in self.callbacks:
344
+ if isinstance(cb, callback):
345
+ self.callbacks.remove(cb)
346
+ return cb
347
+ else:
348
+ for cb in self.callbacks:
349
+ if cb == callback:
350
+ self.callbacks.remove(cb)
351
+ return cb
352
+
353
+ def remove_callback(self, callback):
354
+ if isinstance(callback, type):
355
+ for cb in self.callbacks:
356
+ if isinstance(cb, callback):
357
+ self.callbacks.remove(cb)
358
+ return
359
+ else:
360
+ self.callbacks.remove(callback)
361
+
362
+ @property
363
+ def callback_list(self):
364
+ return "\n".join(cb.__class__.__name__ for cb in self.callbacks)
365
+
366
+ def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
367
+ return self.call_event("on_init_end", args, state, control)
368
+
369
+ def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
370
+ control.should_training_stop = False
371
+ return self.call_event("on_train_begin", args, state, control)
372
+
373
+ def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
374
+ return self.call_event("on_train_end", args, state, control)
375
+
376
+ def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
377
+ control.should_epoch_stop = False
378
+ return self.call_event("on_epoch_begin", args, state, control)
379
+
380
+ def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
381
+ return self.call_event("on_epoch_end", args, state, control)
382
+
383
+ def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
384
+ control.should_log = False
385
+ control.should_evaluate = False
386
+ control.should_save = False
387
+ return self.call_event("on_step_begin", args, state, control)
388
+
389
+ def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
390
+ return self.call_event("on_substep_end", args, state, control)
391
+
392
+ def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
393
+ return self.call_event("on_step_end", args, state, control)
394
+
395
+ def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
396
+ control.should_evaluate = False
397
+ return self.call_event("on_evaluate", args, state, control, metrics=metrics)
398
+
399
+ def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
400
+ return self.call_event("on_predict", args, state, control, metrics=metrics)
401
+
402
+ def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
403
+ control.should_save = False
404
+ return self.call_event("on_save", args, state, control)
405
+
406
+ def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs):
407
+ control.should_log = False
408
+ return self.call_event("on_log", args, state, control, logs=logs)
409
+
410
+ def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
411
+ return self.call_event("on_prediction_step", args, state, control)
412
+
413
+ def call_event(self, event, args, state, control, **kwargs):
414
+ for callback in self.callbacks:
415
+ result = getattr(callback, event)(
416
+ args,
417
+ state,
418
+ control,
419
+ model=self.model,
420
+ tokenizer=self.tokenizer,
421
+ optimizer=self.optimizer,
422
+ lr_scheduler=self.lr_scheduler,
423
+ train_dataloader=self.train_dataloader,
424
+ eval_dataloader=self.eval_dataloader,
425
+ **kwargs,
426
+ )
427
+ # A Callback can skip the return of `control` if it doesn't change it.
428
+ if result is not None:
429
+ control = result
430
+ return control
431
+
432
+
433
+ class DefaultFlowCallback(TrainerCallback):
434
+ """
435
+ A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation and checkpoints.
436
+ """
437
+
438
+ def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
439
+ # Log
440
+ if state.global_step == 1 and args.logging_first_step:
441
+ control.should_log = True
442
+ if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % state.logging_steps == 0:
443
+ control.should_log = True
444
+
445
+ # Evaluate
446
+ if (
447
+ args.evaluation_strategy == IntervalStrategy.STEPS
448
+ and state.global_step % state.eval_steps == 0
449
+ and args.eval_delay <= state.global_step
450
+ ):
451
+ control.should_evaluate = True
452
+
453
+ # Save
454
+ if (
455
+ args.save_strategy == IntervalStrategy.STEPS
456
+ and state.save_steps > 0
457
+ and state.global_step % state.save_steps == 0
458
+ ):
459
+ control.should_save = True
460
+
461
+ # End training
462
+ if state.global_step >= state.max_steps:
463
+ control.should_training_stop = True
464
+
465
+ return control
466
+
467
+ def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
468
+ # Log
469
+ if args.logging_strategy == IntervalStrategy.EPOCH:
470
+ control.should_log = True
471
+
472
+ # Evaluate
473
+ if args.evaluation_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch:
474
+ control.should_evaluate = True
475
+
476
+ # Save
477
+ if args.save_strategy == IntervalStrategy.EPOCH:
478
+ control.should_save = True
479
+
480
+ return control
481
+
482
+
483
+ class ProgressCallback(TrainerCallback):
484
+ """
485
+ A [`TrainerCallback`] that displays the progress of training or evaluation.
486
+ """
487
+
488
+ def __init__(self):
489
+ self.training_bar = None
490
+ self.prediction_bar = None
491
+
492
+ def on_train_begin(self, args, state, control, **kwargs):
493
+ if state.is_world_process_zero:
494
+ self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True)
495
+ self.current_step = 0
496
+
497
+ def on_step_end(self, args, state, control, **kwargs):
498
+ if state.is_world_process_zero:
499
+ self.training_bar.update(state.global_step - self.current_step)
500
+ self.current_step = state.global_step
501
+
502
+ def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
503
+ if state.is_world_process_zero and has_length(eval_dataloader):
504
+ if self.prediction_bar is None:
505
+ self.prediction_bar = tqdm(
506
+ total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True
507
+ )
508
+ self.prediction_bar.update(1)
509
+
510
+ def on_evaluate(self, args, state, control, **kwargs):
511
+ if state.is_world_process_zero:
512
+ if self.prediction_bar is not None:
513
+ self.prediction_bar.close()
514
+ self.prediction_bar = None
515
+
516
+ def on_predict(self, args, state, control, **kwargs):
517
+ if state.is_world_process_zero:
518
+ if self.prediction_bar is not None:
519
+ self.prediction_bar.close()
520
+ self.prediction_bar = None
521
+
522
+ def on_log(self, args, state, control, logs=None, **kwargs):
523
+ if state.is_world_process_zero and self.training_bar is not None:
524
+ # avoid modifying the logs object as it is shared between callbacks
525
+ logs = copy.deepcopy(logs)
526
+ _ = logs.pop("total_flos", None)
527
+ # round numbers so that it looks better in console
528
+ if "epoch" in logs:
529
+ logs["epoch"] = round(logs["epoch"], 2)
530
+ self.training_bar.write(str(logs))
531
+
532
+ def on_train_end(self, args, state, control, **kwargs):
533
+ if state.is_world_process_zero:
534
+ self.training_bar.close()
535
+ self.training_bar = None
536
+
537
+
538
+ class PrinterCallback(TrainerCallback):
539
+ """
540
+ A bare [`TrainerCallback`] that just prints the logs.
541
+ """
542
+
543
+ def on_log(self, args, state, control, logs=None, **kwargs):
544
+ _ = logs.pop("total_flos", None)
545
+ if state.is_local_process_zero:
546
+ print(logs)
547
+
548
+
549
+ class EarlyStoppingCallback(TrainerCallback):
550
+ """
551
+ A [`TrainerCallback`] that handles early stopping.
552
+
553
+ Args:
554
+ early_stopping_patience (`int`):
555
+ Use with `metric_for_best_model` to stop training when the specified metric worsens for
556
+ `early_stopping_patience` evaluation calls.
557
+ early_stopping_threshold(`float`, *optional*):
558
+ Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the
559
+ specified metric must improve to satisfy early stopping conditions.
560
+
561
+ This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric
562
+ in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the
563
+ early stopping will not occur until the next save step.
564
+ """
565
+
566
+ def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
567
+ self.early_stopping_patience = early_stopping_patience
568
+ self.early_stopping_threshold = early_stopping_threshold
569
+ # early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
570
+ self.early_stopping_patience_counter = 0
571
+
572
+ def check_metric_value(self, args, state, control, metric_value):
573
+ # best_metric is set by code for load_best_model
574
+ operator = np.greater if args.greater_is_better else np.less
575
+ if state.best_metric is None or (
576
+ operator(metric_value, state.best_metric)
577
+ and abs(metric_value - state.best_metric) > self.early_stopping_threshold
578
+ ):
579
+ self.early_stopping_patience_counter = 0
580
+ else:
581
+ self.early_stopping_patience_counter += 1
582
+
583
+ def on_train_begin(self, args, state, control, **kwargs):
584
+ assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
585
+ assert (
586
+ args.metric_for_best_model is not None
587
+ ), "EarlyStoppingCallback requires metric_for_best_model is defined"
588
+ assert (
589
+ args.evaluation_strategy != IntervalStrategy.NO
590
+ ), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"
591
+
592
+ def on_evaluate(self, args, state, control, metrics, **kwargs):
593
+ metric_to_check = args.metric_for_best_model
594
+ if not metric_to_check.startswith("eval_"):
595
+ metric_to_check = f"eval_{metric_to_check}"
596
+ metric_value = metrics.get(metric_to_check)
597
+
598
+ if metric_value is None:
599
+ logger.warning(
600
+ f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping"
601
+ " is disabled"
602
+ )
603
+ return
604
+
605
+ self.check_metric_value(args, state, control, metric_value)
606
+ if self.early_stopping_patience_counter >= self.early_stopping_patience:
607
+ control.should_training_stop = True
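The callbacks defined in this file are meant to be passed to a `Trainer` through its `callbacks` argument. A minimal sketch wiring a custom callback and `EarlyStoppingCallback` together; `my_model`, `train_ds` and `eval_ds` are placeholders for a real model and datasets, and the `TrainingArguments` values are only examples:

```python
# Illustrative only: attach a custom callback and early stopping to a Trainer.
from transformers import EarlyStoppingCallback, Trainer, TrainerCallback, TrainingArguments

class LossPrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if state.is_world_process_zero and logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

args = TrainingArguments(
    output_dir="out",
    evaluation_strategy="steps",   # early stopping needs periodic evaluation
    eval_steps=500,
    save_steps=500,                # keep save and eval steps aligned (see docstring above)
    load_best_model_at_end=True,   # asserted by EarlyStoppingCallback.on_train_begin
    metric_for_best_model="loss",
)

trainer = Trainer(
    model=my_model,                # placeholder
    args=args,
    train_dataset=train_ds,        # placeholder
    eval_dataset=eval_ds,          # placeholder
    callbacks=[LossPrinterCallback(), EarlyStoppingCallback(early_stopping_patience=3)],
)
# trainer.train()
```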
llmeval-env/lib/python3.10/site-packages/transformers/trainer_pt_utils.py ADDED
@@ -0,0 +1,1361 @@
1
+ # coding=utf-8
2
+ # Copyright 2020-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Torch utilities for the Trainer class.
17
+ """
18
+
19
+ import copy
20
+ import datetime
21
+ import io
22
+ import json
23
+ import math
24
+ import os
25
+ import sys
26
+ import warnings
27
+ from collections.abc import Mapping
28
+ from contextlib import contextmanager
29
+ from dataclasses import dataclass, field
30
+ from logging import StreamHandler
31
+ from typing import Any, Dict, Iterator, List, Optional, Union
32
+
33
+ import numpy as np
34
+ import torch
35
+ import torch.distributed as dist
36
+ from torch import nn
37
+ from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler
38
+ from torch.utils.data.distributed import DistributedSampler
39
+
40
+ from .integrations.deepspeed import is_deepspeed_zero3_enabled
41
+ from .tokenization_utils_base import BatchEncoding
42
+ from .utils import (
43
+ is_sagemaker_mp_enabled,
44
+ is_torch_available,
45
+ is_torch_xla_available,
46
+ is_training_run_on_sagemaker,
47
+ logging,
48
+ )
49
+
50
+
51
+ if is_training_run_on_sagemaker():
52
+ logging.add_handler(StreamHandler(sys.stdout))
53
+
54
+ if is_torch_xla_available():
55
+ import torch_xla.core.xla_model as xm
56
+
57
+ if is_torch_available():
58
+ from .pytorch_utils import is_torch_greater_or_equal_than_2_0
59
+
60
+ if is_torch_greater_or_equal_than_2_0:
61
+ from torch.optim.lr_scheduler import LRScheduler
62
+ else:
63
+ from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
64
+
65
+
66
+ # this is used to suppress an undesired warning emitted by pytorch versions 1.4.2-1.7.0
67
+ try:
68
+ from torch.optim.lr_scheduler import SAVE_STATE_WARNING
69
+ except ImportError:
70
+ SAVE_STATE_WARNING = ""
71
+
72
+ logger = logging.get_logger(__name__)
73
+
74
+
75
+ def get_dataloader_sampler(dataloader):
76
+ if hasattr(dataloader, "batch_sampler") and dataloader.batch_sampler is not None:
77
+ return get_dataloader_sampler(dataloader.batch_sampler)
78
+ elif hasattr(dataloader, "sampler"):
79
+ return dataloader.sampler
80
+
81
+
82
+ def atleast_1d(tensor_or_array: Union[torch.Tensor, np.ndarray]):
83
+ if isinstance(tensor_or_array, torch.Tensor):
84
+ if hasattr(torch, "atleast_1d"):
85
+ tensor_or_array = torch.atleast_1d(tensor_or_array)
86
+ elif tensor_or_array.ndim < 1:
87
+ tensor_or_array = tensor_or_array[None]
88
+ else:
89
+ tensor_or_array = np.atleast_1d(tensor_or_array)
90
+ return tensor_or_array
91
+
92
+
93
+ def torch_pad_and_concatenate(tensor1, tensor2, padding_index=-100):
94
+ """Concatenates `tensor1` and `tensor2` on first axis, applying padding on the second if necessary."""
95
+ tensor1 = atleast_1d(tensor1)
96
+ tensor2 = atleast_1d(tensor2)
97
+
98
+ if len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]:
99
+ return torch.cat((tensor1, tensor2), dim=0)
100
+
101
+ # Let's figure out the new shape
102
+ new_shape = (tensor1.shape[0] + tensor2.shape[0], max(tensor1.shape[1], tensor2.shape[1])) + tensor1.shape[2:]
103
+
104
+ # Now let's fill the result tensor
105
+ result = tensor1.new_full(new_shape, padding_index)
106
+ result[: tensor1.shape[0], : tensor1.shape[1]] = tensor1
107
+ result[tensor1.shape[0] :, : tensor2.shape[1]] = tensor2
108
+ return result
109
+
110
+
111
+ def numpy_pad_and_concatenate(array1, array2, padding_index=-100):
112
+ """Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary."""
113
+ array1 = atleast_1d(array1)
114
+ array2 = atleast_1d(array2)
115
+
116
+ if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]:
117
+ return np.concatenate((array1, array2), axis=0)
118
+
119
+ # Let's figure out the new shape
120
+ new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:]
121
+
122
+ # Now let's fill the result tensor
123
+ result = np.full_like(array1, padding_index, shape=new_shape)
124
+ result[: array1.shape[0], : array1.shape[1]] = array1
125
+ result[array1.shape[0] :, : array2.shape[1]] = array2
126
+ return result
127
+
128
+
129
+ def nested_concat(tensors, new_tensors, padding_index=-100):
130
+ """
131
+ Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or
132
+ nested list/tuples/dict of tensors.
133
+ """
134
+ assert type(tensors) == type(
135
+ new_tensors
136
+ ), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
137
+ if isinstance(tensors, (list, tuple)):
138
+ return type(tensors)(nested_concat(t, n, padding_index=padding_index) for t, n in zip(tensors, new_tensors))
139
+ elif isinstance(tensors, torch.Tensor):
140
+ return torch_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
141
+ elif isinstance(tensors, Mapping):
142
+ return type(tensors)(
143
+ {k: nested_concat(t, new_tensors[k], padding_index=padding_index) for k, t in tensors.items()}
144
+ )
145
+ elif isinstance(tensors, np.ndarray):
146
+ return numpy_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
147
+ else:
148
+ raise TypeError(f"Unsupported type for concatenation: got {type(tensors)}")
149
+
150
+
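`nested_concat` above dispatches to the pad-and-concatenate helpers, so batches with different sequence lengths can still be accumulated into a single array. A minimal sketch of the padding behaviour on plain tensors:

```python
# Illustrative only: concatenating two batches with different sequence lengths.
import torch

from transformers.trainer_pt_utils import nested_concat

a = torch.ones(2, 3, dtype=torch.long)    # batch of 2, sequence length 3
b = torch.zeros(1, 5, dtype=torch.long)   # batch of 1, sequence length 5

out = nested_concat(a, b, padding_index=-100)
print(out.shape)   # torch.Size([3, 5]) -- padded on the second dimension
print(out[0])      # tensor([   1,    1,    1, -100, -100])
```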
151
+ def find_batch_size(tensors):
152
+ """
153
+ Find the first dimension of a tensor in a nested list/tuple/dict of tensors.
154
+ """
155
+ if isinstance(tensors, (list, tuple)):
156
+ for t in tensors:
157
+ result = find_batch_size(t)
158
+ if result is not None:
159
+ return result
160
+ elif isinstance(tensors, Mapping):
161
+ for key, value in tensors.items():
162
+ result = find_batch_size(value)
163
+ if result is not None:
164
+ return result
165
+ elif isinstance(tensors, torch.Tensor):
166
+ return tensors.shape[0] if len(tensors.shape) >= 1 else None
167
+ elif isinstance(tensors, np.ndarray):
168
+ return tensors.shape[0] if len(tensors.shape) >= 1 else None
169
+
170
+
171
+ def nested_numpify(tensors):
172
+ "Numpify `tensors` (even if it's a nested list/tuple/dict of tensors)."
173
+ if isinstance(tensors, (list, tuple)):
174
+ return type(tensors)(nested_numpify(t) for t in tensors)
175
+ if isinstance(tensors, Mapping):
176
+ return type(tensors)({k: nested_numpify(t) for k, t in tensors.items()})
177
+
178
+ t = tensors.cpu()
179
+ if t.dtype == torch.bfloat16:
180
+ # As of Numpy 1.21.4, NumPy does not support bfloat16 (see
181
+ # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).
182
+ # Until Numpy adds bfloat16, we must convert to float32.
183
+ t = t.to(torch.float32)
184
+ return t.numpy()
185
+
186
+
187
+ def nested_detach(tensors):
188
+ "Detach `tensors` (even if it's a nested list/tuple/dict of tensors)."
189
+ if isinstance(tensors, (list, tuple)):
190
+ return type(tensors)(nested_detach(t) for t in tensors)
191
+ elif isinstance(tensors, Mapping):
192
+ return type(tensors)({k: nested_detach(t) for k, t in tensors.items()})
193
+ return tensors.detach()
194
+
195
+
196
+ def nested_xla_mesh_reduce(tensors, name):
197
+ if is_torch_xla_available():
198
+ import torch_xla.core.xla_model as xm
199
+
200
+ if isinstance(tensors, (list, tuple)):
201
+ return type(tensors)(nested_xla_mesh_reduce(t, f"{name}_{i}") for i, t in enumerate(tensors))
202
+ if isinstance(tensors, Mapping):
203
+ return type(tensors)(
204
+ {k: nested_xla_mesh_reduce(t, f"{name}_{i}") for i, (k, t) in enumerate(tensors.items())}
205
+ )
206
+
207
+ tensors = atleast_1d(tensors)
208
+ return xm.mesh_reduce(name, tensors, torch.cat)
209
+ else:
210
+ raise ImportError("Torch xla must be installed to use `nested_xla_mesh_reduce`")
211
+
212
+
213
+ def distributed_concat(tensor: Any, num_total_examples: Optional[int] = None) -> Any:
214
+ try:
215
+ if isinstance(tensor, (tuple, list)):
216
+ return type(tensor)(distributed_concat(t, num_total_examples) for t in tensor)
217
+ if isinstance(tensor, Mapping):
218
+ return type(tensor)({k: distributed_concat(t, num_total_examples) for k, t in tensor.items()})
219
+ tensor = atleast_1d(tensor).contiguous()
220
+ output_tensors = [tensor.clone() for _ in range(dist.get_world_size())]
221
+ dist.all_gather(output_tensors, tensor)
222
+ concat = torch.cat(output_tensors, dim=0)
223
+
224
+ # truncate the dummy elements added by SequentialDistributedSampler
225
+ if num_total_examples is not None:
226
+ concat = concat[:num_total_examples]
227
+ return concat
228
+ except AssertionError:
229
+ raise AssertionError("Not currently using distributed training")
230
+
231
+
232
+ def distributed_broadcast_scalars(
233
+ scalars: List[Union[int, float]],
234
+ num_total_examples: Optional[int] = None,
235
+ device: Optional[torch.device] = torch.device("cuda"),
236
+ ) -> torch.Tensor:
237
+ try:
238
+ tensorized_scalar = torch.tensor(scalars).to(device)
239
+ output_tensors = [tensorized_scalar.clone() for _ in range(dist.get_world_size())]
240
+ dist.all_gather(output_tensors, tensorized_scalar)
241
+ concat = torch.cat(output_tensors, dim=0)
242
+
243
+ # truncate the dummy elements added by SequentialDistributedSampler
244
+ if num_total_examples is not None:
245
+ concat = concat[:num_total_examples]
246
+ return concat
247
+ except AssertionError:
248
+ raise AssertionError("Not currently using distributed training")
249
+
250
+
251
+ def reissue_pt_warnings(caught_warnings):
252
+ # Reissue warnings that are not the SAVE_STATE_WARNING
253
+ if len(caught_warnings) > 1:
254
+ for w in caught_warnings:
255
+ if w.category != UserWarning or w.message != SAVE_STATE_WARNING:
256
+ warnings.warn(w.message, w.category)
257
+
258
+
259
+ @contextmanager
260
+ def torch_distributed_zero_first(local_rank: int):
261
+ """
262
+ Context manager to make all processes in distributed training wait for the local master to do something.
263
+
264
+ Args:
265
+ local_rank (`int`): The rank of the local process.
266
+ """
267
+ if local_rank not in [-1, 0]:
268
+ dist.barrier()
269
+ yield
270
+ if local_rank == 0:
271
+ dist.barrier()
272
+
273
+
274
+ class DistributedSamplerWithLoop(DistributedSampler):
275
+ """
276
+ Like a `torch.utils.data.distributed.DistributedSampler` but loops back at the end to the beginning of the shuffled
277
+ samples to make each process have a round multiple of batch_size samples.
278
+
279
+ Args:
280
+ dataset (`torch.utils.data.Dataset`):
281
+ Dataset used for sampling.
282
+ batch_size (`int`):
283
+ The batch size used with this sampler
284
+ kwargs (`Dict[str, Any]`, *optional*):
285
+ All other keyword arguments passed to `DistributedSampler`.
286
+ """
287
+
288
+ def __init__(self, dataset, batch_size, **kwargs):
289
+ super().__init__(dataset, **kwargs)
290
+ self.batch_size = batch_size
291
+
292
+ def __iter__(self):
293
+ indices = list(super().__iter__())
294
+ remainder = 0 if len(indices) % self.batch_size == 0 else self.batch_size - len(indices) % self.batch_size
295
+ # DistributedSampler already added samples from the beginning to make the number of samples a round multiple
296
+ # of the world size, so we skip those.
297
+ start_remainder = 1 if self.rank < len(self.dataset) % self.num_replicas else 0
298
+ indices += indices[start_remainder : start_remainder + remainder]
299
+ return iter(indices)
300
+
301
+
302
+ class EvalLoopContainer:
303
+ """
304
+ Container to store intermediate results of the evaluation loop.
305
+
306
+ Args:
307
+ do_nested_concat (`bool`, *optional*, defaults to `True`):
308
+ If set to `True`, each iteration will recursively concatenate a new object containing tensors to
309
+ the existing stored tensors, provided that the structure of the existing object and the new one
310
+ are identical. If set to `False`, all newly added tensors will be stored in a list.
311
+ padding_index (`int`, *optional*, defaults to -100):
312
+ Value used to pad tensors of different shapes when `do_nested_concat=True`.
313
+ """
314
+
315
+ def __init__(self, do_nested_concat: bool = True, padding_index: int = -100):
316
+ self.do_nested_concat = do_nested_concat
317
+ self.padding_index = padding_index
318
+ self.tensors = None
319
+ self.arrays = None
320
+
321
+ def add(self, tensors) -> None:
322
+ """Add tensors to the stored objects. If `do_nested_concat=True`, the tensors will be concatenated recursively."""
323
+ if self.tensors is None:
324
+ self.tensors = tensors if self.do_nested_concat else [tensors]
325
+ elif self.do_nested_concat:
326
+ self.tensors = nested_concat(self.tensors, tensors, padding_index=self.padding_index)
327
+ else:
328
+ self.tensors.append(tensors)
329
+
330
+ def to_cpu_and_numpy(self) -> None:
331
+ """Move tensors in stored objects to CPU and convert them to numpy arrays."""
332
+
333
+ # Check if we have something to add, if not just return
334
+ if self.tensors is None:
335
+ return
336
+
337
+ new_arrays = nested_numpify(self.tensors)
338
+ if self.arrays is None:
339
+ self.arrays = new_arrays
340
+ elif self.do_nested_concat:
341
+ self.arrays = nested_concat(self.arrays, new_arrays, padding_index=self.padding_index)
342
+ else:
343
+ self.arrays.extend(new_arrays)
344
+
345
+ # reset device tensors after adding to cpu
346
+ self.tensors = None
347
+
348
+ def get_arrays(self):
349
+ """Returns the numpified and moved to CPU stored objects."""
350
+ self.to_cpu_and_numpy()
351
+ return self.arrays
352
+
353
+
354
+ class SequentialDistributedSampler(Sampler):
355
+ """
356
+ Distributed Sampler that subsamples indices sequentially, making it easier to collate all results at the end.
357
+
358
+ Even though we only use this sampler for eval and predict (no training), which means that the model params won't
359
+ have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add
360
+ extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather`
361
+ or `reduce` resulting tensors at the end of the loop.
362
+ """
363
+
364
+ def __init__(self, dataset, num_replicas=None, rank=None, batch_size=None):
365
+ warnings.warn(
366
+ "SequentialDistributedSampler is deprecated and will be removed in v5 of Transformers.",
367
+ FutureWarning,
368
+ )
369
+ if num_replicas is None:
370
+ if not dist.is_available():
371
+ raise RuntimeError("Requires distributed package to be available")
372
+ num_replicas = dist.get_world_size()
373
+ if rank is None:
374
+ if not dist.is_available():
375
+ raise RuntimeError("Requires distributed package to be available")
376
+ rank = dist.get_rank()
377
+ self.dataset = dataset
378
+ self.num_replicas = num_replicas
379
+ self.rank = rank
380
+ num_samples = len(self.dataset)
381
+ # Add extra samples to make num_samples a multiple of batch_size if passed
382
+ if batch_size is not None:
383
+ self.num_samples = int(math.ceil(num_samples / (batch_size * num_replicas))) * batch_size
384
+ else:
385
+ self.num_samples = int(math.ceil(num_samples / num_replicas))
386
+ self.total_size = self.num_samples * self.num_replicas
387
+ self.batch_size = batch_size
388
+
389
+ def __iter__(self):
390
+ indices = list(range(len(self.dataset)))
391
+
392
+ # add extra samples to make it evenly divisible
393
+ indices += indices[: (self.total_size - len(indices))]
394
+ assert (
395
+ len(indices) == self.total_size
396
+ ), f"Indices length {len(indices)} and total size {self.total_size} mismatched"
397
+
398
+ # subsample
399
+ indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples]
400
+ assert (
401
+ len(indices) == self.num_samples
402
+ ), f"Indices length {len(indices)} and sample number {self.num_samples} mismatched"
403
+
404
+ return iter(indices)
405
+
406
+ def __len__(self):
407
+ return self.num_samples
408
+
409
+
410
+ def get_tpu_sampler(dataset: torch.utils.data.Dataset, batch_size: int):
411
+ if xm.xrt_world_size() <= 1:
412
+ return RandomSampler(dataset)
413
+ return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
414
+
415
+
416
+ def nested_new_like(arrays, num_samples, padding_index=-100):
417
+ """Create the same nested structure as `arrays` with a first dimension always at `num_samples`."""
418
+ if isinstance(arrays, (list, tuple)):
419
+ return type(arrays)(nested_new_like(x, num_samples) for x in arrays)
420
+ return np.full_like(arrays, padding_index, shape=(num_samples, *arrays.shape[1:]))
421
+
422
+
423
+ def expand_like(arrays, new_seq_length, padding_index=-100):
424
+ """Expand the `arrays` so that the second dimension grows to `new_seq_length`. Uses `padding_index` for padding."""
425
+ result = np.full_like(arrays, padding_index, shape=(arrays.shape[0], new_seq_length) + arrays.shape[2:])
426
+ result[:, : arrays.shape[1]] = arrays
427
+ return result
428
+
429
+
430
+ def nested_truncate(tensors, limit):
431
+ "Truncate `tensors` at `limit` (even if it's a nested list/tuple/dict of tensors)."
432
+ if isinstance(tensors, (list, tuple)):
433
+ return type(tensors)(nested_truncate(t, limit) for t in tensors)
434
+ if isinstance(tensors, Mapping):
435
+ return type(tensors)({k: nested_truncate(t, limit) for k, t in tensors.items()})
436
+
437
+ return tensors[:limit]
438
+
439
+
440
+ class DistributedTensorGatherer:
441
+ """
442
+ A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks.
443
+
444
+ If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather and then transfer to CPU at every
445
+ step, our sampler will generate the following indices:
446
+
447
+ `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]`
448
+
449
+ to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and
450
+ 2 will be responsible for making predictions for the following samples:
451
+
452
+ - P0: `[0, 1, 2, 3, 4, 5]`
453
+ - P1: `[6, 7, 8, 9, 10, 11]`
454
+ - P2: `[12, 13, 14, 15, 0, 1]`
455
+
456
+ The first batch treated on each process will be
457
+
458
+ - P0: `[0, 1]`
459
+ - P1: `[6, 7]`
460
+ - P2: `[12, 13]`
461
+
462
+ So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to
463
+ the following indices:
464
+
465
+ `[0, 1, 6, 7, 12, 13]`
466
+
467
+ If we directly concatenate our results without taking any precautions, the user will then get the predictions for
468
+ the indices in this order at the end of the prediction loop:
469
+
470
+ `[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]`
471
+
472
+ That is unlikely to be what the user wants. This class is there to solve that problem.
473
+
474
+ Args:
475
+ world_size (`int`):
476
+ The number of processes used in the distributed training.
477
+ num_samples (`int`):
478
+ The number of samples in our dataset.
479
+ make_multiple_of (`int`, *optional*):
480
+ If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument
481
+ (by adding samples).
482
+ padding_index (`int`, *optional*, defaults to -100):
483
+ The padding index to use if the arrays don't all have the same sequence length.
484
+ """
485
+
486
+ def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100):
487
+ warnings.warn(
488
+ "DistributedTensorGatherer is deprecated and will be removed in v5 of Transformers.",
489
+ FutureWarning,
490
+ )
491
+ self.world_size = world_size
492
+ self.num_samples = num_samples
493
+ total_size = world_size if make_multiple_of is None else world_size * make_multiple_of
494
+ self.total_samples = int(np.ceil(num_samples / total_size)) * total_size
495
+ self.process_length = self.total_samples // world_size
496
+ self._storage = None
497
+ self._offsets = None
498
+ self.padding_index = padding_index
499
+
500
+ def add_arrays(self, arrays):
501
+ """
502
+ Add `arrays` to the internal storage. This will initialize the storage to its full size when the first arrays are passed,
503
+ so that if we're bound to get an OOM, it happens at the beginning.
504
+ """
505
+ if arrays is None:
506
+ return
507
+ if self._storage is None:
508
+ self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index)
509
+ self._offsets = list(range(0, self.total_samples, self.process_length))
510
+
511
+ slice_len, self._storage = self._nested_set_tensors(self._storage, arrays)
512
+ for i in range(self.world_size):
513
+ self._offsets[i] += slice_len
514
+
515
+ def _nested_set_tensors(self, storage, arrays):
516
+ if isinstance(arrays, (list, tuple)):
517
+ result = [self._nested_set_tensors(x, y) for x, y in zip(storage, arrays)]
518
+ return result[0][0], type(arrays)(r[1] for r in result)
519
+ assert (
520
+ arrays.shape[0] % self.world_size == 0
521
+ ), f"Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}."
522
+
523
+ slice_len = arrays.shape[0] // self.world_size
524
+ for i in range(self.world_size):
525
+ if len(arrays.shape) == 1:
526
+ storage[self._offsets[i] : self._offsets[i] + slice_len] = arrays[i * slice_len : (i + 1) * slice_len]
527
+ else:
528
+ # Expand the array on the fly if needed.
529
+ if len(storage.shape) > 1 and storage.shape[1] < arrays.shape[1]:
530
+ storage = expand_like(storage, arrays.shape[1], padding_index=self.padding_index)
531
+ storage[self._offsets[i] : self._offsets[i] + slice_len, : arrays.shape[1]] = arrays[
532
+ i * slice_len : (i + 1) * slice_len
533
+ ]
534
+ return slice_len, storage
535
+
536
+ def finalize(self):
537
+ """
538
+ Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras
539
+ to get each process a dataset of the same length).
540
+ """
541
+ if self._storage is None:
542
+ return
543
+ if self._offsets[0] != self.process_length:
544
+ logger.warning("Not all data has been set. Are you sure you passed all values?")
545
+ return nested_truncate(self._storage, self.num_samples)
546
+
547
+
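A minimal sketch of the gathering logic the docstring above describes, simulating the chunks an all-gather would hand back for 16 samples on 3 processes (the class is deprecated, as its own warning says, but the index arithmetic is easiest to see on a toy example):

```python
# Illustrative only: reorder gathered per-step chunks back into dataset order.
import numpy as np

from transformers.trainer_pt_utils import DistributedTensorGatherer

world_size, num_samples = 3, 16
gatherer = DistributedTensorGatherer(world_size, num_samples)

# The sampler pads 16 indices to 18 (= 3 processes x 6) and splits them per process.
indices = list(range(num_samples)) + [0, 1]
per_process = [indices[i * 6:(i + 1) * 6] for i in range(world_size)]

for step in range(3):  # 3 batches of 2 per process
    # What an all-gather would return at this step: P0's batch, then P1's, then P2's.
    chunk = np.array([p[step * 2:(step + 1) * 2] for p in per_process]).reshape(-1)
    gatherer.add_arrays(chunk)

print(gatherer.finalize())   # [ 0  1  2 ... 15] -- back in the original order
```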
548
+ @dataclass
549
+ class LabelSmoother:
550
+ """
551
+ Adds label-smoothing on a pre-computed output from a Transformers model.
552
+
553
+ Args:
554
+ epsilon (`float`, *optional*, defaults to 0.1):
555
+ The label smoothing factor.
556
+ ignore_index (`int`, *optional*, defaults to -100):
557
+ The index in the labels to ignore when computing the loss.
558
+ """
559
+
560
+ epsilon: float = 0.1
561
+ ignore_index: int = -100
562
+
563
+ def __call__(self, model_output, labels, shift_labels=False):
564
+ logits = model_output["logits"] if isinstance(model_output, dict) else model_output[0]
565
+ if shift_labels:
566
+ logits = logits[..., :-1, :].contiguous()
567
+ labels = labels[..., 1:].contiguous()
568
+
569
+ log_probs = -nn.functional.log_softmax(logits, dim=-1)
570
+ if labels.dim() == log_probs.dim() - 1:
571
+ labels = labels.unsqueeze(-1)
572
+
573
+ padding_mask = labels.eq(self.ignore_index)
574
+ # In case the ignore_index is -100, the gather will fail, so we replace labels by 0. The padding_mask
575
+ # will ignore them in any case.
576
+ labels = torch.clamp(labels, min=0)
577
+ nll_loss = log_probs.gather(dim=-1, index=labels)
578
+ # works for fp16 input tensor too, by internally upcasting it to fp32
579
+ smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32)
580
+
581
+ nll_loss.masked_fill_(padding_mask, 0.0)
582
+ smoothed_loss.masked_fill_(padding_mask, 0.0)
583
+
584
+ # Take the mean over the label dimensions, then divide by the number of active elements (i.e. not-padded):
585
+ num_active_elements = padding_mask.numel() - padding_mask.long().sum()
586
+ nll_loss = nll_loss.sum() / num_active_elements
587
+ smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1])
588
+ return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss
589
+
590
+
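Restating what `LabelSmoother.__call__` above computes (a reading of the code, not new math): with vocabulary size V, smoothing factor epsilon, and N non-padded target positions, the returned loss is

```latex
% Loss returned by LabelSmoother.__call__ (V = vocab size, \varepsilon = smoothing factor,
% N = number of non-padded target positions)
\mathcal{L}
  = (1-\varepsilon)\,\frac{1}{N}\sum_{i=1}^{N} -\log p_\theta\!\left(y_i \mid x_i\right)
  \;+\; \varepsilon\,\frac{1}{NV}\sum_{i=1}^{N}\sum_{k=1}^{V} -\log p_\theta\!\left(k \mid x_i\right)
```

i.e. the standard negative log-likelihood on the gold labels mixed with a uniform term over the vocabulary, which is exactly `(1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss`.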
591
+ def get_length_grouped_indices(lengths, batch_size, mega_batch_mult=None, generator=None):
592
+ """
593
+ Return a list of indices so that each slice of `batch_size` consecutive indices correspond to elements of similar
594
+ lengths. To do this, the indices are:
595
+
596
+ - randomly permuted
597
+ - grouped in mega-batches of size `mega_batch_mult * batch_size`
598
+ - sorted by length in each mega-batch
599
+
600
+ The result is the concatenation of all mega-batches, with the batch of `batch_size` containing the element of
601
+ maximum length placed first, so that an OOM happens sooner rather than later.
602
+ """
603
+ # Default for mega_batch_mult: 50 or the number to get 4 megabatches, whichever is smaller.
604
+ if mega_batch_mult is None:
605
+ mega_batch_mult = min(len(lengths) // (batch_size * 4), 50)
606
+ # Just in case, for tiny datasets
607
+ if mega_batch_mult == 0:
608
+ mega_batch_mult = 1
609
+
610
+ # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
611
+ indices = torch.randperm(len(lengths), generator=generator)
612
+ megabatch_size = mega_batch_mult * batch_size
613
+ megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)]
614
+ megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches]
615
+
616
+ # The rest is to get the biggest batch first.
617
+ # Since each megabatch is sorted by descending length, the longest element is the first
618
+ megabatch_maximums = [lengths[megabatch[0]] for megabatch in megabatches]
619
+ max_idx = torch.argmax(torch.tensor(megabatch_maximums)).item()
620
+ # Switch to put the longest element in first position
621
+ megabatches[0][0], megabatches[max_idx][0] = megabatches[max_idx][0], megabatches[0][0]
622
+
623
+ return [i for megabatch in megabatches for i in megabatch]
624
+
625
+
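+ # A small sketch of what the helper above returns (the lengths are made up):
+ # each consecutive slice of `batch_size` indices points to elements of similar
+ # length, and the batch containing the longest element comes first.
+ def _length_grouped_indices_example():
+     lengths = [5, 12, 7, 3, 11, 8, 4, 10]
+     return get_length_grouped_indices(lengths, batch_size=2)
+
+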
626
+ class LengthGroupedSampler(Sampler):
627
+ r"""
628
+ Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while
629
+ keeping a bit of randomness.
630
+ """
631
+
632
+ def __init__(
633
+ self,
634
+ batch_size: int,
635
+ dataset: Optional[Dataset] = None,
636
+ lengths: Optional[List[int]] = None,
637
+ model_input_name: Optional[str] = None,
638
+ generator=None,
639
+ ):
640
+ if dataset is None and lengths is None:
641
+ raise ValueError("One of dataset and lengths must be provided.")
642
+
643
+ self.batch_size = batch_size
644
+ if lengths is None:
645
+ model_input_name = model_input_name if model_input_name is not None else "input_ids"
646
+ if (
647
+ not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding))
648
+ or model_input_name not in dataset[0]
649
+ ):
650
+ raise ValueError(
651
+ "Can only automatically infer lengths for datasets whose items are dictionaries with an "
652
+ f"'{model_input_name}' key."
653
+ )
654
+ lengths = [len(feature[model_input_name]) for feature in dataset]
655
+ elif isinstance(lengths, torch.Tensor):
656
+ logger.info(
657
+ "If lengths is a torch.Tensor, LengthGroupedSampler will be slow. Converting lengths to List[int]..."
658
+ )
659
+ lengths = lengths.tolist()
660
+
661
+ self.lengths = lengths
662
+ self.generator = generator
663
+
664
+ def __len__(self):
665
+ return len(self.lengths)
666
+
667
+ def __iter__(self):
668
+ indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=self.generator)
669
+ return iter(indices)
670
+
671
+
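+ # A minimal sketch of plugging the sampler into a `DataLoader`;
+ # `tokenized_dataset` and `collate_fn` are hypothetical placeholders for a
+ # dataset whose items contain an "input_ids" key and a matching collator.
+ def _length_grouped_dataloader_example(tokenized_dataset, collate_fn):
+     from torch.utils.data import DataLoader
+
+     sampler = LengthGroupedSampler(batch_size=8, dataset=tokenized_dataset)
+     return DataLoader(tokenized_dataset, batch_size=8, sampler=sampler, collate_fn=collate_fn)
+
+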
672
+ class DistributedLengthGroupedSampler(DistributedSampler):
673
+ r"""
674
+ Distributed Sampler that samples indices in a way that groups together features of the dataset of roughly the same
675
+ length while keeping a bit of randomness.
676
+ """
677
+
678
+ # Copied and adapted from PyTorch DistributedSampler.
679
+ def __init__(
680
+ self,
681
+ batch_size: int,
682
+ dataset: Optional[Dataset] = None,
683
+ num_replicas: Optional[int] = None,
684
+ rank: Optional[int] = None,
685
+ seed: int = 0,
686
+ drop_last: bool = False,
687
+ lengths: Optional[List[int]] = None,
688
+ model_input_name: Optional[str] = None,
689
+ ):
690
+ if dataset is None and lengths is None:
691
+ raise ValueError("One of dataset and lengths must be provided.")
692
+ if num_replicas is None:
693
+ if not dist.is_available():
694
+ raise RuntimeError("Requires distributed package to be available")
695
+ num_replicas = dist.get_world_size()
696
+ if rank is None:
697
+ if not dist.is_available():
698
+ raise RuntimeError("Requires distributed package to be available")
699
+ rank = dist.get_rank()
700
+
701
+ self.batch_size = batch_size
702
+ self.num_replicas = num_replicas
703
+ self.rank = rank
704
+ self.epoch = 0
705
+ self.drop_last = drop_last
706
+
707
+ if lengths is None:
708
+ model_input_name = model_input_name if model_input_name is not None else "input_ids"
709
+ if (
710
+ not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding))
711
+ or model_input_name not in dataset[0]
712
+ ):
713
+ raise ValueError(
714
+ "Can only automatically infer lengths for datasets whose items are dictionaries with an "
715
+ f"'{model_input_name}' key."
716
+ )
717
+ lengths = [len(feature[model_input_name]) for feature in dataset]
718
+ elif isinstance(lengths, torch.Tensor):
719
+ logger.info(
720
+ "If lengths is a torch.Tensor, DistributedLengthGroupedSampler will be slow. Converting lengths to"
721
+ " List[int]..."
722
+ )
723
+ lengths = lengths.tolist()
724
+
725
+ self.lengths = lengths
726
+
727
+ # If the dataset length is evenly divisible by # of replicas, then there
728
+ # is no need to drop any data, since the dataset will be split equally.
729
+ if self.drop_last and len(self.lengths) % self.num_replicas != 0:
730
+ # Split to nearest available length that is evenly divisible.
731
+ # This is to ensure each rank receives the same amount of data when
732
+ # using this Sampler.
733
+ self.num_samples = math.ceil((len(self.lengths) - self.num_replicas) / self.num_replicas)
734
+ else:
735
+ self.num_samples = math.ceil(len(self.lengths) / self.num_replicas)
736
+ self.total_size = self.num_samples * self.num_replicas
737
+ self.seed = seed
738
+
739
+ def __iter__(self) -> Iterator:
740
+ # Deterministically shuffle based on epoch and seed
741
+ g = torch.Generator()
742
+ g.manual_seed(self.seed + self.epoch)
743
+ indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=g)
744
+
745
+ if not self.drop_last:
746
+ # add extra samples to make it evenly divisible
747
+ indices += indices[: (self.total_size - len(indices))]
748
+ else:
749
+ # remove tail of data to make it evenly divisible.
750
+ indices = indices[: self.total_size]
751
+ assert len(indices) == self.total_size
752
+
753
+ # subsample
754
+ indices = indices[self.rank : self.total_size : self.num_replicas]
755
+ assert len(indices) == self.num_samples
756
+
757
+ return iter(indices)
758
+
759
+
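+ # A minimal sketch of the distributed variant, assuming `lengths` has been
+ # precomputed; passing `num_replicas` and `rank` explicitly avoids requiring an
+ # initialized process group, and `set_epoch` (inherited from `DistributedSampler`)
+ # keeps the shuffle deterministic but epoch-dependent.
+ def _distributed_length_grouped_example(lengths, epoch=0):
+     sampler = DistributedLengthGroupedSampler(
+         batch_size=8, lengths=lengths, num_replicas=2, rank=0, seed=42
+     )
+     sampler.set_epoch(epoch)
+     return list(iter(sampler))
+
+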
760
+ class ShardSampler(Sampler):
761
+ """
762
+ Sampler that shards batches between several processes. Dispatches indices batch by batch: on 2 processes with batch
763
+ size 4, the first two batches are `[0, 1, 2, 3, 4, 5, 6, 7]` and `[8, 9, 10, 11, 12, 13, 14, 15]`, which shard into
764
+ `[0, 1, 2, 3]` and `[8, 9, 10, 11]` for GPU-0 and `[4, 5, 6, 7]` and `[12, 13, 14, 15]` for GPU-1.
765
+
766
+ The sampler thus yields `[0, 1, 2, 3, 8, 9, 10, 11]` on GPU-0 and `[4, 5, 6, 7, 12, 13, 14, 15]` on GPU-1.
767
+ """
768
+
769
+ def __init__(
770
+ self,
771
+ dataset: Dataset,
772
+ batch_size: int = 1,
773
+ drop_last: bool = False,
774
+ num_processes: int = 1,
775
+ process_index: int = 0,
776
+ ):
777
+ self.dataset = dataset
778
+ self.batch_size = batch_size
779
+ self.drop_last = drop_last
780
+ self.num_processes = num_processes
781
+ self.process_index = process_index
782
+
783
+ self.total_batch_size = total_batch_size = batch_size * num_processes
784
+
785
+ num_batches = len(dataset) // total_batch_size if drop_last else math.ceil(len(dataset) / total_batch_size)
786
+ self.total_num_samples = num_batches * total_batch_size
787
+
788
+ def __iter__(self):
789
+ indices = list(range(len(self.dataset)))
790
+
791
+ # Add extra samples to make it evenly divisible. While loop is there in the edge case we have a tiny dataset
792
+ # and it needs to be done several times.
793
+ while len(indices) < self.total_num_samples:
794
+ indices += indices[: (self.total_num_samples - len(indices))]
795
+
796
+ result = []
797
+ for batch_start in range(self.batch_size * self.process_index, self.total_num_samples, self.total_batch_size):
798
+ result += indices[batch_start : batch_start + self.batch_size]
799
+
800
+ return iter(result)
801
+
802
+ def __len__(self):
803
+ # Each shard only sees a fraction of total_num_samples.
804
+ return self.total_num_samples // self.num_processes
805
+
806
+
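+ # A small sketch reproducing the two-process example from the docstring above,
+ # using a plain 16-element list as a stand-in for a real dataset:
+ def _shard_sampler_example():
+     data = list(range(16))
+     shard0 = list(ShardSampler(data, batch_size=4, num_processes=2, process_index=0))
+     shard1 = list(ShardSampler(data, batch_size=4, num_processes=2, process_index=1))
+     return shard0, shard1  # ([0, 1, 2, 3, 8, 9, 10, 11], [4, 5, 6, 7, 12, 13, 14, 15])
+
+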
807
+ class IterableDatasetShard(IterableDataset):
808
+ """
809
+ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
810
+ always yield a number of samples that is a round multiple of the actual batch size (which is `batch_size x
811
+ num_processes`). Depending on the value of the `drop_last` attribute, it will either stop the iteration at the
812
+ first batch that would be too small or loop with indices from the beginning.
813
+
814
+ On two processes with an iterable dataset yielding `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]` with a batch size of
815
+ 2:
816
+
817
+ - the shard on process 0 will yield `[0, 1, 4, 5, 8, 9]` so will see batches `[0, 1]`, `[4, 5]`, `[8, 9]`
818
+ - the shard on process 1 will yield `[2, 3, 6, 7, 10, 11]` so will see batches `[2, 3]`, `[6, 7]`, `[10, 11]`
819
+
820
+ <Tip warning={true}>
821
+
822
+ If your IterableDataset implements some randomization that needs to be applied the same way on all processes
823
+ (for instance, a shuffling), you should use a `torch.Generator` in a `generator` attribute of the `dataset` to
824
+ generate your random numbers and call the [`~trainer_pt_utils.IterableDatasetShard.set_epoch`] method of this
825
+ object. It will set the seed of this `generator` to `seed + epoch` on all processes before starting the
826
+ iteration. Alternatively, you can also implement a `set_epoch()` method in your iterable dataset to deal with
827
+ this.
828
+
829
+ </Tip>
830
+
831
+ Args:
832
+ dataset (`torch.utils.data.IterableDataset`):
833
+ The iterable dataset to split in several shards.
834
+ batch_size (`int`, *optional*, defaults to 1):
835
+ The size of the batches per shard.
836
+ drop_last (`bool`, *optional*, defaults to `False`):
837
+ Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
838
+ beginning.
839
+ num_processes (`int`, *optional*, defaults to 1):
840
+ The number of processes running concurrently.
841
+ process_index (`int`, *optional*, defaults to 0):
842
+ The index of the current process.
843
+ seed (`int`, *optional*, defaults to 0):
844
+ A random seed that will be used for the random number generation in
845
+ [`~trainer_pt_utils.IterableDatasetShard.set_epoch`].
846
+ """
847
+
848
+ def __init__(
849
+ self,
850
+ dataset: IterableDataset,
851
+ batch_size: int = 1,
852
+ drop_last: bool = False,
853
+ num_processes: int = 1,
854
+ process_index: int = 0,
855
+ seed: int = 0,
856
+ ):
857
+ self.dataset = dataset
858
+ self.batch_size = batch_size
859
+ self.drop_last = drop_last
860
+ self.num_processes = num_processes
861
+ self.process_index = process_index
862
+ self.seed = seed
863
+ self.epoch = 0
864
+ self.num_examples = 0
865
+
866
+ def set_epoch(self, epoch):
867
+ self.epoch = epoch
868
+ if hasattr(self.dataset, "set_epoch"):
869
+ self.dataset.set_epoch(epoch)
870
+
871
+ def __iter__(self):
872
+ self.num_examples = 0
873
+ if (
874
+ not hasattr(self.dataset, "set_epoch")
875
+ and hasattr(self.dataset, "generator")
876
+ and isinstance(self.dataset.generator, torch.Generator)
877
+ ):
878
+ self.dataset.generator.manual_seed(self.seed + self.epoch)
879
+ real_batch_size = self.batch_size * self.num_processes
880
+ process_slice = range(self.process_index * self.batch_size, (self.process_index + 1) * self.batch_size)
881
+
882
+ first_batch = None
883
+ current_batch = []
884
+ for element in self.dataset:
885
+ self.num_examples += 1
886
+ current_batch.append(element)
887
+ # Wait to have a full batch before yielding elements.
888
+ if len(current_batch) == real_batch_size:
889
+ for i in process_slice:
890
+ yield current_batch[i]
891
+ if first_batch is None:
892
+ first_batch = current_batch.copy()
893
+ current_batch = []
894
+
895
+ # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
896
+ if not self.drop_last and len(current_batch) > 0:
897
+ if first_batch is None:
898
+ first_batch = current_batch.copy()
899
+ while len(current_batch) < real_batch_size:
900
+ current_batch += first_batch
901
+ for i in process_slice:
902
+ yield current_batch[i]
903
+
904
+ def __len__(self):
905
+ # Will raise an error if the underlying dataset is not sized.
906
+ if self.drop_last:
907
+ return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
908
+ else:
909
+ return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
910
+
911
+
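+ # A small sketch reproducing the two-process example from the docstring above,
+ # using a tiny generator-backed iterable dataset as a stand-in:
+ def _iterable_dataset_shard_example():
+     class _TinyIterable(IterableDataset):
+         def __iter__(self):
+             return iter(range(12))
+
+     shard0 = list(IterableDatasetShard(_TinyIterable(), batch_size=2, num_processes=2, process_index=0))
+     shard1 = list(IterableDatasetShard(_TinyIterable(), batch_size=2, num_processes=2, process_index=1))
+     return shard0, shard1  # ([0, 1, 4, 5, 8, 9], [2, 3, 6, 7, 10, 11])
+
+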
912
+ # In order to keep `trainer.py` compact and easy to understand, place any secondary PT Trainer
913
+ # helper methods here
914
+
915
+
916
+ def _get_learning_rate(self):
917
+ if self.is_deepspeed_enabled:
918
+ # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
919
+ # not run for the first few dozen steps while loss scale is too large, and thus during
920
+ # that time `get_last_lr` will fail if called during that warm up stage, so work around it:
921
+ try:
922
+ last_lr = self.lr_scheduler.get_last_lr()[0]
923
+ except AssertionError as e:
924
+ if "need to call step" in str(e):
925
+ logger.warning("tried to get lr value before scheduler/optimizer started stepping, returning lr=0")
926
+ last_lr = 0
927
+ else:
928
+ raise
929
+ else:
930
+ if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
931
+ last_lr = self.optimizer.param_groups[0]["lr"]
932
+ else:
933
+ last_lr = self.lr_scheduler.get_last_lr()[0]
934
+ if torch.is_tensor(last_lr):
935
+ last_lr = last_lr.item()
936
+ return last_lr
937
+
938
+
939
+ def _secs2timedelta(secs):
940
+ """
941
+ convert seconds to hh:mm:ss.msec, msecs rounded to 2 decimals
942
+ """
943
+
944
+ msec = int(abs(secs - int(secs)) * 100)
945
+ return f"{datetime.timedelta(seconds=int(secs))}.{msec:02d}"
946
+
947
+
948
+ def metrics_format(self, metrics: Dict[str, float]) -> Dict[str, float]:
949
+ """
950
+ Reformat Trainer metrics values to a human-readable format
951
+
952
+ Args:
953
+ metrics (`Dict[str, float]`):
954
+ The metrics returned from train/evaluate/predict
955
+
956
+ Returns:
957
+ metrics (`Dict[str, float]`): The reformatted metrics
958
+ """
959
+
960
+ metrics_copy = metrics.copy()
961
+ for k, v in metrics_copy.items():
962
+ if "_mem_" in k:
963
+ metrics_copy[k] = f"{ v >> 20 }MB"
964
+ elif "_runtime" in k:
965
+ metrics_copy[k] = _secs2timedelta(v)
966
+ elif k == "total_flos":
967
+ metrics_copy[k] = f"{ int(v) >> 30 }GF"
968
+ elif isinstance(metrics_copy[k], float):
969
+ metrics_copy[k] = round(v, 4)
970
+
971
+ return metrics_copy
972
+
973
+
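+ # A small sketch of the two helpers above on made-up values (`metrics_format`
+ # is attached to `Trainer`, so it is called as `trainer.metrics_format`):
+ def _metrics_format_example(trainer):
+     raw = {"train_runtime": 3661.5, "train_mem_gpu_alloc_delta": 693 * 2**20, "train_loss": 0.123456}
+     # -> {"train_runtime": "1:01:01.50", "train_mem_gpu_alloc_delta": "693MB", "train_loss": 0.1235}
+     return trainer.metrics_format(raw)
+
+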
974
+ def log_metrics(self, split, metrics):
975
+ """
976
+ Log metrics in a specially formatted way
977
+
978
+ Under distributed environment this is done only for a process with rank 0.
979
+
980
+ Args:
981
+ split (`str`):
982
+ Mode/split name: one of `train`, `eval`, `test`
983
+ metrics (`Dict[str, float]`):
984
+ The metrics returned from train/evaluate/predict
985
+
986
+ Notes on memory reports:
987
+
988
+ In order to get memory usage report you need to install `psutil`. You can do that with `pip install psutil`.
989
+
990
+ Now when this method is run, you will see a report that will include:
991
+
992
+ ```
993
+ init_mem_cpu_alloc_delta = 1301MB
994
+ init_mem_cpu_peaked_delta = 154MB
995
+ init_mem_gpu_alloc_delta = 230MB
996
+ init_mem_gpu_peaked_delta = 0MB
997
+ train_mem_cpu_alloc_delta = 1345MB
998
+ train_mem_cpu_peaked_delta = 0MB
999
+ train_mem_gpu_alloc_delta = 693MB
1000
+ train_mem_gpu_peaked_delta = 7MB
1001
+ ```
1002
+
1003
+ **Understanding the reports:**
1004
+
1005
+ - the first segment, e.g., `train_`, tells you which stage the metrics are for. Reports starting with `init_`
1006
+ will be added to the first stage that gets run. So if only evaluation is run, the memory usage for the
1007
+ `__init__` will be reported along with the `eval_` metrics.
1008
+ - the third segment is either `cpu` or `gpu` and tells you whether it's the general RAM or the gpu0 memory
1009
+ metric.
1010
+ - `*_alloc_delta` - is the difference in the used/allocated memory counter between the end and the start of the
1011
+ stage - it can be negative if a function released more memory than it allocated.
1012
+ - `*_peaked_delta` - is any extra memory that was consumed and then freed - relative to the current allocated
1013
+ memory counter - it is never negative. When you look at the metrics of any stage you add up `alloc_delta` +
1014
+ `peaked_delta` and you know how much memory was needed to complete that stage.
1015
+
1016
+ The reporting happens only for process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the
1017
+ main process does the bulk of work, but it could be not quite so if model parallel is used and then other GPUs may
1018
+ use a different amount of gpu memory. This is also not the same under DataParallel where gpu0 may require much more
1019
+ memory than the rest since it stores the gradient and optimizer states for all participating GPUS. Perhaps in the
1020
+ future these reports will evolve to measure those too.
1021
+
1022
+ The CPU RAM metric measures RSS (Resident Set Size), which includes both the memory unique to the process and the
1023
+ memory shared with other processes. It is important to note that it does not include swapped out memory, so the
1024
+ reports could be imprecise.
1025
+
1026
+ The CPU peak memory is measured using a sampling thread. Due to python's GIL it may miss some of the peak memory if
1027
+ that thread didn't get a chance to run when the highest memory was used. Therefore this report can be less than
1028
+ reality. Using `tracemalloc` would have reported the exact peak memory, but it doesn't report memory allocations
1029
+ outside of python. So if some C++ CUDA extension allocated its own memory it won't be reported. And therefore it
1030
+ was dropped in favor of the memory sampling approach, which reads the current process memory usage.
1031
+
1032
+ The GPU allocated and peak memory reporting is done with `torch.cuda.memory_allocated()` and
1033
+ `torch.cuda.max_memory_allocated()`. This metric reports only "deltas" for pytorch-specific allocations, as
1034
+ `torch.cuda` memory management system doesn't track any memory allocated outside of pytorch. For example, the very
1035
+ first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.
1036
+
1037
+ Note that this tracker doesn't account for memory allocations outside of [`Trainer`]'s `__init__`, `train`,
1038
+ `evaluate` and `predict` calls.
1039
+
1040
+ Because `evaluation` calls may happen during `train`, we can't handle nested invocations because
1041
+ `torch.cuda.max_memory_allocated` is a single counter, so if it gets reset by a nested eval call, `train`'s tracker
1042
+ will report incorrect info. If this [pytorch issue](https://github.com/pytorch/pytorch/issues/16266) gets resolved
1043
+ it will be possible to change this class to be re-entrant. Until then we will only track the outer level of
1044
+ `train`, `evaluate` and `predict` methods. Which means that if `eval` is called during `train`, it's the latter
1045
+ that will account for its memory usage and that of the former.
1046
+
1047
+ This also means that if any other tool that is used along the [`Trainer`] calls
1048
+ `torch.cuda.reset_peak_memory_stats`, the gpu peak memory stats could be invalid. And the [`Trainer`] will disrupt
1049
+ the normal behavior of any such tools that rely on calling `torch.cuda.reset_peak_memory_stats` themselves.
1050
+
1051
+ For best performance you may want to consider turning the memory profiling off for production runs.
1052
+ """
1053
+ if not self.is_world_process_zero():
1054
+ return
1055
+
1056
+ print(f"***** {split} metrics *****")
1057
+ metrics_formatted = self.metrics_format(metrics)
1058
+ k_width = max(len(str(x)) for x in metrics_formatted.keys())
1059
+ v_width = max(len(str(x)) for x in metrics_formatted.values())
1060
+ for key in sorted(metrics_formatted.keys()):
1061
+ print(f" {key: <{k_width}} = {metrics_formatted[key]:>{v_width}}")
1062
+
1063
+
1064
+ def save_metrics(self, split, metrics, combined=True):
1065
+ """
1066
+ Save metrics into a json file for that split, e.g. `train_results.json`.
1067
+
1068
+ Under distributed environment this is done only for a process with rank 0.
1069
+
1070
+ Args:
1071
+ split (`str`):
1072
+ Mode/split name: one of `train`, `eval`, `test`, `all`
1073
+ metrics (`Dict[str, float]`):
1074
+ The metrics returned from train/evaluate/predict
1075
+ combined (`bool`, *optional*, defaults to `True`):
1076
+ Creates combined metrics by updating `all_results.json` with metrics of this call
1077
+
1078
+ To understand the metrics please read the docstring of [`~Trainer.log_metrics`]. The only difference is that raw
1079
+ unformatted numbers are saved in the current method.
1080
+
1081
+ """
1082
+ if not self.is_world_process_zero():
1083
+ return
1084
+
1085
+ path = os.path.join(self.args.output_dir, f"{split}_results.json")
1086
+ with open(path, "w") as f:
1087
+ json.dump(metrics, f, indent=4, sort_keys=True)
1088
+
1089
+ if combined:
1090
+ path = os.path.join(self.args.output_dir, "all_results.json")
1091
+ if os.path.exists(path):
1092
+ with open(path, "r") as f:
1093
+ all_metrics = json.load(f)
1094
+ else:
1095
+ all_metrics = {}
1096
+
1097
+ all_metrics.update(metrics)
1098
+ with open(path, "w") as f:
1099
+ json.dump(all_metrics, f, indent=4, sort_keys=True)
1100
+
1101
+
1102
+ def save_state(self):
1103
+ """
1104
+ Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model
1105
+
1106
+ Under distributed environment this is done only for a process with rank 0.
1107
+ """
1108
+ if not self.is_world_process_zero():
1109
+ return
1110
+
1111
+ path = os.path.join(self.args.output_dir, "trainer_state.json")
1112
+ self.state.save_to_json(path)
1113
+
1114
+
1115
+ def get_model_param_count(model, trainable_only=False):
1116
+ """
1117
+ Calculate model's total param count. If trainable_only is True then count only those requiring grads
1118
+ """
1119
+ if is_deepspeed_zero3_enabled():
1120
+
1121
+ def numel(p):
1122
+ return p.ds_numel if hasattr(p, "ds_numel") else p.numel()
1123
+
1124
+ else:
1125
+
1126
+ def numel(p):
1127
+ return p.numel()
1128
+
1129
+ return sum(numel(p) for p in model.parameters() if not trainable_only or p.requires_grad)
1130
+
1131
+
1132
+ def get_parameter_names(model, forbidden_layer_types):
1133
+ """
1134
+ Returns the names of the model parameters that are not inside a forbidden layer.
1135
+ """
1136
+ result = []
1137
+ for name, child in model.named_children():
1138
+ result += [
1139
+ f"{name}.{n}"
1140
+ for n in get_parameter_names(child, forbidden_layer_types)
1141
+ if not isinstance(child, tuple(forbidden_layer_types))
1142
+ ]
1143
+ # Add model specific parameters (defined with nn.Parameter) since they are not in any child.
1144
+ result += list(model._parameters.keys())
1145
+ return result
1146
+
1147
+
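+ # A minimal sketch of the two helpers above on a toy model; `nn.LayerNorm` is
+ # used as an example "forbidden" layer type whose parameters are excluded
+ # (e.g. when building weight-decay parameter groups):
+ def _parameter_helpers_example():
+     model = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
+     total = get_model_param_count(model)  # 16 + 4 + 4 + 4 = 28
+     decay_names = get_parameter_names(model, [nn.LayerNorm])  # ["0.weight", "0.bias"]
+     return total, decay_names
+
+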
1148
+ def get_module_class_from_name(module, name):
1149
+ """
1150
+ Gets a class from a module by its name.
1151
+
1152
+ Args:
1153
+ module (`torch.nn.Module`): The module to get the class from.
1154
+ name (`str`): The name of the class.
1155
+ """
1156
+ modules_children = list(module.children())
1157
+ if module.__class__.__name__ == name:
1158
+ return module.__class__
1159
+ elif len(modules_children) == 0:
1160
+ return
1161
+ else:
1162
+ for child_module in modules_children:
1163
+ module_class = get_module_class_from_name(child_module, name)
1164
+ if module_class is not None:
1165
+ return module_class
1166
+
1167
+
1168
+ def remove_dummy_checkpoint(is_main_process, output_dir, filenames):
1169
+ if is_main_process:
1170
+ for filename in filenames:
1171
+ file = os.path.join(output_dir, filename)
1172
+ if os.path.isfile(file):
1173
+ os.remove(file)
1174
+
1175
+
1176
+ if is_sagemaker_mp_enabled():
1177
+ import smdistributed.modelparallel.torch as smp
1178
+
1179
+ @smp.step()
1180
+ def smp_forward_backward(model, inputs, gradient_accumulation_steps=1):
1181
+ outputs = model(**inputs)
1182
+ loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
1183
+ loss /= gradient_accumulation_steps
1184
+ model.backward(loss)
1185
+ return loss
1186
+
1187
+ @smp.step()
1188
+ def smp_forward_only(model, inputs):
1189
+ return model(**inputs)
1190
+
1191
+ def smp_gather(tensor):
1192
+ if isinstance(tensor, (list, tuple)):
1193
+ return type(tensor)(smp_gather(t) for t in tensor)
1194
+ elif isinstance(tensor, dict):
1195
+ return type(tensor)({k: smp_gather(v) for k, v in tensor.items()})
1196
+ elif not isinstance(tensor, torch.Tensor):
1197
+ raise TypeError(
1198
+ f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
1199
+ )
1200
+ all_tensors = smp.allgather(tensor, smp.CommGroup.DP_GROUP)
1201
+ all_tensors = [atleast_1d(t) for t in all_tensors]
1202
+ return torch.cat([t.cpu() for t in all_tensors], dim=0)
1203
+
1204
+ def smp_nested_concat(tensor):
1205
+ if isinstance(tensor, (list, tuple)):
1206
+ return type(tensor)(smp_nested_concat(t) for t in tensor)
1207
+ elif isinstance(tensor, dict):
1208
+ return type(tensor)({k: smp_nested_concat(v) for k, v in tensor.items()})
1209
+ # It doesn't seem possible to check here if `tensor` is a StepOutput because StepOutput lives in `smp.step`
1210
+ # which is also the name of the decorator so Python is confused.
1211
+ return tensor.concat().detach().cpu()
1212
+
1213
+
1214
+ @dataclass
1215
+ class AcceleratorConfig:
1216
+ """
1217
+ A subset of arguments relating to the underlying [`accelerate.Accelerator`]
1218
+ implementation utilized in the `Trainer` that can be customized.
1219
+ Mostly relating to data.
1220
+
1221
+ Parameters:
1222
+ split_batches (`bool`, *optional*, defaults to `False`):
1223
+ Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If
1224
+ `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a
1225
+ round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set
1226
+ in your script multiplied by the number of processes.
1227
+ dispatch_batches (`bool`, *optional*):
1228
+ If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process
1229
+ and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose
1230
+ underlying dataset is an `IterableDataset`, `False` otherwise.
1231
+ even_batches (`bool`, *optional*, defaults to `True`):
1232
+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the
1233
+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
1234
+ all workers.
1235
+ use_seedable_sampler (`bool`, *optional*, defaults to `True`):
1236
+ Whether or not to use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). Ensures
1237
+ training results are fully reproducible using a different sampling technique. While seed-to-seed results
1238
+ may differ, on average the differences are negligible when using multiple different seeds to compare. Should
1239
+ also be run with [`~utils.set_seed`] for the best results.
1240
+ gradient_accumulation_kwargs (`dict`, *optional*):
1241
+ Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`].
1242
+ Any of the following (optional) keys are acceptable:
1243
+ num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if
1244
+ the latter is set to 1, otherwise an exception will be raised.
1245
+ adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`].
1246
+ The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`.
1247
+ sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch.
1248
+ The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`.
1249
+
1250
+ """
1251
+
1252
+ # Data related arguments
1253
+ split_batches: bool = field(
1254
+ default=False,
1255
+ metadata={
1256
+ "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If"
1257
+ " `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a"
1258
+ " round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set"
1259
+ " in your script multiplied by the number of processes."
1260
+ },
1261
+ )
1262
+ dispatch_batches: bool = field(
1263
+ default=None,
1264
+ metadata={
1265
+ "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process"
1266
+ " and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose"
1267
+ " underlying dataset is an `IterableDataslet`, `False` otherwise."
1268
+ },
1269
+ )
1270
+ even_batches: bool = field(
1271
+ default=True,
1272
+ metadata={
1273
+ "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the"
1274
+ " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among"
1275
+ " all workers."
1276
+ },
1277
+ )
1278
+ use_seedable_sampler: bool = field(
1279
+ default=True,
1280
+ metadata={
1281
+ "help": "Whether or not use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`])."
1282
+ "Ensures training results are fully reproducable using a different sampling technique. "
1283
+ "While seed-to-seed results may differ, on average the differences are neglible when using"
1284
+ "multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results."
1285
+ },
1286
+ )
1287
+ gradient_accumulation_kwargs: Optional[Dict] = field(
1288
+ default=None,
1289
+ metadata={
1290
+ "help": "Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`]. "
1291
+ "Any of the following (optional) keys are acceptable: "
1292
+ " num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if "
1293
+ " the latter is set to 1, otherwise an exception will be raised. "
1294
+ " adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`]. "
1295
+ " The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`. "
1296
+ " sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch. "
1297
+ " The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`."
1298
+ },
1299
+ )
1300
+
1301
+ @classmethod
1302
+ def from_json_file(cls, json_file):
1303
+ # Check if exists
1304
+ open_file = io.open if os.path.exists(json_file) else open
1305
+ with open_file(json_file, "r", encoding="utf-8") as f:
1306
+ config_dict = json.load(f)
1307
+ # Check for keys and load sensible defaults
1308
+ extra_keys = sorted(key for key in config_dict.keys() if key not in cls.__dataclass_fields__.keys())
1309
+ if len(extra_keys) > 0:
1310
+ raise ValueError(
1311
+ f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `transformers`"
1312
+ " version or fix (and potentially remove these keys) from your config file."
1313
+ )
1314
+ return cls(**config_dict)
1315
+
1316
+ def to_dict(self):
1317
+ return copy.deepcopy(self.__dict__)
1318
+
1319
+
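+ # A minimal sketch of constructing an `AcceleratorConfig`, either directly or
+ # from a JSON file (the file name below is hypothetical):
+ def _accelerator_config_example():
+     config = AcceleratorConfig(split_batches=True, even_batches=False)
+     # config = AcceleratorConfig.from_json_file("accelerator_config.json")
+     return config.to_dict()
+
+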
1320
+ class LayerWiseDummyOptimizer(torch.optim.Optimizer):
1321
+ """
1322
+ For Layer-wise optimizers such as GaLoRE optimizer, the optimization
1323
+ step is already done through the post gradient hooks. Therefore
1324
+ the trick is to create a dummy optimizer that can take arbitrary
1325
+ args and kwargs and return a no-op during training.
1326
+
1327
+ Initial idea from @hiyouga in LLaMA-Factory:
1328
+ https://github.com/hiyouga/LLaMA-Factory/commit/8664262cde3919e10eaecbd66e8c5d356856362e#diff-ebe08ab14496dfb9e06075f0fdd36799ef6d1535cc4dd4715b74c4e3e06fe3ba
1329
+ """
1330
+
1331
+ def __init__(self, optimizer_dict=None, *args, **kwargs):
1332
+ dummy_tensor = torch.randn(1, 1)
1333
+ self.optimizer_dict = optimizer_dict
1334
+ super().__init__([dummy_tensor], {"lr": kwargs.get("lr", 1e-03)})
1335
+
1336
+ def zero_grad(self, set_to_none: bool = True) -> None:
1337
+ pass
1338
+
1339
+ def step(self, closure=None) -> Optional[float]:
1340
+ pass
1341
+
1342
+
1343
+ class LayerWiseDummyScheduler(LRScheduler):
1344
+ """
1345
+ For Layer-wise optimizers such as GaLoRE optimizer, the optimization and scheduling step
1346
+ are already done through the post gradient hooks. Therefore
1347
+ the trick is to create a dummy scheduler that can take arbitrary
1348
+ args and kwargs and return a no-op during training.
1349
+ """
1350
+
1351
+ def __init__(self, *args, **kwargs):
1352
+ optimizer = LayerWiseDummyOptimizer()
1353
+ last_epoch = -1
1354
+ verbose = False
1355
+ super().__init__(optimizer, last_epoch, verbose)
1356
+
1357
+ def get_lr(self):
1358
+ return [group["lr"] for group in self.optimizer.param_groups]
1359
+
1360
+ def _get_closed_form_lr(self):
1361
+ return self.base_lrs
llmeval-env/lib/python3.10/site-packages/transformers/trainer_seq2seq.py ADDED
@@ -0,0 +1,367 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from copy import deepcopy
17
+ from pathlib import Path
18
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ from torch import nn
22
+ from torch.utils.data import Dataset
23
+
24
+ from .generation.configuration_utils import GenerationConfig
25
+ from .integrations.deepspeed import is_deepspeed_zero3_enabled
26
+ from .trainer import Trainer
27
+ from .utils import logging
28
+
29
+
30
+ if TYPE_CHECKING:
31
+ from .data.data_collator import DataCollator
32
+ from .modeling_utils import PreTrainedModel
33
+ from .tokenization_utils_base import PreTrainedTokenizerBase
34
+ from .trainer_callback import TrainerCallback
35
+ from .trainer_utils import EvalPrediction, PredictionOutput
36
+ from .training_args import TrainingArguments
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ class Seq2SeqTrainer(Trainer):
43
+ def __init__(
44
+ self,
45
+ model: Union["PreTrainedModel", nn.Module] = None,
46
+ args: "TrainingArguments" = None,
47
+ data_collator: Optional["DataCollator"] = None,
48
+ train_dataset: Optional[Dataset] = None,
49
+ eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
50
+ tokenizer: Optional["PreTrainedTokenizerBase"] = None,
51
+ model_init: Optional[Callable[[], "PreTrainedModel"]] = None,
52
+ compute_metrics: Optional[Callable[["EvalPrediction"], Dict]] = None,
53
+ callbacks: Optional[List["TrainerCallback"]] = None,
54
+ optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
55
+ preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
56
+ ):
57
+ super().__init__(
58
+ model=model,
59
+ args=args,
60
+ data_collator=data_collator,
61
+ train_dataset=train_dataset,
62
+ eval_dataset=eval_dataset,
63
+ tokenizer=tokenizer,
64
+ model_init=model_init,
65
+ compute_metrics=compute_metrics,
66
+ callbacks=callbacks,
67
+ optimizers=optimizers,
68
+ preprocess_logits_for_metrics=preprocess_logits_for_metrics,
69
+ )
70
+
71
+ # Override self.model.generation_config if a GenerationConfig is specified in args.
72
+ # Priority: args.generation_config > model.generation_config > default GenerationConfig.
73
+ if self.args.generation_config is not None:
74
+ gen_config = self.load_generation_config(self.args.generation_config)
75
+ self.model.generation_config = gen_config
76
+
77
+ @staticmethod
78
+ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> GenerationConfig:
79
+ """
80
+ Loads a `~generation.GenerationConfig` from the `Seq2SeqTrainingArguments.generation_config` argument.
81
+
82
+ Args:
83
+ gen_config_arg (`str` or [`~generation.GenerationConfig`]):
84
+ `Seq2SeqTrainingArguments.generation_config` argument.
85
+
86
+ Returns:
87
+ A `~generation.GenerationConfig`.
88
+ """
89
+
90
+ # GenerationConfig provided, nothing to do
91
+ if isinstance(gen_config_arg, GenerationConfig):
92
+ gen_config = deepcopy(gen_config_arg)
93
+ else:
94
+ # str or Path
95
+ pretrained_model_name = Path(gen_config_arg) if isinstance(gen_config_arg, str) else gen_config_arg
96
+ config_file_name = None
97
+
98
+ # Figure out whether it is a path pointing to a file, a path pointing to a directory, or else a model id or URL.
99
+ # This step is required in order to determine config_file_name
100
+ if pretrained_model_name.is_file():
101
+ config_file_name = pretrained_model_name.name
102
+ pretrained_model_name = pretrained_model_name.parent
103
+ # dir path
104
+ elif pretrained_model_name.is_dir():
105
+ pass
106
+ # model id or URL
107
+ else:
108
+ pretrained_model_name = gen_config_arg
109
+
110
+ gen_config = GenerationConfig.from_pretrained(pretrained_model_name, config_file_name)
111
+
112
+ # Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws
113
+ # an exception if there are warnings at validation time.
114
+ try:
115
+ with warnings.catch_warnings(record=True) as caught_warnings:
116
+ gen_config.validate()
117
+ if len(caught_warnings) > 0:
118
+ raise ValueError(str([w.message for w in caught_warnings]))
119
+ except ValueError as exc:
120
+ raise ValueError(
121
+ "The loaded generation config instance is invalid -- `GenerationConfig.validate()` throws warnings "
122
+ "and/or exceptions. Fix these issues to train your model.\n\nThrown during validation:\n" + str(exc)
123
+ )
124
+ return gen_config
125
+
126
+ def evaluate(
127
+ self,
128
+ eval_dataset: Optional[Dataset] = None,
129
+ ignore_keys: Optional[List[str]] = None,
130
+ metric_key_prefix: str = "eval",
131
+ **gen_kwargs,
132
+ ) -> Dict[str, float]:
133
+ """
134
+ Run evaluation and returns metrics.
135
+
136
+ The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
137
+ (pass it to the init `compute_metrics` argument).
138
+
139
+ You can also subclass and override this method to inject custom behavior.
140
+
141
+ Args:
142
+ eval_dataset (`Dataset`, *optional*):
143
+ Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns
144
+ not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
145
+ method.
146
+ ignore_keys (`List[str]`, *optional*):
147
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
148
+ gathering predictions.
149
+ metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
150
+ An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
151
+ "eval_bleu" if the prefix is `"eval"` (default)
152
+ max_length (`int`, *optional*):
153
+ The maximum target length to use when predicting with the generate method.
154
+ num_beams (`int`, *optional*):
155
+ Number of beams for beam search that will be used when predicting with the generate method. 1 means no
156
+ beam search.
157
+ gen_kwargs:
158
+ Additional `generate` specific kwargs.
159
+
160
+ Returns:
161
+ A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
162
+ dictionary also contains the epoch number which comes from the training state.
163
+ """
164
+
165
+ gen_kwargs = gen_kwargs.copy()
166
+
167
+ # Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the
168
+ # training args
169
+ if (
170
+ gen_kwargs.get("max_length") is None
171
+ and gen_kwargs.get("max_new_tokens") is None
172
+ and self.args.generation_max_length is not None
173
+ ):
174
+ gen_kwargs["max_length"] = self.args.generation_max_length
175
+ if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None:
176
+ gen_kwargs["num_beams"] = self.args.generation_num_beams
177
+ # We don't want to drop samples in general
178
+ self.gather_function = self.accelerator.gather
179
+ self._gen_kwargs = gen_kwargs
180
+ return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
181
+
182
+ def predict(
183
+ self,
184
+ test_dataset: Dataset,
185
+ ignore_keys: Optional[List[str]] = None,
186
+ metric_key_prefix: str = "test",
187
+ **gen_kwargs,
188
+ ) -> "PredictionOutput":
189
+ """
190
+ Run prediction and returns predictions and potential metrics.
191
+
192
+ Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
193
+ will also return metrics, like in `evaluate()`.
194
+
195
+ Args:
196
+ test_dataset (`Dataset`):
197
+ Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the
198
+ `model.forward()` method are automatically removed. Has to implement the method `__len__`
199
+ ignore_keys (`List[str]`, *optional*):
200
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
201
+ gathering predictions.
202
+ metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
203
+ An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
204
+ "eval_bleu" if the prefix is `"eval"` (default)
205
+ max_length (`int`, *optional*):
206
+ The maximum target length to use when predicting with the generate method.
207
+ num_beams (`int`, *optional*):
208
+ Number of beams for beam search that will be used when predicting with the generate method. 1 means no
209
+ beam search.
210
+ gen_kwargs:
211
+ Additional `generate` specific kwargs.
212
+
213
+ <Tip>
214
+
215
+ If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
216
+ padding in a token classification task) the predictions will be padded (on the right) to allow for
217
+ concatenation into one array. The padding index is -100.
218
+
219
+ </Tip>
220
+
221
+ Returns: *NamedTuple* A namedtuple with the following keys:
222
+
223
+ - predictions (`np.ndarray`): The predictions on `test_dataset`.
224
+ - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
225
+ - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
226
+ labels).
227
+ """
228
+
229
+ gen_kwargs = gen_kwargs.copy()
230
+
231
+ # Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the
232
+ # training args
233
+ if (
234
+ gen_kwargs.get("max_length") is None
235
+ and gen_kwargs.get("max_new_tokens") is None
236
+ and self.args.generation_max_length is not None
237
+ ):
238
+ gen_kwargs["max_length"] = self.args.generation_max_length
239
+ if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None:
240
+ gen_kwargs["num_beams"] = self.args.generation_num_beams
241
+ self.gather_function = self.accelerator.gather
242
+ self._gen_kwargs = gen_kwargs
243
+
244
+ return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
245
+
246
+ def prediction_step(
247
+ self,
248
+ model: nn.Module,
249
+ inputs: Dict[str, Union[torch.Tensor, Any]],
250
+ prediction_loss_only: bool,
251
+ ignore_keys: Optional[List[str]] = None,
252
+ **gen_kwargs,
253
+ ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
254
+ """
255
+ Perform an evaluation step on `model` using `inputs`.
256
+
257
+ Subclass and override to inject custom behavior.
258
+
259
+ Args:
260
+ model (`nn.Module`):
261
+ The model to evaluate.
262
+ inputs (`Dict[str, Union[torch.Tensor, Any]]`):
263
+ The inputs and targets of the model.
264
+
265
+ The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
266
+ argument `labels`. Check your model's documentation for all accepted arguments.
267
+ prediction_loss_only (`bool`):
268
+ Whether or not to return the loss only.
269
+ gen_kwargs:
270
+ Additional `generate` specific kwargs.
271
+
272
+ Return:
273
+ Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
274
+ labels (each being optional).
275
+ """
276
+
277
+ if not self.args.predict_with_generate or prediction_loss_only:
278
+ return super().prediction_step(
279
+ model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
280
+ )
281
+
282
+ has_labels = "labels" in inputs
283
+ inputs = self._prepare_inputs(inputs)
284
+
285
+ # Priority (handled in generate):
286
+ # non-`None` gen_kwargs > model.generation_config > default GenerationConfig()
287
+ if len(gen_kwargs) == 0 and hasattr(self, "_gen_kwargs"):
288
+ gen_kwargs = self._gen_kwargs.copy()
289
+ if "num_beams" in gen_kwargs and gen_kwargs["num_beams"] is None:
290
+ gen_kwargs.pop("num_beams")
291
+ if "max_length" in gen_kwargs and gen_kwargs["max_length"] is None:
292
+ gen_kwargs.pop("max_length")
293
+
294
+ default_synced_gpus = True if is_deepspeed_zero3_enabled() else False
295
+ gen_kwargs["synced_gpus"] = (
296
+ gen_kwargs["synced_gpus"] if gen_kwargs.get("synced_gpus") is not None else default_synced_gpus
297
+ )
298
+
299
+ generation_inputs = inputs.copy()
300
+ # If the `decoder_input_ids` was created from `labels`, evict the former, so that the model can freely generate
301
+ # (otherwise, it would continue generating from the padded `decoder_input_ids`)
302
+ if (
303
+ "labels" in generation_inputs
304
+ and "decoder_input_ids" in generation_inputs
305
+ and generation_inputs["labels"].shape == generation_inputs["decoder_input_ids"].shape
306
+ ):
307
+ generation_inputs = {
308
+ k: v for k, v in inputs.items() if k not in ("decoder_input_ids", "decoder_attention_mask")
309
+ }
310
+ generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs)
311
+
312
+ # Temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop
313
+ # TODO: remove this hack when the legacy code that initializes generation_config from a model config is
314
+ # removed in https://github.com/huggingface/transformers/blob/98d88b23f54e5a23e741833f1e973fdf600cc2c5/src/transformers/generation/utils.py#L1183
315
+ if self.model.generation_config._from_model_config:
316
+ self.model.generation_config._from_model_config = False
317
+
318
+ # Retrieves GenerationConfig from model.generation_config
319
+ gen_config = self.model.generation_config
320
+ # in case the batch is shorter than max length, the output should be padded
321
+ if generated_tokens.shape[-1] < gen_config.max_length:
322
+ generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length)
323
+ elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1:
324
+ generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1)
325
+
326
+ with torch.no_grad():
327
+ if has_labels:
328
+ with self.compute_loss_context_manager():
329
+ outputs = model(**inputs)
330
+ if self.label_smoother is not None:
331
+ loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
332
+ else:
333
+ loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach()
334
+ else:
335
+ loss = None
336
+
337
+ if self.args.prediction_loss_only:
338
+ return loss, None, None
339
+
340
+ if has_labels:
341
+ labels = inputs["labels"]
342
+ if labels.shape[-1] < gen_config.max_length:
343
+ labels = self._pad_tensors_to_max_len(labels, gen_config.max_length)
344
+ elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1:
345
+ labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1)
346
+ else:
347
+ labels = None
348
+
349
+ return loss, generated_tokens, labels
350
+
351
+ def _pad_tensors_to_max_len(self, tensor, max_length):
352
+ if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"):
353
+ # If PAD token is not defined at least EOS token has to be defined
354
+ pad_token_id = (
355
+ self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
356
+ )
357
+ else:
358
+ if self.model.config.pad_token_id is not None:
359
+ pad_token_id = self.model.config.pad_token_id
360
+ else:
361
+ raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors")
362
+
363
+ padded_tensor = pad_token_id * torch.ones(
364
+ (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
365
+ )
366
+ padded_tensor[:, : tensor.shape[-1]] = tensor
367
+ return padded_tensor
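+
+
+ # A minimal usage sketch (illustrative only): the model, dataset, tokenizer and
+ # collator names below are assumptions, and `training_args` is expected to be a
+ # `Seq2SeqTrainingArguments` with `predict_with_generate=True`.
+ def _seq2seq_trainer_example(model, training_args, eval_dataset, tokenizer, data_collator):
+     trainer = Seq2SeqTrainer(
+         model=model,
+         args=training_args,
+         eval_dataset=eval_dataset,
+         tokenizer=tokenizer,
+         data_collator=data_collator,
+     )
+     # Generation-specific kwargs are forwarded to `model.generate` during evaluation.
+     return trainer.evaluate(max_new_tokens=64, num_beams=4)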
llmeval-env/lib/python3.10/site-packages/transformers/training_args_tf.py ADDED
@@ -0,0 +1,299 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from dataclasses import dataclass, field
17
+ from typing import Optional, Tuple
18
+
19
+ from .training_args import TrainingArguments
20
+ from .utils import cached_property, is_tf_available, logging, requires_backends
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ if is_tf_available():
26
+ import tensorflow as tf
27
+
28
+ from .modeling_tf_utils import keras
29
+
30
+
31
+ @dataclass
32
+ class TFTrainingArguments(TrainingArguments):
33
+ """
34
+ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
35
+ itself**.
36
+
37
+ Using [`HfArgumentParser`] we can turn this class into
38
+ [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
39
+ command line.
40
+
41
+ Parameters:
42
+ output_dir (`str`):
43
+ The output directory where the model predictions and checkpoints will be written.
44
+ overwrite_output_dir (`bool`, *optional*, defaults to `False`):
45
+ If `True`, overwrite the content of the output directory. Use this to continue training if `output_dir`
46
+ points to a checkpoint directory.
47
+ do_train (`bool`, *optional*, defaults to `False`):
48
+ Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used
49
+ by your training/evaluation scripts instead. See the [example
50
+ scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
51
+ do_eval (`bool`, *optional*):
52
+ Whether to run evaluation on the validation set or not. Will be set to `True` if `evaluation_strategy` is
53
+ different from `"no"`. This argument is not directly used by [`Trainer`], it's intended to be used by your
54
+ training/evaluation scripts instead. See the [example
55
+ scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
56
+ do_predict (`bool`, *optional*, defaults to `False`):
57
+ Whether to run predictions on the test set or not. This argument is not directly used by [`Trainer`], it's
58
+ intended to be used by your training/evaluation scripts instead. See the [example
59
+ scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
60
+ evaluation_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`):
61
+ The evaluation strategy to adopt during training. Possible values are:
62
+
63
+ - `"no"`: No evaluation is done during training.
64
+ - `"steps"`: Evaluation is done (and logged) every `eval_steps`.
65
+ - `"epoch"`: Evaluation is done at the end of each epoch.
66
+
67
+ per_device_train_batch_size (`int`, *optional*, defaults to 8):
68
+ The batch size per GPU/TPU core/CPU for training.
69
+ per_device_eval_batch_size (`int`, *optional*, defaults to 8):
70
+ The batch size per GPU/TPU core/CPU for evaluation.
71
+ gradient_accumulation_steps (`int`, *optional*, defaults to 1):
72
+ Number of update steps to accumulate the gradients for, before performing a backward/update pass.
73
+
74
+ <Tip warning={true}>
75
+
76
+ When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging,
77
+ evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training examples.
78
+
79
+ </Tip>
80
+
81
+ learning_rate (`float`, *optional*, defaults to 5e-5):
82
+ The initial learning rate for Adam.
83
+ weight_decay (`float`, *optional*, defaults to 0):
84
+ The weight decay to apply (if not zero).
85
+ adam_beta1 (`float`, *optional*, defaults to 0.9):
86
+ The beta1 hyperparameter for the Adam optimizer.
87
+ adam_beta2 (`float`, *optional*, defaults to 0.999):
88
+ The beta2 hyperparameter for the Adam optimizer.
89
+ adam_epsilon (`float`, *optional*, defaults to 1e-8):
90
+ The epsilon hyperparameter for the Adam optimizer.
91
+ max_grad_norm (`float`, *optional*, defaults to 1.0):
92
+ Maximum gradient norm (for gradient clipping).
93
+ num_train_epochs(`float`, *optional*, defaults to 3.0):
94
+ Total number of training epochs to perform.
95
+ max_steps (`int`, *optional*, defaults to -1):
96
+ If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`.
97
+ For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until
98
+ `max_steps` is reached.
99
+ warmup_ratio (`float`, *optional*, defaults to 0.0):
100
+ Ratio of total training steps used for a linear warmup from 0 to `learning_rate`.
101
+ warmup_steps (`int`, *optional*, defaults to 0):
102
+ Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`.
103
+ logging_dir (`str`, *optional*):
104
+ [TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to
105
+ *runs/**CURRENT_DATETIME_HOSTNAME***.
106
+ logging_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`):
107
+ The logging strategy to adopt during training. Possible values are:
108
+
109
+ - `"no"`: No logging is done during training.
110
+ - `"epoch"`: Logging is done at the end of each epoch.
111
+ - `"steps"`: Logging is done every `logging_steps`.
112
+
113
+ logging_first_step (`bool`, *optional*, defaults to `False`):
114
+ Whether to log and evaluate the first `global_step` or not.
115
+ logging_steps (`int`, *optional*, defaults to 500):
116
+ Number of update steps between two logs if `logging_strategy="steps"`.
117
+ save_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`):
118
+ The checkpoint save strategy to adopt during training. Possible values are:
119
+
120
+ - `"no"`: No save is done during training.
121
+ - `"epoch"`: Save is done at the end of each epoch.
122
+ - `"steps"`: Save is done every `save_steps`.
123
+
124
+ save_steps (`int`, *optional*, defaults to 500):
125
+ Number of updates steps before two checkpoint saves if `save_strategy="steps"`.
126
+ save_total_limit (`int`, *optional*):
127
+ If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
128
+ `output_dir`.
129
+ no_cuda (`bool`, *optional*, defaults to `False`):
130
+ Whether to not use CUDA even when it is available or not.
131
+ seed (`int`, *optional*, defaults to 42):
132
+ Random seed that will be set at the beginning of training.
133
+ fp16 (`bool`, *optional*, defaults to `False`):
134
+ Whether to use 16-bit (mixed) precision training (through NVIDIA Apex) instead of 32-bit training.
135
+ fp16_opt_level (`str`, *optional*, defaults to 'O1'):
136
+ For `fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details on
137
+ the [Apex documentation](https://nvidia.github.io/apex/amp).
138
+ local_rank (`int`, *optional*, defaults to -1):
139
+ During distributed training, the rank of the process.
140
+ tpu_num_cores (`int`, *optional*):
141
+ When training on TPU, the number of TPU cores (automatically passed by launcher script).
142
+ debug (`bool`, *optional*, defaults to `False`):
143
+ Whether to activate the trace to record computation graphs and profiling information or not.
144
+ dataloader_drop_last (`bool`, *optional*, defaults to `False`):
145
+ Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
146
+ or not.
147
+ eval_steps (`int`, *optional*, defaults to 1000):
148
+ Number of update steps before two evaluations.
149
+ past_index (`int`, *optional*, defaults to -1):
150
+ Some models like [TransformerXL](../model_doc/transformerxl) or :doc*XLNet <../model_doc/xlnet>* can make
151
+ use of the past hidden states for their predictions. If this argument is set to a positive int, the
152
+ `Trainer` will use the corresponding output (usually index 2) as the past state and feed it to the model at
153
+ the next training step under the keyword argument `mems`.
154
+ tpu_name (`str`, *optional*):
155
+ The name of the TPU the process is running on.
156
+ tpu_zone (`str`, *optional*):
157
+ The zone of the TPU the process is running on. If not specified, we will attempt to automatically detect
158
+ from metadata.
159
+ gcp_project (`str`, *optional*):
160
+ Google Cloud Project name for the Cloud TPU-enabled project. If not specified, we will attempt to
161
+ automatically detect from metadata.
162
+ run_name (`str`, *optional*):
163
+ A descriptor for the run. Notably used for wandb logging.
164
+ xla (`bool`, *optional*):
165
+ Whether to activate the XLA compilation or not.
166
+ """
167
+
+     framework = "tf"
+     tpu_name: Optional[str] = field(
+         default=None,
+         metadata={"help": "Name of TPU"},
+     )
+
+     tpu_zone: Optional[str] = field(
+         default=None,
+         metadata={"help": "Zone of TPU"},
+     )
+
+     gcp_project: Optional[str] = field(
+         default=None,
+         metadata={"help": "Name of Cloud TPU-enabled project"},
+     )
+
+     poly_power: float = field(
+         default=1.0,
+         metadata={"help": "Power for the Polynomial decay LR scheduler."},
+     )
+
+     xla: bool = field(default=False, metadata={"help": "Whether to activate the XLA compilation or not"})
+
+     @cached_property
+     def _setup_strategy(self) -> "tf.distribute.Strategy":
+         requires_backends(self, ["tf"])
+         logger.info("Tensorflow: setting up strategy")
+
+         gpus = tf.config.list_physical_devices("GPU")
+
+         # Set to float16 at first
+         if self.fp16:
+             keras.mixed_precision.set_global_policy("mixed_float16")
+
+         if self.no_cuda:
+             strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
+         else:
+             try:
+                 if self.tpu_name:
+                     tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
+                         self.tpu_name, zone=self.tpu_zone, project=self.gcp_project
+                     )
+                 else:
+                     tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
+             except ValueError:
+                 if self.tpu_name:
+                     raise RuntimeError(f"Couldn't connect to TPU {self.tpu_name}!")
+                 else:
+                     tpu = None
+
+             if tpu:
+                 # Set to bfloat16 in case of TPU
+                 if self.fp16:
+                     keras.mixed_precision.set_global_policy("mixed_bfloat16")
+
+                 tf.config.experimental_connect_to_cluster(tpu)
+                 tf.tpu.experimental.initialize_tpu_system(tpu)
+
+                 strategy = tf.distribute.TPUStrategy(tpu)
+
+             elif len(gpus) == 0:
+                 strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
+             elif len(gpus) == 1:
+                 strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
+             elif len(gpus) > 1:
+                 # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
+                 strategy = tf.distribute.MirroredStrategy()
+             else:
+                 raise ValueError("Cannot find the proper strategy, please check your environment properties.")
+
+         return strategy
+
+     @property
+     def strategy(self) -> "tf.distribute.Strategy":
+         """
+         The strategy used for distributed training.
+         """
+         requires_backends(self, ["tf"])
+         return self._setup_strategy
+
+     @property
+     def n_replicas(self) -> int:
+         """
+         The number of replicas (CPUs, GPUs or TPU cores) used in this training.
+         """
+         requires_backends(self, ["tf"])
+         return self._setup_strategy.num_replicas_in_sync
+
+     @property
+     def should_log(self):
+         """
+         Whether or not the current process should produce logs.
+         """
+         return False  # TF Logging is handled by Keras not the Trainer
+
+     @property
+     def train_batch_size(self) -> int:
+         """
+         The actual batch size for training (may differ from `per_gpu_train_batch_size` in distributed training).
+         """
+         if self.per_gpu_train_batch_size:
+             logger.warning(
+                 "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
+                 "version. Using `--per_device_train_batch_size` is preferred."
+             )
+         per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
+         return per_device_batch_size * self.n_replicas
+
+     @property
+     def eval_batch_size(self) -> int:
+         """
+         The actual batch size for evaluation (may differ from `per_gpu_eval_batch_size` in distributed training).
+         """
+         if self.per_gpu_eval_batch_size:
+             logger.warning(
+                 "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
+                 "version. Using `--per_device_eval_batch_size` is preferred."
+             )
+         per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
+         return per_device_batch_size * self.n_replicas
+
+     @property
+     def n_gpu(self) -> int:
+         """
+         The number of replicas (CPUs, GPUs or TPU cores) used in this training. Deprecated; use `n_replicas` instead.
+         """
+         requires_backends(self, ["tf"])
+         warnings.warn(
+             "The n_gpu argument is deprecated and will be removed in a future version, use n_replicas instead.",
+             FutureWarning,
+         )
+         return self._setup_strategy.num_replicas_in_sync
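
Usage sketch (not part of the diff above): `TFTrainingArguments` is normally populated through `HfArgumentParser` and its `strategy` property is used to scope model creation. This assumes TensorFlow and `transformers` are installed; the argument values are illustrative only.

```python
# Minimal sketch: parse TFTrainingArguments from the command line and use its strategy.
from transformers import HfArgumentParser, TFTrainingArguments

parser = HfArgumentParser(TFTrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(
    args=["--output_dir", "./out", "--per_device_train_batch_size", "16"]  # illustrative values
)

# The distribution strategy is resolved lazily: TPU if one can be reached, otherwise
# MirroredStrategy for multiple GPUs, a single GPU, or the CPU.
with training_args.strategy.scope():
    pass  # build the Keras model here so its variables follow the strategy

# Global batch size = per-device batch size * number of replicas.
print(training_args.n_replicas, training_args.train_batch_size)
```
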
llmeval-env/lib/python3.10/site-packages/transformers/utils/__init__.py ADDED
@@ -0,0 +1,258 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from huggingface_hub import get_full_repo_name  # for backward compatibility
+ from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY  # for backward compatibility
+ from packaging import version
+
+ from .. import __version__
+ from .backbone_utils import BackboneConfigMixin, BackboneMixin
+ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
+ from .doc import (
+     add_code_sample_docstrings,
+     add_end_docstrings,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     copy_func,
+     replace_return_docstrings,
+ )
+ from .generic import (
+     ContextManagers,
+     ExplicitEnum,
+     ModelOutput,
+     PaddingStrategy,
+     TensorType,
+     add_model_info_to_auto_map,
+     cached_property,
+     can_return_loss,
+     expand_dims,
+     find_labels,
+     flatten_dict,
+     infer_framework,
+     is_jax_tensor,
+     is_numpy_array,
+     is_tensor,
+     is_tf_symbolic_tensor,
+     is_tf_tensor,
+     is_torch_device,
+     is_torch_dtype,
+     is_torch_tensor,
+     reshape,
+     squeeze,
+     strtobool,
+     tensor_size,
+     to_numpy,
+     to_py_obj,
+     transpose,
+     working_or_temp_dir,
+ )
+ from .hub import (
+     CLOUDFRONT_DISTRIB_PREFIX,
+     HF_MODULES_CACHE,
+     HUGGINGFACE_CO_PREFIX,
+     HUGGINGFACE_CO_RESOLVE_ENDPOINT,
+     PYTORCH_PRETRAINED_BERT_CACHE,
+     PYTORCH_TRANSFORMERS_CACHE,
+     S3_BUCKET_PREFIX,
+     TRANSFORMERS_CACHE,
+     TRANSFORMERS_DYNAMIC_MODULE_NAME,
+     EntryNotFoundError,
+     PushInProgress,
+     PushToHubMixin,
+     RepositoryNotFoundError,
+     RevisionNotFoundError,
+     cached_file,
+     default_cache_path,
+     define_sagemaker_information,
+     download_url,
+     extract_commit_hash,
+     get_cached_models,
+     get_file_from_repo,
+     has_file,
+     http_user_agent,
+     is_offline_mode,
+     is_remote_url,
+     move_cache,
+     send_example_telemetry,
+     try_to_load_from_cache,
+ )
+ from .import_utils import (
+     ACCELERATE_MIN_VERSION,
+     ENV_VARS_TRUE_AND_AUTO_VALUES,
+     ENV_VARS_TRUE_VALUES,
+     TORCH_FX_REQUIRED_VERSION,
+     USE_JAX,
+     USE_TF,
+     USE_TORCH,
+     XLA_FSDPV2_MIN_VERSION,
+     DummyObject,
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     ccl_version,
+     direct_transformers_import,
+     get_torch_version,
+     is_accelerate_available,
+     is_apex_available,
+     is_aqlm_available,
+     is_auto_awq_available,
+     is_auto_gptq_available,
+     is_av_available,
+     is_bitsandbytes_available,
+     is_bs4_available,
+     is_coloredlogs_available,
+     is_cv2_available,
+     is_cython_available,
+     is_datasets_available,
+     is_decord_available,
+     is_detectron2_available,
+     is_essentia_available,
+     is_faiss_available,
+     is_flash_attn_2_available,
+     is_flash_attn_greater_or_equal_2_10,
+     is_flax_available,
+     is_fsdp_available,
+     is_ftfy_available,
+     is_g2p_en_available,
+     is_galore_torch_available,
+     is_in_notebook,
+     is_ipex_available,
+     is_jieba_available,
+     is_jinja_available,
+     is_jumanpp_available,
+     is_kenlm_available,
+     is_keras_nlp_available,
+     is_levenshtein_available,
+     is_librosa_available,
+     is_mlx_available,
+     is_natten_available,
+     is_ninja_available,
+     is_nltk_available,
+     is_onnx_available,
+     is_openai_available,
+     is_optimum_available,
+     is_pandas_available,
+     is_peft_available,
+     is_phonemizer_available,
+     is_pretty_midi_available,
+     is_protobuf_available,
+     is_psutil_available,
+     is_py3nvml_available,
+     is_pyctcdecode_available,
+     is_pytesseract_available,
+     is_pytest_available,
+     is_pytorch_quantization_available,
+     is_quanto_available,
+     is_rjieba_available,
+     is_sacremoses_available,
+     is_safetensors_available,
+     is_sagemaker_dp_enabled,
+     is_sagemaker_mp_enabled,
+     is_scipy_available,
+     is_sentencepiece_available,
+     is_seqio_available,
+     is_sklearn_available,
+     is_soundfile_availble,
+     is_spacy_available,
+     is_speech_available,
+     is_sudachi_available,
+     is_sudachi_projection_available,
+     is_tensorflow_probability_available,
+     is_tensorflow_text_available,
+     is_tf2onnx_available,
+     is_tf_available,
+     is_timm_available,
+     is_tokenizers_available,
+     is_torch_available,
+     is_torch_bf16_available,
+     is_torch_bf16_available_on_device,
+     is_torch_bf16_cpu_available,
+     is_torch_bf16_gpu_available,
+     is_torch_compile_available,
+     is_torch_cuda_available,
+     is_torch_fp16_available_on_device,
+     is_torch_fx_available,
+     is_torch_fx_proxy,
+     is_torch_mlu_available,
+     is_torch_mps_available,
+     is_torch_neuroncore_available,
+     is_torch_npu_available,
+     is_torch_sdpa_available,
+     is_torch_tensorrt_fx_available,
+     is_torch_tf32_available,
+     is_torch_tpu_available,
+     is_torch_xla_available,
+     is_torch_xpu_available,
+     is_torchaudio_available,
+     is_torchdistx_available,
+     is_torchdynamo_available,
+     is_torchdynamo_compiling,
+     is_torchvision_available,
+     is_training_run_on_sagemaker,
+     is_vision_available,
+     requires_backends,
+     torch_only_method,
+ )
+ from .peft_utils import (
+     ADAPTER_CONFIG_NAME,
+     ADAPTER_SAFE_WEIGHTS_NAME,
+     ADAPTER_WEIGHTS_NAME,
+     check_peft_version,
+     find_adapter_config_file,
+ )
+
+
+ WEIGHTS_NAME = "pytorch_model.bin"
+ WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
+ TF2_WEIGHTS_NAME = "tf_model.h5"
+ TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
+ TF_WEIGHTS_NAME = "model.ckpt"
+ FLAX_WEIGHTS_NAME = "flax_model.msgpack"
+ FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
+ SAFE_WEIGHTS_NAME = "model.safetensors"
+ SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
+ CONFIG_NAME = "config.json"
+ FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
+ IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
+ PROCESSOR_NAME = "processor_config.json"
+ GENERATION_CONFIG_NAME = "generation_config.json"
+ MODEL_CARD_NAME = "modelcard.json"
+
+ SENTENCEPIECE_UNDERLINE = "▁"
+ SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
+
+ MULTIPLE_CHOICE_DUMMY_INPUTS = [
+     [[0, 1, 0, 1], [1, 0, 0, 1]]
+ ] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
+ DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
+ DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
+
+
+ def check_min_version(min_version):
+     if version.parse(__version__) < version.parse(min_version):
+         if "dev" in min_version:
+             error_message = (
+                 "This example requires a source install from HuggingFace Transformers (see "
+                 "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
+             )
+         else:
+             error_message = f"This example requires a minimum version of {min_version},"
+         error_message += f" but the version found is {__version__}.\n"
+         raise ImportError(
+             error_message
+             + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
+             "versions of HuggingFace Transformers."
+         )
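
Brief sketch of how downstream code typically consumes these re-exports (not part of the diff above; the version string below is illustrative only):

```python
# Minimal sketch, assuming transformers is installed.
from transformers.utils import WEIGHTS_NAME, check_min_version, is_tf_available, is_torch_available

check_min_version("4.30.0")  # raises ImportError when the installed version is older

backend = "torch" if is_torch_available() else ("tf" if is_tf_available() else "none")
print(backend, WEIGHTS_NAME)  # e.g. "torch pytorch_model.bin"
```
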
llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/backbone_utils.cpython-310.pyc ADDED
Binary file (13 kB).

llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.cpython-310.pyc ADDED
Binary file (1.21 kB).

llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_flax_objects.cpython-310.pyc ADDED
Binary file (49.4 kB).

llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_pt_objects.cpython-310.pyc ADDED
Binary file (336 kB).

llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_sentencepiece_and_tokenizers_objects.cpython-310.pyc ADDED
Binary file (501 Bytes).

llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_speech_objects.cpython-310.pyc ADDED
Binary file (894 Bytes).

llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_tokenizers_objects.cpython-310.pyc ADDED
Binary file (15.7 kB).

llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/fx.cpython-310.pyc ADDED
Binary file (37.8 kB).

llmeval-env/lib/python3.10/site-packages/transformers/utils/__pycache__/peft_utils.cpython-310.pyc ADDED
Binary file (4.35 kB).