applied-ai-018 committed on
Commit 08b2f23 · verified · 1 Parent(s): 1274b42

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow.so.1500 +3 -0
  3. env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.jsn.gz +3 -0
  4. env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.parquet +3 -0
  5. env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.some-named-index.parquet +3 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/activations.py +239 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/activations_tf.py +147 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/audio_utils.py +825 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__init__.py +0 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_tf.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_utils.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_tf.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_utils.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark.py +271 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_args.py +124 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_tf.py +136 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_utils.py +166 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_tf.py +303 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_utils.py +914 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/cache_utils.py +435 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/configuration_utils.py +1133 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/convert_pytorch_checkpoint_to_tf2.py +498 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py +1525 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/convert_slow_tokenizers_checkpoints_to_fast.py +126 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py +88 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/dependency_versions_table.py +92 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/feature_extraction_sequence_utils.py +371 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/file_utils.py +133 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/generation_flax_utils.py +28 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/hf_argparser.py +419 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/hyperparameter_search.py +141 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/image_processing_utils.py +793 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/image_transforms.py +801 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__init__.py +158 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/__init__.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/aqlm.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/awq.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/bitsandbytes.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/deepspeed.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/integration_utils.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/peft.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/quanto.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/tpu.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/integrations/aqlm.py +99 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/integrations/awq.py +421 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/integrations/bitsandbytes.py +321 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/integrations/deepspeed.py +438 -0
.gitattributes CHANGED
@@ -208,3 +208,4 @@ env-llmeval/lib/python3.10/site-packages/torch/lib/libcusparseLt-f8b4a9fb.so.0 f
208
  env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
209
  env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so filter=lfs diff=lfs merge=lfs -text
210
  env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
211
+ env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow.so.1500 filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow.so.1500 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:607b9370170efd1f347aa188239d228c6c17630c2e7227cb80466a4453e9cbfe
3
+ size 61303528
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.jsn.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91313261d3c3000485517f0d8b2af2e6644317e63e2ecae14808dbbb8e779af0
3
+ size 19313
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be6773848ce905b99192adc68f0c3b2aabab7d214db50b92a52203790566ab2b
3
+ size 4372
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.some-named-index.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5468128ea8a1091b5d07195471f3f9b3705247b69440aba45be6c68092dffc76
3
+ size 4008
env-llmeval/lib/python3.10/site-packages/transformers/activations.py ADDED
@@ -0,0 +1,239 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from collections import OrderedDict
17
+
18
+ import torch
19
+ from packaging import version
20
+ from torch import Tensor, nn
21
+
22
+ from .utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class PytorchGELUTanh(nn.Module):
29
+ """
30
+ A fast C implementation of the tanh approximation of the GeLU activation function. See
31
+ https://arxiv.org/abs/1606.08415.
32
+
33
+ This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical
34
+ match due to rounding errors.
35
+ """
36
+
37
+ def __init__(self):
38
+ super().__init__()
39
+ if version.parse(torch.__version__) < version.parse("1.12.0"):
40
+ raise ImportError(
41
+ f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use "
42
+ "PytorchGELUTanh. Please upgrade torch."
43
+ )
44
+
45
+ def forward(self, input: Tensor) -> Tensor:
46
+ return nn.functional.gelu(input, approximate="tanh")
47
+
48
+
49
+ class NewGELUActivation(nn.Module):
50
+ """
51
+ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
52
+ the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
53
+ """
54
+
55
+ def forward(self, input: Tensor) -> Tensor:
56
+ return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
57
+
58
+
59
+ class GELUActivation(nn.Module):
60
+ """
61
+ Original Implementation of the GELU activation function in Google BERT repo when initially created. For
62
+ information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
63
+ torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
64
+ Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
65
+ """
66
+
67
+ def __init__(self, use_gelu_python: bool = False):
68
+ super().__init__()
69
+ if use_gelu_python:
70
+ self.act = self._gelu_python
71
+ else:
72
+ self.act = nn.functional.gelu
73
+
74
+ def _gelu_python(self, input: Tensor) -> Tensor:
75
+ return input * 0.5 * (1.0 + torch.erf(input / math.sqrt(2.0)))
76
+
77
+ def forward(self, input: Tensor) -> Tensor:
78
+ return self.act(input)
79
+
80
+
81
+ class FastGELUActivation(nn.Module):
82
+ """
83
+ Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
84
+ """
85
+
86
+ def forward(self, input: Tensor) -> Tensor:
87
+ return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input)))
88
+
89
+
90
+ class QuickGELUActivation(nn.Module):
91
+ """
92
+ Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
93
+ """
94
+
95
+ def forward(self, input: Tensor) -> Tensor:
96
+ return input * torch.sigmoid(1.702 * input)
97
+
98
+
99
+ class ClippedGELUActivation(nn.Module):
100
+ """
101
+ Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purposes, as
102
+ it allows mapping negative values in the GeLU spectrum. For more information on this trick, please refer to
103
+ https://arxiv.org/abs/2004.09602.
104
+
105
+ Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
106
+ initially created.
107
+
108
+ For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
109
+ torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://arxiv.org/abs/1606.08415
110
+ """
111
+
112
+ def __init__(self, min: float, max: float):
113
+ if min > max:
114
+ raise ValueError(f"min should be < max (got min: {min}, max: {max})")
115
+
116
+ super().__init__()
117
+ self.min = min
118
+ self.max = max
119
+
120
+ def forward(self, x: Tensor) -> Tensor:
121
+ return torch.clip(gelu(x), self.min, self.max)
122
+
123
+
124
+ class AccurateGELUActivation(nn.Module):
125
+ """
126
+ Applies GELU approximation that is faster than default and more accurate than QuickGELU. See:
127
+ https://github.com/hendrycks/GELUs
128
+
129
+ Implemented along with MEGA (Moving Average Equipped Gated Attention)
130
+ """
131
+
132
+ def __init__(self):
133
+ super().__init__()
134
+ self.precomputed_constant = math.sqrt(2 / math.pi)
135
+
136
+ def forward(self, input: Tensor) -> Tensor:
137
+ return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3))))
138
+
139
+
140
+ class MishActivation(nn.Module):
141
+ """
142
+ See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
143
+ visit the official repository for the paper: https://github.com/digantamisra98/Mish
144
+ """
145
+
146
+ def __init__(self):
147
+ super().__init__()
148
+ if version.parse(torch.__version__) < version.parse("1.9.0"):
149
+ self.act = self._mish_python
150
+ else:
151
+ self.act = nn.functional.mish
152
+
153
+ def _mish_python(self, input: Tensor) -> Tensor:
154
+ return input * torch.tanh(nn.functional.softplus(input))
155
+
156
+ def forward(self, input: Tensor) -> Tensor:
157
+ return self.act(input)
158
+
159
+
160
+ class LinearActivation(nn.Module):
161
+ """
162
+ Applies the linear activation function, i.e. forwarding input directly to output.
163
+ """
164
+
165
+ def forward(self, input: Tensor) -> Tensor:
166
+ return input
167
+
168
+
169
+ class LaplaceActivation(nn.Module):
170
+ """
171
+ Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See
172
+ https://arxiv.org/abs/2209.10655
173
+
174
+ Inspired by squared relu, but with bounded range and gradient for better stability
175
+ """
176
+
177
+ def forward(self, input, mu=0.707107, sigma=0.282095):
178
+ input = (input - mu).div(sigma * math.sqrt(2.0))
179
+ return 0.5 * (1.0 + torch.erf(input))
180
+
181
+
182
+ class ReLUSquaredActivation(nn.Module):
183
+ """
184
+ Applies the relu^2 activation introduced in https://arxiv.org/abs/2109.08668v2
185
+ """
186
+
187
+ def forward(self, input):
188
+ relu_applied = nn.functional.relu(input)
189
+ squared = torch.square(relu_applied)
190
+ return squared
191
+
192
+
193
+ class ClassInstantier(OrderedDict):
194
+ def __getitem__(self, key):
195
+ content = super().__getitem__(key)
196
+ cls, kwargs = content if isinstance(content, tuple) else (content, {})
197
+ return cls(**kwargs)
198
+
199
+
200
+ ACT2CLS = {
201
+ "gelu": GELUActivation,
202
+ "gelu_10": (ClippedGELUActivation, {"min": -10, "max": 10}),
203
+ "gelu_fast": FastGELUActivation,
204
+ "gelu_new": NewGELUActivation,
205
+ "gelu_python": (GELUActivation, {"use_gelu_python": True}),
206
+ "gelu_pytorch_tanh": PytorchGELUTanh,
207
+ "gelu_accurate": AccurateGELUActivation,
208
+ "laplace": LaplaceActivation,
209
+ "leaky_relu": nn.LeakyReLU,
210
+ "linear": LinearActivation,
211
+ "mish": MishActivation,
212
+ "quick_gelu": QuickGELUActivation,
213
+ "relu": nn.ReLU,
214
+ "relu2": ReLUSquaredActivation,
215
+ "relu6": nn.ReLU6,
216
+ "sigmoid": nn.Sigmoid,
217
+ "silu": nn.SiLU,
218
+ "swish": nn.SiLU,
219
+ "tanh": nn.Tanh,
220
+ }
221
+ ACT2FN = ClassInstantier(ACT2CLS)
222
+
223
+
224
+ def get_activation(activation_string):
225
+ if activation_string in ACT2FN:
226
+ return ACT2FN[activation_string]
227
+ else:
228
+ raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
229
+
230
+
231
+ # For backwards compatibility with: from activations import gelu_python
232
+ gelu_python = get_activation("gelu_python")
233
+ gelu_new = get_activation("gelu_new")
234
+ gelu = get_activation("gelu")
235
+ gelu_fast = get_activation("gelu_fast")
236
+ quick_gelu = get_activation("quick_gelu")
237
+ silu = get_activation("silu")
238
+ mish = get_activation("mish")
239
+ linear_act = get_activation("linear")
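A minimal usage sketch of the activation registry added above (the tensor values are illustrative, not from the diff): `get_activation` resolves a string name to a freshly instantiated module, and indexing `ACT2FN` does the same via `ClassInstantier`, passing any stored kwargs.

```python
import torch

from transformers.activations import ACT2FN, get_activation

x = torch.linspace(-3.0, 3.0, steps=5)

# Resolve an activation by its config-style string name.
act = get_activation("gelu_new")  # a NewGELUActivation instance
print(act(x))

# Indexing ACT2FN also instantiates; "gelu_10" becomes ClippedGELUActivation(min=-10, max=10).
clipped = ACT2FN["gelu_10"]
print(clipped(10.0 * x))
```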
env-llmeval/lib/python3.10/site-packages/transformers/activations_tf.py ADDED
@@ -0,0 +1,147 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+
17
+ import tensorflow as tf
18
+ from packaging.version import parse
19
+
20
+
21
+ try:
22
+ import tf_keras as keras
23
+ except (ModuleNotFoundError, ImportError):
24
+ import keras
25
+
26
+ if parse(keras.__version__).major > 2:
27
+ raise ValueError(
28
+ "Your currently installed version of Keras is Keras 3, but this is not yet supported in "
29
+ "Transformers. Please install the backwards-compatible tf-keras package with "
30
+ "`pip install tf-keras`."
31
+ )
32
+
33
+
34
+ def _gelu(x):
35
+ """
36
+ Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
37
+ initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
38
+ 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see
39
+ https://arxiv.org/abs/1606.08415
40
+ """
41
+ x = tf.convert_to_tensor(x)
42
+ cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
43
+
44
+ return x * cdf
45
+
46
+
47
+ def _gelu_new(x):
48
+ """
49
+ Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://arxiv.org/abs/1606.08415
50
+
51
+ Args:
52
+ x: float Tensor to perform activation
53
+
54
+ Returns:
55
+ `x` with the GELU activation applied.
56
+ """
57
+ x = tf.convert_to_tensor(x)
58
+ pi = tf.cast(math.pi, x.dtype)
59
+ coeff = tf.cast(0.044715, x.dtype)
60
+ cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
61
+
62
+ return x * cdf
63
+
64
+
65
+ def mish(x):
66
+ x = tf.convert_to_tensor(x)
67
+
68
+ return x * tf.tanh(tf.math.softplus(x))
69
+
70
+
71
+ def gelu_fast(x):
72
+ x = tf.convert_to_tensor(x)
73
+ coeff1 = tf.cast(0.044715, x.dtype)
74
+ coeff2 = tf.cast(0.7978845608, x.dtype)
75
+
76
+ return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
77
+
78
+
79
+ def quick_gelu(x):
80
+ x = tf.convert_to_tensor(x)
81
+ coeff = tf.cast(1.702, x.dtype)
82
+ return x * tf.math.sigmoid(coeff * x)
83
+
84
+
85
+ def gelu_10(x):
86
+ """
87
+ Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purposes, as
88
+ it allows mapping negative values in the GeLU spectrum. For more information on this trick, please refer to
89
+ https://arxiv.org/abs/2004.09602
90
+
91
+ Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
92
+ initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
93
+ 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see
94
+ https://arxiv.org/abs/1606.08415 :param x: :return:
95
+ """
96
+ return tf.clip_by_value(_gelu(x), -10, 10)
97
+
98
+
99
+ def glu(x, axis=-1):
100
+ """
101
+ Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where
102
+ the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B).
103
+
104
+ Args:
105
+ `x`: float Tensor to perform activation
106
+ `axis`: dimension across which `x` is split in half
107
+
108
+ Returns:
109
+ `x` with the GLU activation applied (with its size halved across the dimension `axis`).
110
+ """
111
+ a, b = tf.split(x, 2, axis=axis)
112
+ return a * tf.math.sigmoid(b)
113
+
114
+
115
+ if parse(tf.version.VERSION) >= parse("2.4"):
116
+
117
+ def approximate_gelu_wrap(x):
118
+ return keras.activations.gelu(x, approximate=True)
119
+
120
+ gelu = keras.activations.gelu
121
+ gelu_new = approximate_gelu_wrap
122
+ else:
123
+ gelu = _gelu
124
+ gelu_new = _gelu_new
125
+
126
+
127
+ ACT2FN = {
128
+ "gelu": gelu,
129
+ "gelu_10": gelu_10,
130
+ "gelu_fast": gelu_fast,
131
+ "gelu_new": gelu_new,
132
+ "glu": glu,
133
+ "mish": mish,
134
+ "quick_gelu": quick_gelu,
135
+ "relu": keras.activations.relu,
136
+ "sigmoid": keras.activations.sigmoid,
137
+ "silu": keras.activations.swish,
138
+ "swish": keras.activations.swish,
139
+ "tanh": keras.activations.tanh,
140
+ }
141
+
142
+
143
+ def get_tf_activation(activation_string):
144
+ if activation_string in ACT2FN:
145
+ return ACT2FN[activation_string]
146
+ else:
147
+ raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
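For the TF side, a comparable hedged sketch (illustrative values; assumes a TF 2.x environment with `tf-keras`, as the import guard above requires):

```python
import tensorflow as tf

from transformers.activations_tf import get_tf_activation

x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0])

gelu_new = get_tf_activation("gelu_new")  # tanh-approximated GELU
gelu_10 = get_tf_activation("gelu_10")    # exact GELU clipped to [-10, 10]

print(gelu_new(x).numpy())
print(gelu_10(10.0 * x).numpy())
```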
env-llmeval/lib/python3.10/site-packages/transformers/audio_utils.py ADDED
@@ -0,0 +1,825 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team and the librosa & torchaudio authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Audio processing functions to extract features from audio waveforms. This code is pure numpy to support all frameworks
17
+ and remove unnecessary dependencies.
18
+ """
19
+ import warnings
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+
25
+ def hertz_to_mel(freq: Union[float, np.ndarray], mel_scale: str = "htk") -> Union[float, np.ndarray]:
26
+ """
27
+ Convert frequency from hertz to mels.
28
+
29
+ Args:
30
+ freq (`float` or `np.ndarray`):
31
+ The frequency, or multiple frequencies, in hertz (Hz).
32
+ mel_scale (`str`, *optional*, defaults to `"htk"`):
33
+ The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`.
34
+
35
+ Returns:
36
+ `float` or `np.ndarray`: The frequencies on the mel scale.
37
+ """
38
+
39
+ if mel_scale not in ["slaney", "htk", "kaldi"]:
40
+ raise ValueError('mel_scale should be one of "htk", "slaney" or "kaldi".')
41
+
42
+ if mel_scale == "htk":
43
+ return 2595.0 * np.log10(1.0 + (freq / 700.0))
44
+ elif mel_scale == "kaldi":
45
+ return 1127.0 * np.log(1.0 + (freq / 700.0))
46
+
47
+ min_log_hertz = 1000.0
48
+ min_log_mel = 15.0
49
+ logstep = 27.0 / np.log(6.4)
50
+ mels = 3.0 * freq / 200.0
51
+
52
+ if isinstance(freq, np.ndarray):
53
+ log_region = freq >= min_log_hertz
54
+ mels[log_region] = min_log_mel + np.log(freq[log_region] / min_log_hertz) * logstep
55
+ elif freq >= min_log_hertz:
56
+ mels = min_log_mel + np.log(freq / min_log_hertz) * logstep
57
+
58
+ return mels
59
+
60
+
61
+ def mel_to_hertz(mels: Union[float, np.ndarray], mel_scale: str = "htk") -> Union[float, np.ndarray]:
62
+ """
63
+ Convert frequency from mels to hertz.
64
+
65
+ Args:
66
+ mels (`float` or `np.ndarray`):
67
+ The frequency, or multiple frequencies, in mels.
68
+ mel_scale (`str`, *optional*, defaults to `"htk"`):
69
+ The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`.
70
+
71
+ Returns:
72
+ `float` or `np.ndarray`: The frequencies in hertz.
73
+ """
74
+
75
+ if mel_scale not in ["slaney", "htk", "kaldi"]:
76
+ raise ValueError('mel_scale should be one of "htk", "slaney" or "kaldi".')
77
+
78
+ if mel_scale == "htk":
79
+ return 700.0 * (np.power(10, mels / 2595.0) - 1.0)
80
+ elif mel_scale == "kaldi":
81
+ return 700.0 * (np.exp(mels / 1127.0) - 1.0)
82
+
83
+ min_log_hertz = 1000.0
84
+ min_log_mel = 15.0
85
+ logstep = np.log(6.4) / 27.0
86
+ freq = 200.0 * mels / 3.0
87
+
88
+ if isinstance(mels, np.ndarray):
89
+ log_region = mels >= min_log_mel
90
+ freq[log_region] = min_log_hertz * np.exp(logstep * (mels[log_region] - min_log_mel))
91
+ elif mels >= min_log_mel:
92
+ freq = min_log_hertz * np.exp(logstep * (mels - min_log_mel))
93
+
94
+ return freq
95
+
96
+
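A quick hedged check of the two conversions above (frequencies chosen arbitrarily): on the `"htk"` scale, `mel_to_hertz` inverts `2595 * log10(1 + f / 700)`, so the pair should round-trip.

```python
import numpy as np

from transformers.audio_utils import hertz_to_mel, mel_to_hertz

freqs = np.array([100.0, 440.0, 1000.0, 8000.0])
mels = hertz_to_mel(freqs, mel_scale="htk")

# Round-trip recovers the original frequencies up to floating-point error.
assert np.allclose(mel_to_hertz(mels, mel_scale="htk"), freqs)
print(mels)
```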
97
+ def hertz_to_octave(
98
+ freq: Union[float, np.ndarray], tuning: Optional[float] = 0.0, bins_per_octave: Optional[int] = 12
99
+ ):
100
+ """
101
+ Convert frequency from hertz to fractional octave numbers.
102
+ Adapted from *librosa*.
103
+
104
+ Args:
105
+ freq (`float` or `np.ndarray`):
106
+ The frequency, or multiple frequencies, in hertz (Hz).
107
+ tuning (`float`, defaults to `0.`):
108
+ Tuning deviation from the Stuttgart pitch (A440) in (fractional) bins per octave.
109
+ bins_per_octave (`int`, defaults to `12`):
110
+ Number of bins per octave.
111
+
112
+ Returns:
113
+ `float` or `np.ndarray`: The frequencies on the octave scale.
114
+ """
115
+ stuttgart_pitch = 440.0 * 2.0 ** (tuning / bins_per_octave)
116
+ octave = np.log2(freq / (float(stuttgart_pitch) / 16))
117
+ return octave
118
+
119
+
120
+ def _create_triangular_filter_bank(fft_freqs: np.ndarray, filter_freqs: np.ndarray) -> np.ndarray:
121
+ """
122
+ Creates a triangular filter bank.
123
+
124
+ Adapted from *torchaudio* and *librosa*.
125
+
126
+ Args:
127
+ fft_freqs (`np.ndarray` of shape `(num_frequency_bins,)`):
128
+ Discrete frequencies of the FFT bins in Hz.
129
+ filter_freqs (`np.ndarray` of shape `(num_mel_filters,)`):
130
+ Center frequencies of the triangular filters to create, in Hz.
131
+
132
+ Returns:
133
+ `np.ndarray` of shape `(num_frequency_bins, num_mel_filters)`
134
+ """
135
+ filter_diff = np.diff(filter_freqs)
136
+ slopes = np.expand_dims(filter_freqs, 0) - np.expand_dims(fft_freqs, 1)
137
+ down_slopes = -slopes[:, :-2] / filter_diff[:-1]
138
+ up_slopes = slopes[:, 2:] / filter_diff[1:]
139
+ return np.maximum(np.zeros(1), np.minimum(down_slopes, up_slopes))
140
+
141
+
142
+ def chroma_filter_bank(
143
+ num_frequency_bins: int,
144
+ num_chroma: int,
145
+ sampling_rate: int,
146
+ tuning: float = 0.0,
147
+ power: Optional[float] = 2.0,
148
+ weighting_parameters: Optional[Tuple[float]] = (5.0, 2),
149
+ start_at_c_chroma: Optional[bool] = True,
150
+ ):
151
+ """
152
+ Creates a chroma filter bank, i.e. a linear transformation to project spectrogram bins onto chroma bins.
153
+
154
+ Adapted from *librosa*.
155
+
156
+ Args:
157
+ num_frequency_bins (`int`):
158
+ Number of frequencies used to compute the spectrogram (should be the same as in `stft`).
159
+ num_chroma (`int`):
160
+ Number of chroma bins (i.e. pitch classes).
161
+ sampling_rate (`float`):
162
+ Sample rate of the audio waveform.
163
+ tuning (`float`):
164
+ Tuning deviation from A440 in fractions of a chroma bin.
165
+ power (`float`, *optional*, defaults to 2.0):
166
+ If 2.0, normalizes each column with its L2 norm. If 1.0, normalizes each column with its L1 norm.
167
+ weighting_parameters (`Tuple[float]`, *optional*, defaults to `(5., 2.)`):
168
+ If specified, apply a Gaussian weighting parameterized by the first element of the tuple being the center and
169
+ the second element being the Gaussian half-width.
170
+ start_at_c_chroma (`bool`, *optional*, defaults to `True`):
171
+ If True, the filter bank will start at the 'C' pitch class. Otherwise, it will start at 'A'.
172
+ Returns:
173
+ `np.ndarray` of shape `(num_frequency_bins, num_chroma)`
174
+ """
175
+ # Get the FFT bins, not counting the DC component
176
+ frequencies = np.linspace(0, sampling_rate, num_frequency_bins, endpoint=False)[1:]
177
+
178
+ freq_bins = num_chroma * hertz_to_octave(frequencies, tuning=tuning, bins_per_octave=num_chroma)
179
+
180
+ # make up a value for the 0 Hz bin = 1.5 octaves below bin 1
181
+ # (so chroma is 50% rotated from bin 1, and bin width is broad)
182
+ freq_bins = np.concatenate(([freq_bins[0] - 1.5 * num_chroma], freq_bins))
183
+
184
+ bins_width = np.concatenate((np.maximum(freq_bins[1:] - freq_bins[:-1], 1.0), [1]))
185
+
186
+ chroma_filters = np.subtract.outer(freq_bins, np.arange(0, num_chroma, dtype="d")).T
187
+
188
+ num_chroma2 = np.round(float(num_chroma) / 2)
189
+
190
+ # Project into range -num_chroma/2 .. num_chroma/2
191
+ # add on fixed offset of 10*num_chroma to ensure all values passed to
192
+ # rem are positive
193
+ chroma_filters = np.remainder(chroma_filters + num_chroma2 + 10 * num_chroma, num_chroma) - num_chroma2
194
+
195
+ # Gaussian bumps - 2*D to make them narrower
196
+ chroma_filters = np.exp(-0.5 * (2 * chroma_filters / np.tile(bins_width, (num_chroma, 1))) ** 2)
197
+
198
+ # normalize each column
199
+ if power is not None:
200
+ chroma_filters = chroma_filters / np.sum(chroma_filters**power, axis=0, keepdims=True) ** (1.0 / power)
201
+
202
+ # Maybe apply scaling for fft bins
203
+ if weighting_parameters is not None:
204
+ center, half_width = weighting_parameters
205
+ chroma_filters *= np.tile(
206
+ np.exp(-0.5 * (((freq_bins / num_chroma - center) / half_width) ** 2)),
207
+ (num_chroma, 1),
208
+ )
209
+
210
+ if start_at_c_chroma:
211
+ chroma_filters = np.roll(chroma_filters, -3 * (num_chroma // 12), axis=0)
212
+
213
+ # remove aliasing columns, copy to ensure row-contiguity
214
+ return np.ascontiguousarray(chroma_filters[:, : int(1 + num_frequency_bins / 2)])
215
+
216
+
217
+ def mel_filter_bank(
218
+ num_frequency_bins: int,
219
+ num_mel_filters: int,
220
+ min_frequency: float,
221
+ max_frequency: float,
222
+ sampling_rate: int,
223
+ norm: Optional[str] = None,
224
+ mel_scale: str = "htk",
225
+ triangularize_in_mel_space: bool = False,
226
+ ) -> np.ndarray:
227
+ """
228
+ Creates a frequency bin conversion matrix used to obtain a mel spectrogram. This is called a *mel filter bank*, and
229
+ various implementations exist, which differ in the number of filters, the shape of the filters, the way the filters
230
+ are spaced, the bandwidth of the filters, and the manner in which the spectrum is warped. The goal of these
231
+ features is to approximate the non-linear human perception of the variation in pitch with respect to the frequency.
232
+
233
+ Different banks of mel filters were introduced in the literature. The following variations are supported:
234
+
235
+ - MFCC FB-20: introduced in 1980 by Davis and Mermelstein, it assumes a sampling frequency of 10 kHz and a speech
236
+ bandwidth of `[0, 4600]` Hz.
237
+ - MFCC FB-24 HTK: from the Cambridge HMM Toolkit (HTK) (1995) uses a filter bank of 24 filters for a speech
238
+ bandwidth of `[0, 8000]` Hz. This assumes sampling rate ≥ 16 kHz.
239
+ - MFCC FB-40: from the Auditory Toolbox for MATLAB written by Slaney in 1998, assumes a sampling rate of 16 kHz and
240
+ speech bandwidth of `[133, 6854]` Hz. This version also includes area normalization.
241
+ - HFCC-E FB-29 (Human Factor Cepstral Coefficients) of Skowronski and Harris (2004), assumes a sampling rate of
242
+ 12.5 kHz and speech bandwidth of `[0, 6250]` Hz.
243
+
244
+ This code is adapted from *torchaudio* and *librosa*. Note that the default parameters of torchaudio's
245
+ `melscale_fbanks` implement the `"htk"` filters while librosa uses the `"slaney"` implementation.
246
+
247
+ Args:
248
+ num_frequency_bins (`int`):
249
+ Number of frequencies used to compute the spectrogram (should be the same as in `stft`).
250
+ num_mel_filters (`int`):
251
+ Number of mel filters to generate.
252
+ min_frequency (`float`):
253
+ Lowest frequency of interest in Hz.
254
+ max_frequency (`float`):
255
+ Highest frequency of interest in Hz. This should not exceed `sampling_rate / 2`.
256
+ sampling_rate (`int`):
257
+ Sample rate of the audio waveform.
258
+ norm (`str`, *optional*):
259
+ If `"slaney"`, divide the triangular mel weights by the width of the mel band (area normalization).
260
+ mel_scale (`str`, *optional*, defaults to `"htk"`):
261
+ The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`.
262
+ triangularize_in_mel_space (`bool`, *optional*, defaults to `False`):
263
+ If this option is enabled, the triangular filter is applied in mel space rather than frequency space. This
264
+ should be set to `true` in order to get the same results as `torchaudio` when computing mel filters.
265
+
266
+ Returns:
267
+ `np.ndarray` of shape (`num_frequency_bins`, `num_mel_filters`): Triangular filter bank matrix. This is a
268
+ projection matrix to go from a spectrogram to a mel spectrogram.
269
+ """
270
+ if norm is not None and norm != "slaney":
271
+ raise ValueError('norm must be one of None or "slaney"')
272
+
273
+ # center points of the triangular mel filters
274
+ mel_min = hertz_to_mel(min_frequency, mel_scale=mel_scale)
275
+ mel_max = hertz_to_mel(max_frequency, mel_scale=mel_scale)
276
+ mel_freqs = np.linspace(mel_min, mel_max, num_mel_filters + 2)
277
+ filter_freqs = mel_to_hertz(mel_freqs, mel_scale=mel_scale)
278
+
279
+ if triangularize_in_mel_space:
280
+ # frequencies of FFT bins in Hz, but filters triangularized in mel space
281
+ fft_bin_width = sampling_rate / (num_frequency_bins * 2)
282
+ fft_freqs = hertz_to_mel(fft_bin_width * np.arange(num_frequency_bins), mel_scale=mel_scale)
283
+ filter_freqs = mel_freqs
284
+ else:
285
+ # frequencies of FFT bins in Hz
286
+ fft_freqs = np.linspace(0, sampling_rate // 2, num_frequency_bins)
287
+
288
+ mel_filters = _create_triangular_filter_bank(fft_freqs, filter_freqs)
289
+
290
+ if norm is not None and norm == "slaney":
291
+ # Slaney-style mel is scaled to be approx constant energy per channel
292
+ enorm = 2.0 / (filter_freqs[2 : num_mel_filters + 2] - filter_freqs[:num_mel_filters])
293
+ mel_filters *= np.expand_dims(enorm, 0)
294
+
295
+ if (mel_filters.max(axis=0) == 0.0).any():
296
+ warnings.warn(
297
+ "At least one mel filter has all zero values. "
298
+ f"The value for `num_mel_filters` ({num_mel_filters}) may be set too high. "
299
+ f"Or, the value for `num_frequency_bins` ({num_frequency_bins}) may be set too low."
300
+ )
301
+
302
+ return mel_filters
303
+
304
+
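A short sketch of building a Slaney-style mel filter bank with the function above; the parameter values are illustrative (roughly a 16 kHz, 400-point STFT front end), not taken from the diff.

```python
from transformers.audio_utils import mel_filter_bank

# 201 frequency bins corresponds to a one-sided FFT of length 400 (400 // 2 + 1).
mel_filters = mel_filter_bank(
    num_frequency_bins=201,
    num_mel_filters=80,
    min_frequency=0.0,
    max_frequency=8000.0,
    sampling_rate=16000,
    norm="slaney",
    mel_scale="slaney",
)
print(mel_filters.shape)  # (201, 80): projects a spectrogram onto 80 mel bands
```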
305
+ def optimal_fft_length(window_length: int) -> int:
306
+ """
307
+ Finds the best FFT input size for a given `window_length`. This function takes a given window length and, if not
308
+ already a power of two, rounds it up to the next power of two.
309
+
310
+ The FFT algorithm works fastest when the length of the input is a power of two, which may be larger than the size
311
+ of the window or analysis frame. For example, if the window is 400 samples, using an FFT input size of 512 samples
312
+ is more optimal than an FFT size of 400 samples. Using a larger FFT size does not affect the detected frequencies,
313
+ it simply gives a higher frequency resolution (i.e. the frequency bins are smaller).
314
+ """
315
+ return 2 ** int(np.ceil(np.log2(window_length)))
316
+
317
+
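For example (window lengths chosen for illustration), the helper above simply rounds up to the next power of two:

```python
from transformers.audio_utils import optimal_fft_length

print(optimal_fft_length(400))   # 512
print(optimal_fft_length(1024))  # 1024, already a power of two
```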
318
+ def window_function(
319
+ window_length: int,
320
+ name: str = "hann",
321
+ periodic: bool = True,
322
+ frame_length: Optional[int] = None,
323
+ center: bool = True,
324
+ ) -> np.ndarray:
325
+ """
326
+ Returns an array containing the specified window. This window is intended to be used with `stft`.
327
+
328
+ The following window types are supported:
329
+
330
+ - `"boxcar"`: a rectangular window
331
+ - `"hamming"`: the Hamming window
332
+ - `"hann"`: the Hann window
333
+ - `"povey"`: the Povey window
334
+
335
+ Args:
336
+ window_length (`int`):
337
+ The length of the window in samples.
338
+ name (`str`, *optional*, defaults to `"hann"`):
339
+ The name of the window function.
340
+ periodic (`bool`, *optional*, defaults to `True`):
341
+ Whether the window is periodic or symmetric.
342
+ frame_length (`int`, *optional*):
343
+ The length of the analysis frames in samples. Provide a value for `frame_length` if the window is smaller
344
+ than the frame length, so that it will be zero-padded.
345
+ center (`bool`, *optional*, defaults to `True`):
346
+ Whether to center the window inside the FFT buffer. Only used when `frame_length` is provided.
347
+
348
+ Returns:
349
+ `np.ndarray` of shape `(window_length,)` or `(frame_length,)` containing the window.
350
+ """
351
+ length = window_length + 1 if periodic else window_length
352
+
353
+ if name == "boxcar":
354
+ window = np.ones(length)
355
+ elif name in ["hamming", "hamming_window"]:
356
+ window = np.hamming(length)
357
+ elif name in ["hann", "hann_window"]:
358
+ window = np.hanning(length)
359
+ elif name in ["povey"]:
360
+ window = np.power(np.hanning(length), 0.85)
361
+ else:
362
+ raise ValueError(f"Unknown window function '{name}'")
363
+
364
+ if periodic:
365
+ window = window[:-1]
366
+
367
+ if frame_length is None:
368
+ return window
369
+
370
+ if window_length > frame_length:
371
+ raise ValueError(
372
+ f"Length of the window ({window_length}) may not be larger than frame_length ({frame_length})"
373
+ )
374
+
375
+ padded_window = np.zeros(frame_length)
376
+ offset = (frame_length - window_length) // 2 if center else 0
377
+ padded_window[offset : offset + window_length] = window
378
+ return padded_window
379
+
380
+
381
+ # TODO This method does not support batching yet as we are mainly focused on inference.
382
+ def spectrogram(
383
+ waveform: np.ndarray,
384
+ window: np.ndarray,
385
+ frame_length: int,
386
+ hop_length: int,
387
+ fft_length: Optional[int] = None,
388
+ power: Optional[float] = 1.0,
389
+ center: bool = True,
390
+ pad_mode: str = "reflect",
391
+ onesided: bool = True,
392
+ preemphasis: Optional[float] = None,
393
+ mel_filters: Optional[np.ndarray] = None,
394
+ mel_floor: float = 1e-10,
395
+ log_mel: Optional[str] = None,
396
+ reference: float = 1.0,
397
+ min_value: float = 1e-10,
398
+ db_range: Optional[float] = None,
399
+ remove_dc_offset: Optional[bool] = None,
400
+ dtype: np.dtype = np.float32,
401
+ ) -> np.ndarray:
402
+ """
403
+ Calculates a spectrogram over one waveform using the Short-Time Fourier Transform.
404
+
405
+ This function can create the following kinds of spectrograms:
406
+
407
+ - amplitude spectrogram (`power = 1.0`)
408
+ - power spectrogram (`power = 2.0`)
409
+ - complex-valued spectrogram (`power = None`)
410
+ - log spectrogram (use `log_mel` argument)
411
+ - mel spectrogram (provide `mel_filters`)
412
+ - log-mel spectrogram (provide `mel_filters` and `log_mel`)
413
+
414
+ How this works:
415
+
416
+ 1. The input waveform is split into frames of size `frame_length` that are partially overlapping by `frame_length
417
+ - hop_length` samples.
418
+ 2. Each frame is multiplied by the window and placed into a buffer of size `fft_length`.
419
+ 3. The DFT is taken of each windowed frame.
420
+ 4. The results are stacked into a spectrogram.
421
+
422
+ We make a distinction between the following "blocks" of sample data, each of which may have a different length:
423
+
424
+ - The analysis frame. This is the size of the time slices that the input waveform is split into.
425
+ - The window. Each analysis frame is multiplied by the window to avoid spectral leakage.
426
+ - The FFT input buffer. The length of this determines how many frequency bins are in the spectrogram.
427
+
428
+ In this implementation, the window is assumed to be zero-padded to have the same size as the analysis frame. A
429
+ padded window can be obtained from `window_function()`. The FFT input buffer may be larger than the analysis frame,
430
+ typically the next power of two.
431
+
432
+ Note: This function is not optimized for speed yet. It should be mostly compatible with `librosa.stft` and
433
+ `torchaudio.functional.transforms.Spectrogram`, although it is more flexible due to the different ways spectrograms
434
+ can be constructed.
435
+
436
+ Args:
437
+ waveform (`np.ndarray` of shape `(length,)`):
438
+ The input waveform. This must be a single real-valued, mono waveform.
439
+ window (`np.ndarray` of shape `(frame_length,)`):
440
+ The windowing function to apply, including zero-padding if necessary. The actual window length may be
441
+ shorter than `frame_length`, but we're assuming the array has already been zero-padded.
442
+ frame_length (`int`):
443
+ The length of the analysis frames in samples. With librosa this is always equal to `fft_length` but we also
444
+ allow smaller sizes.
445
+ hop_length (`int`):
446
+ The stride between successive analysis frames in samples.
447
+ fft_length (`int`, *optional*):
448
+ The size of the FFT buffer in samples. This determines how many frequency bins the spectrogram will have.
449
+ For optimal speed, this should be a power of two. If `None`, uses `frame_length`.
450
+ power (`float`, *optional*, defaults to 1.0):
451
+ If 1.0, returns the amplitude spectrogram. If 2.0, returns the power spectrogram. If `None`, returns
452
+ complex numbers.
453
+ center (`bool`, *optional*, defaults to `True`):
454
+ Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame
455
+ `t` will start at time `t * hop_length`.
456
+ pad_mode (`str`, *optional*, defaults to `"reflect"`):
457
+ Padding mode used when `center` is `True`. Possible values are: `"constant"` (pad with zeros), `"edge"`
458
+ (pad with edge values), `"reflect"` (pads with mirrored values).
459
+ onesided (`bool`, *optional*, defaults to `True`):
460
+ If True, only computes the positive frequencies and returns a spectrogram containing `fft_length // 2 + 1`
461
+ frequency bins. If False, also computes the negative frequencies and returns `fft_length` frequency bins.
462
+ preemphasis (`float`, *optional*):
463
+ Coefficient for the pre-emphasis (first-order high-pass) filter applied to each frame before the DFT.
464
+ mel_filters (`np.ndarray` of shape `(num_freq_bins, num_mel_filters)`, *optional*):
465
+ The mel filter bank. If supplied, applies this filter bank to create a mel spectrogram.
466
+ mel_floor (`float`, *optional*, defaults to 1e-10):
467
+ Minimum value of mel frequency banks.
468
+ log_mel (`str`, *optional*):
469
+ How to convert the spectrogram to log scale. Possible options are: `None` (don't convert), `"log"` (take
470
+ the natural logarithm) `"log10"` (take the base-10 logarithm), `"dB"` (convert to decibels). Can only be
471
+ used when `power` is not `None`.
472
+ reference (`float`, *optional*, defaults to 1.0):
473
+ Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set
474
+ the loudest part to 0 dB. Must be greater than zero.
475
+ min_value (`float`, *optional*, defaults to `1e-10`):
476
+ The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking
477
+ `log(0)`. For a power spectrogram, the default of `1e-10` corresponds to a minimum of -100 dB. For an
478
+ amplitude spectrogram, the value `1e-5` corresponds to -100 dB. Must be greater than zero.
479
+ db_range (`float`, *optional*):
480
+ Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the
481
+ peak value and the smallest value will never be more than 80 dB. Must be greater than zero.
482
+ remove_dc_offset (`bool`, *optional*):
483
+ Subtract mean from waveform on each frame, applied before pre-emphasis. This should be set to `true` in
484
+ order to get the same results as `torchaudio.compliance.kaldi.fbank` when computing mel filters.
485
+ dtype (`np.dtype`, *optional*, defaults to `np.float32`):
486
+ Data type of the spectrogram tensor. If `power` is None, this argument is ignored and the dtype will be
487
+ `np.complex64`.
488
+
489
+ Returns:
490
+ `np.ndarray` containing a spectrogram of shape `(num_frequency_bins, length)` for a regular spectrogram or shape
491
+ `(num_mel_filters, length)` for a mel spectrogram.
492
+ """
493
+ window_length = len(window)
494
+
495
+ if fft_length is None:
496
+ fft_length = frame_length
497
+
498
+ if frame_length > fft_length:
499
+ raise ValueError(f"frame_length ({frame_length}) may not be larger than fft_length ({fft_length})")
500
+
501
+ if window_length != frame_length:
502
+ raise ValueError(f"Length of the window ({window_length}) must equal frame_length ({frame_length})")
503
+
504
+ if hop_length <= 0:
505
+ raise ValueError("hop_length must be greater than zero")
506
+
507
+ if waveform.ndim != 1:
508
+ raise ValueError(f"Input waveform must have only one dimension, shape is {waveform.shape}")
509
+
510
+ if np.iscomplexobj(waveform):
511
+ raise ValueError("Complex-valued input waveforms are not currently supported")
512
+
513
+ if power is None and mel_filters is not None:
514
+ raise ValueError(
515
+ "You have provided `mel_filters` but `power` is `None`. Mel spectrogram computation is not yet supported for complex-valued spectrogram. "
516
+ "Specify `power` to fix this issue."
517
+ )
518
+
519
+ # center pad the waveform
520
+ if center:
521
+ padding = [(int(frame_length // 2), int(frame_length // 2))]
522
+ waveform = np.pad(waveform, padding, mode=pad_mode)
523
+
524
+ # promote to float64, since np.fft uses float64 internally
525
+ waveform = waveform.astype(np.float64)
526
+ window = window.astype(np.float64)
527
+
528
+ # split waveform into frames of frame_length size
529
+ num_frames = int(1 + np.floor((waveform.size - frame_length) / hop_length))
530
+
531
+ num_frequency_bins = (fft_length // 2) + 1 if onesided else fft_length
532
+ spectrogram = np.empty((num_frames, num_frequency_bins), dtype=np.complex64)
533
+
534
+ # rfft is faster than fft
535
+ fft_func = np.fft.rfft if onesided else np.fft.fft
536
+ buffer = np.zeros(fft_length)
537
+
538
+ timestep = 0
539
+ for frame_idx in range(num_frames):
540
+ buffer[:frame_length] = waveform[timestep : timestep + frame_length]
541
+
542
+ if remove_dc_offset:
543
+ buffer[:frame_length] = buffer[:frame_length] - buffer[:frame_length].mean()
544
+
545
+ if preemphasis is not None:
546
+ buffer[1:frame_length] -= preemphasis * buffer[: frame_length - 1]
547
+ buffer[0] *= 1 - preemphasis
548
+
549
+ buffer[:frame_length] *= window
550
+
551
+ spectrogram[frame_idx] = fft_func(buffer)
552
+ timestep += hop_length
553
+
554
+ # note: ** is much faster than np.power
555
+ if power is not None:
556
+ spectrogram = np.abs(spectrogram, dtype=np.float64) ** power
557
+
558
+ spectrogram = spectrogram.T
559
+
560
+ if mel_filters is not None:
561
+ spectrogram = np.maximum(mel_floor, np.dot(mel_filters.T, spectrogram))
562
+
563
+ if power is not None and log_mel is not None:
564
+ if log_mel == "log":
565
+ spectrogram = np.log(spectrogram)
566
+ elif log_mel == "log10":
567
+ spectrogram = np.log10(spectrogram)
568
+ elif log_mel == "dB":
569
+ if power == 1.0:
570
+ spectrogram = amplitude_to_db(spectrogram, reference, min_value, db_range)
571
+ elif power == 2.0:
572
+ spectrogram = power_to_db(spectrogram, reference, min_value, db_range)
573
+ else:
574
+ raise ValueError(f"Cannot use log_mel option '{log_mel}' with power {power}")
575
+ else:
576
+ raise ValueError(f"Unknown log_mel option: {log_mel}")
577
+
578
+ spectrogram = np.asarray(spectrogram, dtype)
579
+
580
+ return spectrogram
581
+
582
+
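A hedged end-to-end sketch tying the helpers above together (a synthetic sine wave and Whisper-like parameter values, purely for illustration): build a padded Hann window and a mel filter bank, then compute a log-mel spectrogram in one call.

```python
import numpy as np

from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

sampling_rate = 16000
t = np.arange(sampling_rate) / sampling_rate
waveform = np.sin(2 * np.pi * 440.0 * t)  # one second of A440

window = window_function(400, "hann", frame_length=400)
mel_filters = mel_filter_bank(
    num_frequency_bins=201,  # 400 // 2 + 1 one-sided bins
    num_mel_filters=80,
    min_frequency=0.0,
    max_frequency=8000.0,
    sampling_rate=sampling_rate,
    norm="slaney",
    mel_scale="slaney",
)

log_mel = spectrogram(
    waveform,
    window,
    frame_length=400,
    hop_length=160,
    power=2.0,
    mel_filters=mel_filters,
    log_mel="log10",
)
print(log_mel.shape)  # (80, num_frames)
```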
583
+ def power_to_db(
584
+ spectrogram: np.ndarray,
585
+ reference: float = 1.0,
586
+ min_value: float = 1e-10,
587
+ db_range: Optional[float] = None,
588
+ ) -> np.ndarray:
589
+ """
590
+ Converts a power spectrogram to the decibel scale. This computes `10 * log10(spectrogram / reference)`, using basic
591
+ logarithm properties for numerical stability.
592
+
593
+ The motivation behind applying the log function on the (mel) spectrogram is that humans do not hear loudness on a
594
+ linear scale. Generally to double the perceived volume of a sound we need to put 8 times as much energy into it.
595
+ This means that large variations in energy may not sound all that different if the sound is loud to begin with.
596
+ This compression operation makes the (mel) spectrogram features match more closely what humans actually hear.
597
+
598
+ Based on the implementation of `librosa.power_to_db`.
599
+
600
+ Args:
601
+ spectrogram (`np.ndarray`):
602
+ The input power (mel) spectrogram. Note that a power spectrogram has the amplitudes squared!
603
+ reference (`float`, *optional*, defaults to 1.0):
604
+ Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set
605
+ the loudest part to 0 dB. Must be greater than zero.
606
+ min_value (`float`, *optional*, defaults to `1e-10`):
607
+ The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking
608
+ `log(0)`. The default of `1e-10` corresponds to a minimum of -100 dB. Must be greater than zero.
609
+ db_range (`float`, *optional*):
610
+ Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the
611
+ peak value and the smallest value will never be more than 80 dB. Must be greater than zero.
612
+
613
+ Returns:
614
+ `np.ndarray`: the spectrogram in decibels
615
+ """
616
+ if reference <= 0.0:
617
+ raise ValueError("reference must be greater than zero")
618
+ if min_value <= 0.0:
619
+ raise ValueError("min_value must be greater than zero")
620
+
621
+ reference = max(min_value, reference)
622
+
623
+ spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None)
624
+ spectrogram = 10.0 * (np.log10(spectrogram) - np.log10(reference))
625
+
626
+ if db_range is not None:
627
+ if db_range <= 0.0:
628
+ raise ValueError("db_range must be greater than zero")
629
+ spectrogram = np.clip(spectrogram, a_min=spectrogram.max() - db_range, a_max=None)
630
+
631
+ return spectrogram
632
+
633
+
634
+ def amplitude_to_db(
635
+ spectrogram: np.ndarray,
636
+ reference: float = 1.0,
637
+ min_value: float = 1e-5,
638
+ db_range: Optional[float] = None,
639
+ ) -> np.ndarray:
640
+ """
641
+ Converts an amplitude spectrogram to the decibel scale. This computes `20 * log10(spectrogram / reference)`, using
642
+ basic logarithm properties for numerical stability.
643
+
644
+ The motivation behind applying the log function on the (mel) spectrogram is that humans do not hear loudness on a
645
+ linear scale. Generally to double the perceived volume of a sound we need to put 8 times as much energy into it.
646
+ This means that large variations in energy may not sound all that different if the sound is loud to begin with.
647
+ This compression operation makes the (mel) spectrogram features match more closely what humans actually hear.
648
+
649
+ Args:
650
+ spectrogram (`np.ndarray`):
651
+ The input amplitude (mel) spectrogram.
652
+ reference (`float`, *optional*, defaults to 1.0):
653
+ Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set
654
+ the loudest part to 0 dB. Must be greater than zero.
655
+ min_value (`float`, *optional*, defaults to `1e-5`):
656
+ The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking
657
+ `log(0)`. The default of `1e-5` corresponds to a minimum of -100 dB. Must be greater than zero.
658
+ db_range (`float`, *optional*):
659
+ Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the
660
+ peak value and the smallest value will never be more than 80 dB. Must be greater than zero.
661
+
662
+ Returns:
663
+ `np.ndarray`: the spectrogram in decibels
664
+ """
665
+ if reference <= 0.0:
666
+ raise ValueError("reference must be greater than zero")
667
+ if min_value <= 0.0:
668
+ raise ValueError("min_value must be greater than zero")
669
+
670
+ reference = max(min_value, reference)
671
+
672
+ spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None)
673
+ spectrogram = 20.0 * (np.log10(spectrogram) - np.log10(reference))
674
+
675
+ if db_range is not None:
676
+ if db_range <= 0.0:
677
+ raise ValueError("db_range must be greater than zero")
678
+ spectrogram = np.clip(spectrogram, a_min=spectrogram.max() - db_range, a_max=None)
679
+
680
+ return spectrogram
681
+
682
+
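A small numeric illustration of the dB conversions above (arbitrary values): with the default `reference=1.0`, a power of 1.0 maps to 0 dB, 0.1 to -10 dB, and anything at or below `min_value` is clipped to -100 dB.

```python
import numpy as np

from transformers.audio_utils import amplitude_to_db, power_to_db

power_spec = np.array([1.0, 0.1, 1e-12])
print(power_to_db(power_spec))          # [0., -10., -100.]  (1e-12 clipped at min_value=1e-10)

amplitude_spec = np.array([1.0, 0.1, 1e-12])
print(amplitude_to_db(amplitude_spec))  # [0., -20., -100.]  (clipped at min_value=1e-5)
```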
683
+ ### deprecated functions below this line ###
684
+
685
+
686
+ def get_mel_filter_banks(
687
+ nb_frequency_bins: int,
688
+ nb_mel_filters: int,
689
+ frequency_min: float,
690
+ frequency_max: float,
691
+ sample_rate: int,
692
+ norm: Optional[str] = None,
693
+ mel_scale: str = "htk",
694
+ ) -> np.array:
695
+ warnings.warn(
696
+ "The function `get_mel_filter_banks` is deprecated and will be removed in version 4.31.0 of Transformers",
697
+ FutureWarning,
698
+ )
699
+ return mel_filter_bank(
700
+ num_frequency_bins=nb_frequency_bins,
701
+ num_mel_filters=nb_mel_filters,
702
+ min_frequency=frequency_min,
703
+ max_frequency=frequency_max,
704
+ sampling_rate=sample_rate,
705
+ norm=norm,
706
+ mel_scale=mel_scale,
707
+ )
708
+
709
+
710
+ def fram_wave(waveform: np.array, hop_length: int = 160, fft_window_size: int = 400, center: bool = True):
711
+ """
712
+ In order to compute the short-time Fourier transform, the waveform needs to be split into overlapping windowed
713
+ segments called `frames`.
714
+
715
+ The window length (window_length) defines how much of the signal is contained in each frame, while the hop length
716
+ defines the step between the beginning of each new frame.
717
+
718
+
719
+ Args:
720
+ waveform (`np.array` of shape `(sample_length,)`):
721
+ The raw waveform which will be split into smaller chunks.
722
+ hop_length (`int`, *optional*, defaults to 160):
723
+ Step between each window of the waveform.
724
+ fft_window_size (`int`, *optional*, defaults to 400):
725
+ Defines the size of the window.
726
+ center (`bool`, defaults to `True`):
727
+ Whether or not to center each frame around the middle of the frame. Centering is done by reflecting the
728
+ waveform on the left and on the right.
729
+
730
+ Return:
731
+ framed_waveform (`np.array` of shape `(waveform.shape // hop_length , fft_window_size)`):
732
+ The framed waveforms that can be fed to `np.fft`.
733
+ """
734
+ warnings.warn(
735
+ "The function `fram_wave` is deprecated and will be removed in version 4.31.0 of Transformers",
736
+ FutureWarning,
737
+ )
738
+ frames = []
739
+ for i in range(0, waveform.shape[0] + 1, hop_length):
740
+ if center:
741
+ half_window = (fft_window_size - 1) // 2 + 1
742
+ start = i - half_window if i > half_window else 0
743
+ end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0]
744
+ frame = waveform[start:end]
745
+ if start == 0:
746
+ padd_width = (-i + half_window, 0)
747
+ frame = np.pad(frame, pad_width=padd_width, mode="reflect")
748
+
749
+ elif end == waveform.shape[0]:
750
+ padd_width = (0, (i - waveform.shape[0] + half_window))
751
+ frame = np.pad(frame, pad_width=padd_width, mode="reflect")
752
+
753
+ else:
754
+ frame = waveform[i : i + fft_window_size]
755
+ frame_width = frame.shape[0]
756
+ if frame_width < waveform.shape[0]:
757
+ frame = np.lib.pad(
758
+ frame, pad_width=(0, fft_window_size - frame_width), mode="constant", constant_values=0
759
+ )
760
+ frames.append(frame)
761
+
762
+ frames = np.stack(frames, 0)
763
+ return frames
764
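To make the framing step concrete, here is a minimal standalone NumPy sketch of the non-centered case described in the docstring (a simplified stand-in for `fram_wave`, without the reflection padding applied when `center=True`):

```python
import numpy as np

waveform = np.random.rand(16000)  # ~1 second of audio at 16 kHz (illustrative)
hop_length, fft_window_size = 160, 400

# one frame every `hop_length` samples, each frame `fft_window_size` samples long
frames = np.lib.stride_tricks.sliding_window_view(waveform, fft_window_size)[::hop_length]
print(frames.shape)  # (98, 400), i.e. roughly waveform.shape[0] // hop_length frames
```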
+
765
+
766
+ def stft(frames: np.array, windowing_function: np.array, fft_window_size: int = None):
767
+ """
768
+ Calculates the complex Short-Time Fourier Transform (STFT) of the given framed signal. Should give the same results
769
+ as `torch.stft`.
770
+
771
+ Args:
+ frames (`np.array` of dimension `(num_frames, fft_window_size)`):
+ A framed audio signal obtained using `audio_utils.fram_wave`.
+ windowing_function (`np.array` of the same length as the frames):
+ An array representing the window function used to reduce the amplitude of the discontinuities at the
+ boundaries of each frame when computing the STFT. Each frame will be multiplied by the windowing function.
+ For more information on these discontinuities, known as *spectral leakage*, refer to [this
+ tutorial](https://download.ni.com/evaluation/pxi/Understanding%20FFTs%20and%20Windowing.pdf).
+ fft_window_size (`int`, *optional*):
+ Size of the window on which the Fourier transform is applied. This controls the frequency resolution of
+ the spectrogram. 400 means that the Fourier transform is computed on windows of 400 samples. The number of
+ frequency bins (`nb_frequency_bins`) used to divide the window into equal strips is equal to
+ `fft_window_size // 2 + 1`. Increasing `fft_window_size` proportionally increases the computation time.
784
+
785
+ Example:
786
+
787
+ ```python
788
+ >>> from transformers.audio_utils import stft, fram_wave
789
+ >>> import numpy as np
790
+
791
+ >>> audio = np.random.rand(50)
792
+ >>> fft_window_size = 10
793
+ >>> hop_length = 2
794
+ >>> framed_audio = fram_wave(audio, hop_length, fft_window_size)
795
+ >>> spectrogram = stft(framed_audio, np.hanning(fft_window_size))
796
+ ```
797
+
798
+ Returns:
799
+ spectrogram (`np.ndarray`):
800
+ A spectrogram of shape `(nb_frequency_bins, num_frames)` obtained using the STFT algorithm.
801
+ """
802
+ warnings.warn(
803
+ "The function `stft` is deprecated and will be removed in version 4.31.0 of Transformers",
804
+ FutureWarning,
805
+ )
806
+ frame_size = frames.shape[1]
807
+
808
+ if fft_window_size is None:
809
+ fft_window_size = frame_size
810
+
811
+ if fft_window_size < frame_size:
812
+ raise ValueError("FFT size must greater or equal the frame size")
813
+ # number of FFT bins to store
814
+ nb_frequency_bins = (fft_window_size >> 1) + 1
815
+
816
+ spectrogram = np.empty((len(frames), nb_frequency_bins), dtype=np.complex64)
817
+ fft_signal = np.zeros(fft_window_size)
818
+
819
+ for f, frame in enumerate(frames):
820
+ if windowing_function is not None:
821
+ np.multiply(frame, windowing_function, out=fft_signal[:frame_size])
822
+ else:
823
+ fft_signal[:frame_size] = frame
824
+ spectrogram[f] = np.fft.fft(fft_signal, axis=0)[:nb_frequency_bins]
825
+ return spectrogram.T
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes).
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark.cpython-310.pyc ADDED
Binary file (7.43 kB).
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args.cpython-310.pyc ADDED
Binary file (3.44 kB).
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_tf.cpython-310.pyc ADDED
Binary file (3.94 kB).
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_utils.cpython-310.pyc ADDED
Binary file (5.61 kB).
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_tf.cpython-310.pyc ADDED
Binary file (9.51 kB).
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_utils.cpython-310.pyc ADDED
Binary file (30 kB).
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark.py ADDED
@@ -0,0 +1,271 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Benchmarking the library on inference and training in PyTorch.
18
+ """
19
+
20
+
21
+ import timeit
22
+ from typing import Callable, Optional
23
+
24
+ from ..configuration_utils import PretrainedConfig
25
+ from ..models.auto.modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING
26
+ from ..utils import is_py3nvml_available, is_torch_available, logging
27
+ from .benchmark_utils import (
28
+ Benchmark,
29
+ Memory,
30
+ MemorySummary,
31
+ measure_peak_memory_cpu,
32
+ start_memory_tracing,
33
+ stop_memory_tracing,
34
+ )
35
+
36
+
37
+ if is_torch_available():
38
+ import torch
39
+
40
+ from .benchmark_args import PyTorchBenchmarkArguments
41
+
42
+
43
+ if is_py3nvml_available():
44
+ import py3nvml.py3nvml as nvml
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ class PyTorchBenchmark(Benchmark):
51
+ args: PyTorchBenchmarkArguments
52
+ configs: PretrainedConfig
53
+ framework: str = "PyTorch"
54
+
55
+ @property
56
+ def framework_version(self):
57
+ return torch.__version__
58
+
59
+ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
60
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
61
+ return self._measure_speed(_inference)
62
+
63
+ def _inference_memory(
64
+ self, model_name: str, batch_size: int, sequence_length: int
65
+ ) -> [Memory, Optional[MemorySummary]]:
66
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
67
+ return self._measure_memory(_inference)
68
+
69
+ def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
70
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
71
+ return self._measure_speed(_train)
72
+
73
+ def _train_memory(
74
+ self, model_name: str, batch_size: int, sequence_length: int
75
+ ) -> [Memory, Optional[MemorySummary]]:
76
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
77
+ return self._measure_memory(_train)
78
+
79
+ def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
80
+ config = self.config_dict[model_name]
81
+
82
+ if self.args.torchscript:
83
+ config.torchscript = True
84
+
85
+ has_model_class_in_config = (
86
+ hasattr(config, "architectures")
87
+ and isinstance(config.architectures, list)
88
+ and len(config.architectures) > 0
89
+ )
90
+ if not self.args.only_pretrain_model and has_model_class_in_config:
91
+ try:
92
+ model_class = config.architectures[0]
93
+ transformers_module = __import__("transformers", fromlist=[model_class])
94
+ model_cls = getattr(transformers_module, model_class)
95
+ model = model_cls(config)
96
+ except ImportError:
97
+ raise ImportError(
98
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
99
+ " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
100
+ )
101
+ else:
102
+ model = MODEL_MAPPING[config.__class__](config)
103
+
104
+ model.eval()
105
+ model.to(self.args.device)
106
+
107
+ # encoder-decoder has vocab size saved differently
108
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
109
+ input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
110
+
111
+ if self.args.fp16:
112
+ logger.info("Running training in Mixed Precision...")
113
+ if not self.args.is_gpu:
114
+ raise ValueError("Mixed precision is possible only for GPU.")
115
+ # amp seems to have memory leaks so that memory usage
116
+ # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
117
+ model.half()
118
+
119
+ if self.args.torchscript:
120
+ with torch.no_grad():
121
+ inference_model = torch.jit.trace(model, input_ids)
122
+ else:
123
+ inference_model = model
124
+
125
+ def encoder_decoder_forward():
126
+ with torch.no_grad():
127
+ outputs = inference_model(input_ids, decoder_input_ids=input_ids)
128
+ return outputs
129
+
130
+ def encoder_forward():
131
+ with torch.no_grad():
132
+ outputs = inference_model(input_ids)
133
+ return outputs
134
+
135
+ _forward = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
136
+ return _forward
137
+
138
+ def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
139
+ config = self.config_dict[model_name]
140
+
141
+ has_model_class_in_config = (
142
+ hasattr(config, "architectures")
143
+ and isinstance(config.architectures, list)
144
+ and len(config.architectures) > 0
145
+ )
146
+ if not self.args.only_pretrain_model and has_model_class_in_config:
147
+ try:
148
+ model_class = config.architectures[0]
149
+ transformers_module = __import__("transformers", fromlist=[model_class])
150
+ model_cls = getattr(transformers_module, model_class)
151
+ model = model_cls(config)
152
+ except ImportError:
153
+ raise ImportError(
154
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
155
+ " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
156
+ )
157
+ else:
158
+ model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
159
+
160
+ if self.args.torchscript:
161
+ raise NotImplementedError("Training for torchscript is currently not implemented")
162
+ else:
163
+ train_model = model
164
+
165
+ model.train()
166
+ model.to(self.args.device)
167
+
168
+ # encoder-decoder has vocab size saved differently
169
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
170
+ input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
171
+
172
+ if self.args.fp16:
173
+ logger.info("Running training in Mixed Precision...")
174
+ if not self.args.is_gpu:
175
+ raise ValueError("Mixed precision is possible only for GPU.")
176
+
177
+ # amp seems to have memory leaks so that memory usage
178
+ # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
179
+ model.half()
180
+
181
+ def compute_loss_and_backprob_encoder():
182
+ loss = train_model(input_ids, labels=input_ids)[0]
183
+ loss.backward()
184
+ return loss
185
+
186
+ def compute_loss_and_backprob_encoder_decoder():
187
+ loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0]
188
+ loss.backward()
189
+ return loss
190
+
191
+ _train = (
192
+ compute_loss_and_backprob_encoder_decoder
193
+ if config.is_encoder_decoder
194
+ else compute_loss_and_backprob_encoder
195
+ )
196
+ return _train
197
+
198
+ def _measure_speed(self, func) -> float:
199
+ try:
200
+ if self.args.is_tpu or self.args.torchscript:
201
+ # run the model an additional 5 times to stabilize compilation for tpu and torchscript
202
+ logger.info("Do inference on TPU or torchscript. Running model 5 times to stabilize compilation")
203
+ timeit.repeat(
204
+ func,
205
+ repeat=1,
206
+ number=5,
207
+ )
208
+
209
+ # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
210
+ runtimes = timeit.repeat(
211
+ func,
212
+ repeat=self.args.repeat,
213
+ number=10,
214
+ )
215
+
216
+ if self.args.is_tpu and self.args.torch_xla_tpu_print_metrics:
217
+ import torch_xla.debug.metrics as met
218
+
219
+ self.print_fn(met.metrics_report())
220
+
221
+ return min(runtimes) / 10.0
222
+ except RuntimeError as e:
223
+ self.print_fn(f"Doesn't fit on GPU. {e}")
224
+ return "N/A"
225
+
226
+ def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
227
+ try:
228
+ if self.args.trace_memory_line_by_line:
229
+ trace = start_memory_tracing("transformers")
230
+
231
+ if self.args.is_tpu:
232
+ # tpu
233
+ raise NotImplementedError(
234
+ "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with"
235
+ " `--no-memory` or `args.memory=False`"
236
+ )
237
+ elif self.args.is_gpu:
238
+ if not is_py3nvml_available():
239
+ logger.warning(
240
+ "py3nvml not installed, we won't log GPU memory usage. "
241
+ "Install py3nvml (pip install py3nvml) to log information about GPU."
242
+ )
243
+ memory = "N/A"
244
+ else:
245
+ logger.info(
246
+ "Measuring total GPU usage on GPU device. Make sure to not have additional processes running"
247
+ " on the same GPU."
248
+ )
249
+ # init nvml
250
+ nvml.nvmlInit()
251
+ func()
252
+ handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
253
+ meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
254
+ max_bytes_in_use = meminfo.used
255
+ memory = Memory(max_bytes_in_use)
256
+ # shutdown nvml
257
+ nvml.nvmlShutdown()
258
+ else:
259
+ # cpu
260
+ memory_bytes = measure_peak_memory_cpu(func)
261
+ memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
262
+
263
+ if self.args.trace_memory_line_by_line:
264
+ summary = stop_memory_tracing(trace)
265
+ else:
266
+ summary = None
267
+
268
+ return memory, summary
269
+ except RuntimeError as e:
270
+ self.print_fn(f"Doesn't fit on GPU. {e}")
271
+ return "N/A", None
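Taken together, these pieces are typically driven through the `PyTorchBenchmark` / `PyTorchBenchmarkArguments` pair exported by `transformers`; the following is a minimal usage sketch (model name and sizes are illustrative, and the whole benchmark suite is deprecated):

```python
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["google-bert/bert-base-cased"],  # illustrative checkpoint
    batch_sizes=[1],
    sequence_lengths=[8, 32],
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()  # returns a BenchmarkOutput namedtuple with speed/memory results
```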
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_args.py ADDED
@@ -0,0 +1,124 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Tuple
19
+
20
+ from ..utils import (
21
+ cached_property,
22
+ is_torch_available,
23
+ is_torch_xla_available,
24
+ is_torch_xpu_available,
25
+ logging,
26
+ requires_backends,
27
+ )
28
+ from .benchmark_args_utils import BenchmarkArguments
29
+
30
+
31
+ if is_torch_available():
32
+ import torch
33
+
34
+ if is_torch_xla_available():
35
+ import torch_xla.core.xla_model as xm
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ @dataclass
42
+ class PyTorchBenchmarkArguments(BenchmarkArguments):
43
+ deprecated_args = [
44
+ "no_inference",
45
+ "no_cuda",
46
+ "no_tpu",
47
+ "no_speed",
48
+ "no_memory",
49
+ "no_env_print",
50
+ "no_multi_process",
51
+ ]
52
+
53
+ def __init__(self, **kwargs):
54
+ """
55
+ This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
56
+ deleted
57
+ """
58
+ for deprecated_arg in self.deprecated_args:
59
+ if deprecated_arg in kwargs:
60
+ positive_arg = deprecated_arg[3:]
61
+ setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
62
+ logger.warning(
63
+ f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
64
+ f" {positive_arg}={kwargs[positive_arg]}"
65
+ )
66
+
67
+ self.torchscript = kwargs.pop("torchscript", self.torchscript)
68
+ self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
69
+ self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
70
+ super().__init__(**kwargs)
71
+
72
+ torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
73
+ torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
74
+ fp16_opt_level: str = field(
75
+ default="O1",
76
+ metadata={
77
+ "help": (
78
+ "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
79
+ "See details at https://nvidia.github.io/apex/amp.html"
80
+ )
81
+ },
82
+ )
83
+
84
+ @cached_property
85
+ def _setup_devices(self) -> Tuple["torch.device", int]:
86
+ requires_backends(self, ["torch"])
87
+ logger.info("PyTorch: setting up devices")
88
+ if not self.cuda:
89
+ device = torch.device("cpu")
90
+ n_gpu = 0
91
+ elif is_torch_xla_available():
92
+ device = xm.xla_device()
93
+ n_gpu = 0
94
+ elif is_torch_xpu_available():
95
+ device = torch.device("xpu")
96
+ n_gpu = torch.xpu.device_count()
97
+ else:
98
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
99
+ n_gpu = torch.cuda.device_count()
100
+ return device, n_gpu
101
+
102
+ @property
103
+ def is_tpu(self):
104
+ return is_torch_xla_available() and self.tpu
105
+
106
+ @property
107
+ def device_idx(self) -> int:
108
+ requires_backends(self, ["torch"])
109
+ # TODO(PVP): currently only single GPU is supported
110
+ return torch.cuda.current_device()
111
+
112
+ @property
113
+ def device(self) -> "torch.device":
114
+ requires_backends(self, ["torch"])
115
+ return self._setup_devices[0]
116
+
117
+ @property
118
+ def n_gpu(self):
119
+ requires_backends(self, ["torch"])
120
+ return self._setup_devices[1]
121
+
122
+ @property
123
+ def is_gpu(self):
124
+ return self.n_gpu > 0
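The legacy-argument handling in `__init__` above simply maps a deprecated `no_<field>` kwarg onto the negated positive field; a standalone sketch of that translation, using made-up kwargs, looks like this:

```python
# translate "no_<field>" flags into their positive counterparts, as done above
legacy_kwargs = {"no_cuda": True, "torchscript": True}

translated = {}
for key in list(legacy_kwargs):
    if key.startswith("no_"):
        translated[key[3:]] = not legacy_kwargs.pop(key)  # e.g. no_cuda=True -> cuda=False
translated.update(legacy_kwargs)

print(translated)  # {'cuda': False, 'torchscript': True}
```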
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_tf.py ADDED
@@ -0,0 +1,136 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Tuple
19
+
20
+ from ..utils import cached_property, is_tf_available, logging, requires_backends
21
+ from .benchmark_args_utils import BenchmarkArguments
22
+
23
+
24
+ if is_tf_available():
25
+ import tensorflow as tf
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ @dataclass
32
+ class TensorFlowBenchmarkArguments(BenchmarkArguments):
33
+ deprecated_args = [
34
+ "no_inference",
35
+ "no_cuda",
36
+ "no_tpu",
37
+ "no_speed",
38
+ "no_memory",
39
+ "no_env_print",
40
+ "no_multi_process",
41
+ ]
42
+
43
+ def __init__(self, **kwargs):
44
+ """
45
+ This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
46
+ deleted
47
+ """
48
+ for deprecated_arg in self.deprecated_args:
49
+ if deprecated_arg in kwargs:
50
+ positive_arg = deprecated_arg[3:]
51
+ kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
52
+ logger.warning(
53
+ f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
54
+ f" {positive_arg}={kwargs[positive_arg]}"
55
+ )
56
+ self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
57
+ self.device_idx = kwargs.pop("device_idx", self.device_idx)
58
+ self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
59
+ self.use_xla = kwargs.pop("use_xla", self.use_xla)
60
+ super().__init__(**kwargs)
61
+
62
+ tpu_name: str = field(
63
+ default=None,
64
+ metadata={"help": "Name of TPU"},
65
+ )
66
+ device_idx: int = field(
67
+ default=0,
68
+ metadata={"help": "CPU / GPU device index. Defaults to 0."},
69
+ )
70
+ eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
71
+ use_xla: bool = field(
72
+ default=False,
73
+ metadata={
74
+ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
75
+ },
76
+ )
77
+
78
+ @cached_property
79
+ def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
80
+ requires_backends(self, ["tf"])
81
+ tpu = None
82
+ if self.tpu:
83
+ try:
84
+ if self.tpu_name:
85
+ tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
86
+ else:
87
+ tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
88
+ except ValueError:
89
+ tpu = None
90
+ return tpu
91
+
92
+ @cached_property
93
+ def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
94
+ requires_backends(self, ["tf"])
95
+ if self.is_tpu:
96
+ tf.config.experimental_connect_to_cluster(self._setup_tpu)
97
+ tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
98
+
99
+ strategy = tf.distribute.TPUStrategy(self._setup_tpu)
100
+ else:
101
+ # currently no multi gpu is allowed
102
+ if self.is_gpu:
103
+ # TODO: Currently only single GPU is supported
104
+ tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
105
+ strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
106
+ else:
107
+ tf.config.set_visible_devices([], "GPU") # disable GPU
108
+ strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
109
+
110
+ return strategy
111
+
112
+ @property
113
+ def is_tpu(self) -> bool:
114
+ requires_backends(self, ["tf"])
115
+ return self._setup_tpu is not None
116
+
117
+ @property
118
+ def strategy(self) -> "tf.distribute.Strategy":
119
+ requires_backends(self, ["tf"])
120
+ return self._setup_strategy
121
+
122
+ @property
123
+ def gpu_list(self):
124
+ requires_backends(self, ["tf"])
125
+ return tf.config.list_physical_devices("GPU")
126
+
127
+ @property
128
+ def n_gpu(self) -> int:
129
+ requires_backends(self, ["tf"])
130
+ if self.cuda:
131
+ return len(self.gpu_list)
132
+ return 0
133
+
134
+ @property
135
+ def is_gpu(self) -> bool:
136
+ return self.n_gpu > 0
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_utils.py ADDED
@@ -0,0 +1,166 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import dataclasses
18
+ import json
19
+ import warnings
20
+ from dataclasses import dataclass, field
21
+ from time import time
22
+ from typing import List
23
+
24
+ from ..utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ def list_field(default=None, metadata=None):
31
+ return field(default_factory=lambda: default, metadata=metadata)
32
+
33
+
34
+ @dataclass
35
+ class BenchmarkArguments:
36
+ """
37
+ BenchMarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.
38
+
39
+ Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
40
+ line.
41
+ """
42
+
43
+ models: List[str] = list_field(
44
+ default=[],
45
+ metadata={
46
+ "help": (
47
+ "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
48
+ " of all available models"
49
+ )
50
+ },
51
+ )
52
+
53
+ batch_sizes: List[int] = list_field(
54
+ default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
55
+ )
56
+
57
+ sequence_lengths: List[int] = list_field(
58
+ default=[8, 32, 128, 512],
59
+ metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
60
+ )
61
+
62
+ inference: bool = field(
63
+ default=True,
64
+ metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
65
+ )
66
+ cuda: bool = field(
67
+ default=True,
68
+ metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
69
+ )
70
+ tpu: bool = field(
71
+ default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
72
+ )
73
+ fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
74
+ training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
75
+ verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
76
+ speed: bool = field(
77
+ default=True,
78
+ metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
79
+ )
80
+ memory: bool = field(
81
+ default=True,
82
+ metadata={
83
+ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
84
+ },
85
+ )
86
+ trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
87
+ save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
88
+ log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
89
+ env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
90
+ multi_process: bool = field(
91
+ default=True,
92
+ metadata={
93
+ "help": (
94
+ "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
95
+ " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
96
+ " for debugging / testing and on TPU."
97
+ )
98
+ },
99
+ )
100
+ inference_time_csv_file: str = field(
101
+ default=f"inference_time_{round(time())}.csv",
102
+ metadata={"help": "CSV filename used if saving time results to csv."},
103
+ )
104
+ inference_memory_csv_file: str = field(
105
+ default=f"inference_memory_{round(time())}.csv",
106
+ metadata={"help": "CSV filename used if saving memory results to csv."},
107
+ )
108
+ train_time_csv_file: str = field(
109
+ default=f"train_time_{round(time())}.csv",
110
+ metadata={"help": "CSV filename used if saving time results to csv for training."},
111
+ )
112
+ train_memory_csv_file: str = field(
113
+ default=f"train_memory_{round(time())}.csv",
114
+ metadata={"help": "CSV filename used if saving memory results to csv for training."},
115
+ )
116
+ env_info_csv_file: str = field(
117
+ default=f"env_info_{round(time())}.csv",
118
+ metadata={"help": "CSV filename used if saving environment information."},
119
+ )
120
+ log_filename: str = field(
121
+ default=f"log_{round(time())}.csv",
122
+ metadata={"help": "Log filename used if print statements are saved in log."},
123
+ )
124
+ repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
125
+ only_pretrain_model: bool = field(
126
+ default=False,
127
+ metadata={
128
+ "help": (
129
+ "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
130
+ " model weights."
131
+ )
132
+ },
133
+ )
134
+
135
+ def __post_init__(self):
136
+ warnings.warn(
137
+ f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
138
+ " are deprecated in general and it is advised to use external Benchmarking libraries "
139
+ " to benchmark Transformer models.",
140
+ FutureWarning,
141
+ )
142
+
143
+ def to_json_string(self):
144
+ """
145
+ Serializes this instance to a JSON string.
146
+ """
147
+ return json.dumps(dataclasses.asdict(self), indent=2)
148
+
149
+ @property
150
+ def model_names(self) -> List[str]:
151
+ if len(self.models) <= 0:
152
+ raise ValueError(
153
+ "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
154
+ " google-bert/bert-base-cased` or `args.models = ['google-bert/bert-base-cased']."
155
+ )
156
+ return self.models
157
+
158
+ @property
159
+ def do_multi_processing(self):
160
+ if not self.multi_process:
161
+ return False
162
+ elif self.is_tpu:
163
+ logger.info("Multiprocessing is currently not possible on TPU.")
164
+ return False
165
+ else:
166
+ return True
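As noted in the class docstring, `HfArgumentParser` can expose these fields as command-line flags; a minimal sketch of such an entry point (assuming a transformers version that still ships this deprecated module) is:

```python
from transformers import HfArgumentParser
from transformers.benchmark.benchmark_args_utils import BenchmarkArguments

parser = HfArgumentParser(BenchmarkArguments)
benchmark_args = parser.parse_args_into_dataclasses()[0]
print(benchmark_args.to_json_string())
```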
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_tf.py ADDED
@@ -0,0 +1,303 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Benchmarking the library on inference and training in TensorFlow.
18
+ """
19
+
20
+
21
+ import random
22
+ import timeit
23
+ from functools import wraps
24
+ from typing import Callable, Optional
25
+
26
+ from ..configuration_utils import PretrainedConfig
27
+ from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
28
+ from ..utils import is_py3nvml_available, is_tf_available, logging
29
+ from .benchmark_utils import (
30
+ Benchmark,
31
+ Memory,
32
+ MemorySummary,
33
+ measure_peak_memory_cpu,
34
+ start_memory_tracing,
35
+ stop_memory_tracing,
36
+ )
37
+
38
+
39
+ if is_tf_available():
40
+ import tensorflow as tf
41
+ from tensorflow.python.framework.errors_impl import ResourceExhaustedError
42
+
43
+ from .benchmark_args_tf import TensorFlowBenchmarkArguments
44
+
45
+ if is_py3nvml_available():
46
+ import py3nvml.py3nvml as nvml
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
52
+ def run_func(func):
53
+ @wraps(func)
54
+ def run_in_eager_mode(*args, **kwargs):
55
+ return func(*args, **kwargs)
56
+
57
+ @wraps(func)
58
+ @tf.function(experimental_compile=use_xla)
59
+ def run_in_graph_mode(*args, **kwargs):
60
+ return func(*args, **kwargs)
61
+
62
+ if do_eager_mode is True:
63
+ if use_xla is not False:
64
+ raise ValueError(
65
+ "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
66
+ )
67
+ return run_in_eager_mode
68
+ else:
69
+ return run_in_graph_mode
70
+
71
+ return run_func
72
+
73
+
74
+ def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
75
+ rng = random.Random()
76
+ values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
77
+ return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
78
+
79
+
80
+ class TensorFlowBenchmark(Benchmark):
81
+ args: TensorFlowBenchmarkArguments
82
+ configs: PretrainedConfig
83
+ framework: str = "TensorFlow"
84
+
85
+ @property
86
+ def framework_version(self):
87
+ return tf.__version__
88
+
89
+ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
90
+ # initialize GPU on separate process
91
+ strategy = self.args.strategy
92
+ if strategy is None:
93
+ raise ValueError("A device strategy has to be initialized before using TensorFlow.")
94
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
95
+ return self._measure_speed(_inference)
96
+
97
+ def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
98
+ strategy = self.args.strategy
99
+ if strategy is None:
100
+ raise ValueError("A device strategy has to be initialized before using TensorFlow.")
101
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
102
+ return self._measure_speed(_train)
103
+
104
+ def _inference_memory(
105
+ self, model_name: str, batch_size: int, sequence_length: int
106
+ ) -> [Memory, Optional[MemorySummary]]:
107
+ # initialize GPU on separate process
108
+ if self.args.is_gpu:
109
+ tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
110
+ strategy = self.args.strategy
111
+ if strategy is None:
112
+ raise ValueError("A device strategy has to be initialized before using TensorFlow.")
113
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
114
+ return self._measure_memory(_inference)
115
+
116
+ def _train_memory(
117
+ self, model_name: str, batch_size: int, sequence_length: int
118
+ ) -> [Memory, Optional[MemorySummary]]:
119
+ if self.args.is_gpu:
120
+ tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
121
+ strategy = self.args.strategy
122
+ if strategy is None:
123
+ raise ValueError("A device strategy has to be initialized before using TensorFlow.")
124
+
125
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
126
+ return self._measure_memory(_train)
127
+
128
+ def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
129
+ config = self.config_dict[model_name]
130
+
131
+ if self.args.fp16:
132
+ raise NotImplementedError("Mixed precision is currently not supported.")
133
+
134
+ has_model_class_in_config = (
135
+ hasattr(config, "architectures")
136
+ and isinstance(config.architectures, list)
137
+ and len(config.architectures) > 0
138
+ )
139
+ if not self.args.only_pretrain_model and has_model_class_in_config:
140
+ try:
141
+ model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
142
+ transformers_module = __import__("transformers", fromlist=[model_class])
143
+ model_cls = getattr(transformers_module, model_class)
144
+ model = model_cls(config)
145
+ except ImportError:
146
+ raise ImportError(
147
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
148
+ " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
149
+ )
150
+ else:
151
+ model = TF_MODEL_MAPPING[config.__class__](config)
152
+
153
+ # encoder-decoder has vocab size saved differently
154
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
155
+ input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
156
+
157
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
158
+ def encoder_decoder_forward():
159
+ return model(input_ids, decoder_input_ids=input_ids, training=False)
160
+
161
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
162
+ def encoder_forward():
163
+ return model(input_ids, training=False)
164
+
165
+ _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
166
+
167
+ return _inference
168
+
169
+ def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
170
+ config = self.config_dict[model_name]
171
+
172
+ if self.args.eager_mode is not False:
173
+ raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
174
+
175
+ if self.args.fp16:
176
+ raise NotImplementedError("Mixed precision is currently not supported.")
177
+
178
+ has_model_class_in_config = (
179
+ hasattr(config, "architectures")
180
+ and isinstance(config.architectures, list)
181
+ and len(config.architectures) > 0
182
+ )
183
+ if not self.args.only_pretrain_model and has_model_class_in_config:
184
+ try:
185
+ model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
186
+ transformers_module = __import__("transformers", fromlist=[model_class])
187
+ model_cls = getattr(transformers_module, model_class)
188
+ model = model_cls(config)
189
+ except ImportError:
190
+ raise ImportError(
191
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
192
+ " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
193
+ )
194
+ else:
195
+ model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
196
+
197
+ # encoder-decoder has vocab size saved differently
198
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
199
+ input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
200
+
201
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
202
+ def encoder_decoder_train():
203
+ loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
204
+ gradients = tf.gradients(loss, model.trainable_variables)
205
+ return gradients
206
+
207
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
208
+ def encoder_train():
209
+ loss = model(input_ids, labels=input_ids, training=True)[0]
210
+ gradients = tf.gradients(loss, model.trainable_variables)
211
+ return gradients
212
+
213
+ _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
214
+
215
+ return _train
216
+
217
+ def _measure_speed(self, func) -> float:
218
+ with self.args.strategy.scope():
219
+ try:
220
+ if self.args.is_tpu or self.args.use_xla:
221
+ # run the model an additional 5 times to stabilize compilation for tpu
222
+ logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
223
+ timeit.repeat(func, repeat=1, number=5)
224
+
225
+ # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
226
+ runtimes = timeit.repeat(
227
+ func,
228
+ repeat=self.args.repeat,
229
+ number=10,
230
+ )
231
+
232
+ return min(runtimes) / 10.0
233
+ except ResourceExhaustedError as e:
234
+ self.print_fn(f"Doesn't fit on GPU. {e}")
235
+
236
+ def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
237
+ logger.info(
238
+ "Note that TensorFlow allocates more memory than "
239
+ "it might need to speed up computation. "
240
+ "The memory reported here corresponds to the memory "
241
+ "reported by `nvidia-smi`, which can vary depending "
242
+ "on total available memory on the GPU that is used."
243
+ )
244
+ with self.args.strategy.scope():
245
+ try:
246
+ if self.args.trace_memory_line_by_line:
247
+ if not self.args.eager_mode:
248
+ raise ValueError(
249
+ "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
250
+ " consumption line by line."
251
+ )
252
+ trace = start_memory_tracing("transformers")
253
+
254
+ if self.args.is_tpu:
255
+ # tpu
256
+ raise NotImplementedError(
257
+ "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
258
+ " with `args.memory=False`"
259
+ )
260
+ elif self.args.is_gpu:
261
+ # gpu
262
+ if not is_py3nvml_available():
263
+ logger.warning(
264
+ "py3nvml not installed, we won't log GPU memory usage. "
265
+ "Install py3nvml (pip install py3nvml) to log information about GPU."
266
+ )
267
+ memory = "N/A"
268
+ else:
269
+ logger.info(
270
+ "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
271
+ " running on the same GPU."
272
+ )
273
+ # init nvml
274
+ nvml.nvmlInit()
275
+ func()
276
+ handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
277
+ meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
278
+ max_bytes_in_use = meminfo.used
279
+ memory = Memory(max_bytes_in_use)
280
+ # shutdown nvml
281
+ nvml.nvmlShutdown()
282
+ else:
283
+ # cpu
284
+ if self.args.trace_memory_line_by_line:
285
+ logger.info(
286
+ "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
287
+ " TensorFlow."
288
+ )
289
+ memory = None
290
+ else:
291
+ memory_bytes = measure_peak_memory_cpu(func)
292
+ memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
293
+ if self.args.trace_memory_line_by_line:
294
+ summary = stop_memory_tracing(trace)
295
+ if memory is None:
296
+ memory = summary.total
297
+ else:
298
+ summary = None
299
+
300
+ return memory, summary
301
+ except ResourceExhaustedError as e:
302
+ self.print_fn(f"Doesn't fit on GPU. {e}")
303
+ return "N/A", None
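Mirroring the PyTorch case, the TensorFlow benchmark is typically driven through the exported `TensorFlowBenchmark` / `TensorFlowBenchmarkArguments` pair; a minimal, illustrative sketch:

```python
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["google-bert/bert-base-cased"],  # illustrative checkpoint
    batch_sizes=[1],
    sequence_lengths=[8, 32],
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()
```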
env-llmeval/lib/python3.10/site-packages/transformers/benchmark/benchmark_utils.py ADDED
@@ -0,0 +1,914 @@
1
+ # This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
2
+
3
+ # Copyright 2020 The HuggingFace Team and the AllenNLP authors. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Utilities for working with the local dataset cache.
18
+ """
19
+
20
+ import copy
21
+ import csv
22
+ import linecache
23
+ import os
24
+ import platform
25
+ import sys
26
+ import warnings
27
+ from abc import ABC, abstractmethod
28
+ from collections import defaultdict, namedtuple
29
+ from datetime import datetime
30
+ from multiprocessing import Pipe, Process, Queue
31
+ from multiprocessing.connection import Connection
32
+ from typing import Callable, Iterable, List, NamedTuple, Optional, Union
33
+
34
+ from .. import AutoConfig, PretrainedConfig
35
+ from .. import __version__ as version
36
+ from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging
37
+ from .benchmark_args_utils import BenchmarkArguments
38
+
39
+
40
+ if is_torch_available():
41
+ from torch.cuda import empty_cache as torch_empty_cache
42
+
43
+ if is_tf_available():
44
+ from tensorflow.python.eager import context as tf_context
45
+
46
+ if is_psutil_available():
47
+ import psutil
48
+
49
+ if is_py3nvml_available():
50
+ import py3nvml.py3nvml as nvml
51
+
52
+ if platform.system() == "Windows":
53
+ from signal import CTRL_C_EVENT as SIGKILL
54
+ else:
55
+ from signal import SIGKILL
56
+
57
+
58
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
59
+
60
+
61
+ _is_memory_tracing_enabled = False
62
+
63
+ BenchmarkOutput = namedtuple(
64
+ "BenchmarkOutput",
65
+ [
66
+ "time_inference_result",
67
+ "memory_inference_result",
68
+ "time_train_result",
69
+ "memory_train_result",
70
+ "inference_summary",
71
+ "train_summary",
72
+ ],
73
+ )
74
+
75
+
76
+ def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]:
77
+ """
78
+ This function wraps another function into its own separated process. In order to ensure accurate memory
79
+ measurements it is important that the function is executed in a separate process
80
+
81
+ Args:
82
+ - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process
83
+ - `do_multi_processing`: (`bool`) Whether to run function on separate process or not
84
+ """
85
+
86
+ def multi_process_func(*args, **kwargs):
87
+ # run function in an individual
88
+ # process to get correct memory
89
+ def wrapper_func(queue: Queue, *args):
90
+ try:
91
+ result = func(*args)
92
+ except Exception as e:
93
+ logger.error(e)
94
+ print(e)
95
+ result = "N/A"
96
+ queue.put(result)
97
+
98
+ queue = Queue()
99
+ p = Process(target=wrapper_func, args=[queue] + list(args))
100
+ p.start()
101
+ result = queue.get()
102
+ p.join()
103
+ return result
104
+
105
+ if do_multi_processing:
106
+ logger.info(f"Function {func} is executed in its own process...")
107
+ return multi_process_func
108
+ else:
109
+ return func
110
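The pattern above, running the measured callable in its own process and shipping the result back through a `Queue`, can be illustrated with a small self-contained sketch (the workload is arbitrary):

```python
from multiprocessing import Process, Queue

def expensive():
    return sum(range(10_000_000))

def run_in_child(queue: Queue):
    # mirror wrapper_func above: compute in the child, send the result to the parent
    queue.put(expensive())

if __name__ == "__main__":
    queue = Queue()
    p = Process(target=run_in_child, args=(queue,))
    p.start()
    result = queue.get()
    p.join()
    print(result)
```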
+
111
+
112
+ def is_memory_tracing_enabled():
113
+ global _is_memory_tracing_enabled
114
+ return _is_memory_tracing_enabled
115
+
116
+
117
+ class Frame(NamedTuple):
118
+ """
119
+ `Frame` is a NamedTuple used to gather the current frame state. `Frame` has the following fields:
120
+
121
+ - 'filename' (string): Name of the file currently executed
122
+ - 'module' (string): Name of the module currently executed
123
+ - 'line_number' (int): Number of the line currently executed
124
+ - 'event' (string): Event that triggered the tracing (default will be "line")
125
+ - 'line_text' (string): Text of the line in the python script
126
+ """
127
+
128
+ filename: str
129
+ module: str
130
+ line_number: int
131
+ event: str
132
+ line_text: str
133
+
134
+
135
+ class UsedMemoryState(NamedTuple):
136
+ """
137
+ `UsedMemoryState` are named tuples with the following fields:
138
+
139
+ - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file,
140
+ location in current file)
141
+ - 'cpu_memory': CPU RSS memory state *before* executing the line
142
+ - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if
143
+ provided)
144
+ """
145
+
146
+ frame: Frame
147
+ cpu_memory: int
148
+ gpu_memory: int
149
+
150
+
151
+ class Memory(NamedTuple):
152
+ """
153
+ `Memory` NamedTuple have a single field `bytes` and you can get a human readable str of the number of mega bytes by
154
+ calling `__repr__`
155
+
156
+ - `byte` (integer): number of bytes,
157
+ """
158
+
159
+ bytes: int
160
+
161
+ def __repr__(self) -> str:
162
+ return str(bytes_to_mega_bytes(self.bytes))
163
+
164
+
165
+ class MemoryState(NamedTuple):
166
+ """
167
+ `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
168
+
169
+ - `frame` (`Frame`): the current frame (see above)
170
+ - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple
171
+ - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple
172
+ - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple
173
+ """
174
+
175
+ frame: Frame
176
+ cpu: Memory
177
+ gpu: Memory
178
+ cpu_gpu: Memory
179
+
180
+
181
+ class MemorySummary(NamedTuple):
182
+ """
183
+ `MemorySummary` namedtuple otherwise with the fields:
184
+
185
+ - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by
186
+ subtracting the memory after executing each line from the memory before executing said line.
187
+ - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line
188
+ obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted
189
+ from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory
190
+ is released)
191
+ - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with
192
+ memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
193
+ """
194
+
195
+ sequential: List[MemoryState]
196
+ cumulative: List[MemoryState]
197
+ current: List[MemoryState]
198
+ total: Memory
199
+
200
+
201
+ MemoryTrace = List[UsedMemoryState]
202
+
203
+
204
+ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int:
205
+ """
206
+ measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and
207
+ at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package
208
+ `memory_profiler`:
209
+ https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239
210
+
211
+ Args:
212
+ - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure
213
+ the peak memory
214
+
215
+ - `interval`: (`float`, `optional`, defaults to `0.5`) interval in second for which to measure the memory usage
216
+
217
+ - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure gpu usage
218
+
219
+ Returns:
220
+
221
+ - `max_memory`: (`int`) consumed memory peak in Bytes
222
+ """
223
+
224
+ def get_cpu_memory(process_id: int) -> int:
225
+ """
226
+ measures current cpu memory usage of a given `process_id`
227
+
228
+ Args:
229
+ - `process_id`: (`int`) process_id for which to measure memory
230
+
231
+ Returns
232
+
233
+ - `memory`: (`int`) consumed memory in Bytes
234
+ """
235
+ process = psutil.Process(process_id)
236
+ try:
237
+ meminfo_attr = "memory_info" if hasattr(process, "memory_info") else "get_memory_info"
238
+ memory = getattr(process, meminfo_attr)()[0]
239
+ except psutil.AccessDenied:
240
+ raise ValueError("Error with Psutil.")
241
+ return memory
242
+
243
+ if not is_psutil_available():
244
+ logger.warning(
245
+ "Psutil not installed, we won't log CPU memory usage. "
246
+ "Install Psutil (pip install psutil) to use CPU memory tracing."
247
+ )
248
+ max_memory = "N/A"
249
+ else:
250
+
251
+ class MemoryMeasureProcess(Process):
252
+
253
+ """
254
+ `MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. Used to measure the
255
+ memory usage of a process
256
+ """
257
+
258
+ def __init__(self, process_id: int, child_connection: Connection, interval: float):
259
+ super().__init__()
260
+ self.process_id = process_id
261
+ self.interval = interval
262
+ self.connection = child_connection
263
+ self.num_measurements = 1
264
+ self.mem_usage = get_cpu_memory(self.process_id)
265
+
266
+ def run(self):
267
+ self.connection.send(0)
268
+ stop = False
269
+ while True:
270
+ self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id))
271
+ self.num_measurements += 1
272
+
273
+ if stop:
274
+ break
275
+
276
+ stop = self.connection.poll(self.interval)
277
+
278
+ # send results to parent pipe
279
+ self.connection.send(self.mem_usage)
280
+ self.connection.send(self.num_measurements)
281
+
282
+ while True:
283
+ # create child, parent connection
284
+ child_connection, parent_connection = Pipe()
285
+
286
+ # instantiate process
287
+ mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval)
288
+ mem_process.start()
289
+
290
+ # wait until we get memory
291
+ parent_connection.recv()
292
+
293
+ try:
294
+ # execute function
295
+ function()
296
+
297
+ # start parent connection
298
+ parent_connection.send(0)
299
+
300
+ # receive memory and num measurements
301
+ max_memory = parent_connection.recv()
302
+ num_measurements = parent_connection.recv()
303
+ except Exception:
304
+ # kill process in a clean way
305
+ parent = psutil.Process(os.getpid())
306
+ for child in parent.children(recursive=True):
307
+ os.kill(child.pid, SIGKILL)
308
+ mem_process.join(0)
309
+ raise RuntimeError("Process killed. Error in Process")
310
+
311
+ # wait for the measuring process for at most 20 * interval seconds or until it finishes
312
+ mem_process.join(20 * interval)
313
+
314
+ if (num_measurements > 4) or (interval < 1e-6):
315
+ break
316
+
317
+ # reduce interval
318
+ interval /= 10
319
+
320
+ return max_memory
321
+
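# --- Illustrative usage sketch (added for clarity, not part of the diff above) ---
# A minimal example of how `measure_peak_memory_cpu` could be called; the helper
# `allocate_big_list` below is hypothetical and only exists for this illustration.
def allocate_big_list():
    _ = [0] * (10**7)  # temporarily allocate a large list of Python ints

peak_bytes = measure_peak_memory_cpu(allocate_big_list, interval=0.5)
if peak_bytes != "N/A":  # "N/A" is returned when psutil is not installed
    print(f"Peak CPU memory: {peak_bytes >> 20} MB")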
322
+
323
+ def start_memory_tracing(
324
+ modules_to_trace: Optional[Union[str, Iterable[str]]] = None,
325
+ modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None,
326
+ events_to_trace: str = "line",
327
+ gpus_to_trace: Optional[List[int]] = None,
328
+ ) -> MemoryTrace:
329
+ """
330
+ Set up line-by-line tracing to record RSS memory (RAM) at each line of a module or sub-module. See `./benchmark.py` for
331
+ usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory "Resident
332
+ Set Size" (the non-swapped physical memory the process is using). See
333
+ https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
334
+
335
+ Args:
336
+ - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded; if a string or list
337
+ of strings, only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or
338
+ 'transformers.models.gpt2.modeling_gpt2')
339
+ - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is excluded; if a string or list
340
+ of strings, events from the listed module/sub-module will not be recorded (e.g. 'torch')
341
+ - `events_to_trace`: string or list of strings of events to be recorded (see the official python doc for
342
+ `sys.settrace` for the list of events), defaults to `line`
343
+ - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Defaults to tracing all GPUs
344
+
345
+ Return:
346
+
347
+ - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script).
348
+
349
+ - `UsedMemoryState` are named tuples with the following fields:
350
+
351
+ - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current
352
+ file, location in current file)
353
+ - 'cpu_memory': CPU RSS memory state *before* executing the line
354
+ - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only
355
+ `gpus_to_trace` if provided)
356
+
357
+ `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state. `Frame` has the following fields:
358
+ - 'filename' (string): Name of the file currently executed
+ - 'module' (string): Name of the module currently executed
359
+ - 'line_number' (int): Number of the line currently executed
+ - 'event' (string): Event that triggered the tracing (default will be "line")
360
+ - 'line_text' (string): Text of the line in the python script
361
+
362
+ """
363
+ if is_psutil_available():
364
+ process = psutil.Process(os.getpid())
365
+ else:
366
+ logger.warning(
367
+ "Psutil not installed, we won't log CPU memory usage. "
368
+ "Install psutil (pip install psutil) to use CPU memory tracing."
369
+ )
370
+ process = None
371
+
372
+ if is_py3nvml_available():
373
+ try:
374
+ nvml.nvmlInit()
375
+ devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace
376
+ nvml.nvmlShutdown()
377
+ except (OSError, nvml.NVMLError):
378
+ logger.warning("Error while initializing communication with GPU. We won't perform GPU memory tracing.")
379
+ log_gpu = False
380
+ else:
381
+ log_gpu = is_torch_available() or is_tf_available()
382
+ else:
383
+ logger.warning(
384
+ "py3nvml not installed, we won't log GPU memory usage. "
385
+ "Install py3nvml (pip install py3nvml) to use GPU memory tracing."
386
+ )
387
+ log_gpu = False
388
+
389
+ memory_trace = []
390
+
391
+ def traceit(frame, event, args):
392
+ """
393
+ Tracing method executed before running each line in a module or sub-module. Records the memory allocated in a list
394
+ with debugging information.
395
+ """
396
+ global _is_memory_tracing_enabled
397
+
398
+ if not _is_memory_tracing_enabled:
399
+ return traceit
400
+
401
+ # Filter events
402
+ if events_to_trace is not None:
403
+ if isinstance(events_to_trace, str) and event != events_to_trace:
404
+ return traceit
405
+ elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace:
406
+ return traceit
407
+
408
+ if "__name__" not in frame.f_globals:
409
+ return traceit
410
+
411
+ # Filter modules
412
+ name = frame.f_globals["__name__"]
413
+ if not isinstance(name, str):
414
+ return traceit
415
+ else:
416
+ # Filter whitelist of modules to trace
417
+ if modules_to_trace is not None:
418
+ if isinstance(modules_to_trace, str) and modules_to_trace not in name:
419
+ return traceit
420
+ elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace):
421
+ return traceit
422
+
423
+ # Filter blacklist of modules not to trace
424
+ if modules_not_to_trace is not None:
425
+ if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name:
426
+ return traceit
427
+ elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace):
428
+ return traceit
429
+
430
+ # Record current tracing state (file, location in file...)
431
+ lineno = frame.f_lineno
432
+ filename = frame.f_globals["__file__"]
433
+ if filename.endswith(".pyc") or filename.endswith(".pyo"):
434
+ filename = filename[:-1]
435
+ line = linecache.getline(filename, lineno).rstrip()
436
+ traced_state = Frame(filename, name, lineno, event, line)
437
+
438
+ # Record current memory state (rss memory) and compute difference with previous memory state
439
+ cpu_mem = 0
440
+ if process is not None:
441
+ mem = process.memory_info()
442
+ cpu_mem = mem.rss
443
+
444
+ gpu_mem = 0
445
+ if log_gpu:
446
+ # Clear GPU caches
447
+ if is_torch_available():
448
+ torch_empty_cache()
449
+ if is_tf_available():
450
+ tf_context.context()._clear_caches() # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802
451
+
452
+ # Sum used memory for all GPUs
453
+ nvml.nvmlInit()
454
+
455
+ for i in devices:
456
+ handle = nvml.nvmlDeviceGetHandleByIndex(i)
457
+ meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
458
+ gpu_mem += meminfo.used
459
+
460
+ nvml.nvmlShutdown()
461
+
462
+ mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
463
+ memory_trace.append(mem_state)
464
+
465
+ return traceit
466
+
467
+ sys.settrace(traceit)
468
+
469
+ global _is_memory_tracing_enabled
470
+ _is_memory_tracing_enabled = True
471
+
472
+ return memory_trace
473
+
474
+
475
+ def stop_memory_tracing(
476
+ memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True
477
+ ) -> Optional[MemorySummary]:
478
+ """
479
+ Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
480
+
481
+ Args:
482
+ `memory_trace` (optional output of start_memory_tracing, default: None):
483
+ memory trace to convert in summary
484
+ `ignore_released_memory` (boolean, default: True):
485
+ if True we only sum memory increase to compute total memory
486
+
487
+ Return:
488
+
489
+ - None if `memory_trace` is None
490
+ - `MemorySummary` namedtuple otherwise with the fields:
491
+
492
+ - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by
493
+ subtracting the memory recorded before each line from the memory recorded before the next line (i.e. the increase caused by executing that line).
494
+ - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each
495
+ line obtained by summing repeated memory increase for a line if it's executed several times. The list is
496
+ sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative
497
+ if memory is released)
498
+ - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with
499
+ memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
500
+
501
+ `Memory` named tuples have the following fields:
502
+
503
+ - `byte` (integer): number of bytes,
504
+ - `string` (string): human readable string representation of the number of bytes (e.g. "3.5MB")
505
+
506
+ `Frame` are namedtuple used to list the current frame state and have the following fields:
507
+
508
+ - 'filename' (string): Name of the file currently executed
509
+ - 'module' (string): Name of the module currently executed
510
+ - 'line_number' (int): Number of the line currently executed
511
+ - 'event' (string): Event that triggered the tracing (default will be "line")
512
+ - 'line_text' (string): Text of the line in the python script
513
+
514
+ `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
515
+
516
+ - `frame` (`Frame`): the current frame (see above)
517
+ - `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
518
+ - `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
519
+ - `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
520
+ """
521
+ global _is_memory_tracing_enabled
522
+ _is_memory_tracing_enabled = False
523
+
524
+ if memory_trace is not None and len(memory_trace) > 1:
525
+ memory_diff_trace = []
526
+ memory_curr_trace = []
527
+
528
+ cumulative_memory_dict = defaultdict(lambda: [0, 0, 0])
529
+
530
+ for (
531
+ (frame, cpu_mem, gpu_mem),
532
+ (next_frame, next_cpu_mem, next_gpu_mem),
533
+ ) in zip(memory_trace[:-1], memory_trace[1:]):
534
+ cpu_mem_inc = next_cpu_mem - cpu_mem
535
+ gpu_mem_inc = next_gpu_mem - gpu_mem
536
+ cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc
537
+ memory_diff_trace.append(
538
+ MemoryState(
539
+ frame=frame,
540
+ cpu=Memory(cpu_mem_inc),
541
+ gpu=Memory(gpu_mem_inc),
542
+ cpu_gpu=Memory(cpu_gpu_mem_inc),
543
+ )
544
+ )
545
+
546
+ memory_curr_trace.append(
547
+ MemoryState(
548
+ frame=frame,
549
+ cpu=Memory(next_cpu_mem),
550
+ gpu=Memory(next_gpu_mem),
551
+ cpu_gpu=Memory(next_gpu_mem + next_cpu_mem),
552
+ )
553
+ )
554
+
555
+ cumulative_memory_dict[frame][0] += cpu_mem_inc
556
+ cumulative_memory_dict[frame][1] += gpu_mem_inc
557
+ cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc
558
+
559
+ cumulative_memory = sorted(
560
+ cumulative_memory_dict.items(), key=lambda x: x[1][2], reverse=True
561
+ ) # order by the total CPU + GPU memory increase
562
+ cumulative_memory = [
563
+ MemoryState(
564
+ frame=frame,
565
+ cpu=Memory(cpu_mem_inc),
566
+ gpu=Memory(gpu_mem_inc),
567
+ cpu_gpu=Memory(cpu_gpu_mem_inc),
568
+ )
569
+ for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory
570
+ ]
571
+
572
+ memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True)
573
+
574
+ if ignore_released_memory:
575
+ total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)
576
+ else:
577
+ total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)
578
+
579
+ total_memory = Memory(total_memory)
580
+
581
+ return MemorySummary(
582
+ sequential=memory_diff_trace,
583
+ cumulative=cumulative_memory,
584
+ current=memory_curr_trace,
585
+ total=total_memory,
586
+ )
587
+
588
+ return None
589
+
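# --- Illustrative usage sketch (added for clarity, not part of the diff above) ---
# How `start_memory_tracing` and `stop_memory_tracing` are meant to be combined;
# the traced module name and the profiled code below are placeholders.
trace = start_memory_tracing("transformers")   # record RSS before every traced line
# ... run the code to profile here, e.g. a model forward pass ...
summary = stop_memory_tracing(trace)
if summary is not None:
    print(f"Total memory increase: {summary.total}")
    for state in summary.cumulative[:5]:       # five most memory-hungry lines
        print(state.frame.filename, state.frame.line_number, state.cpu_gpu)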
590
+
591
+ def bytes_to_mega_bytes(memory_amount: int) -> int:
592
+ """Utility to convert a number of bytes (int) into a number of mega bytes (int)"""
593
+ return memory_amount >> 20
594
+
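# Worked example (added for clarity): `>> 20` is an integer division by 2**20,
# so the conversion truncates rather than rounds.
assert bytes_to_mega_bytes(3 * 2**20) == 3
assert bytes_to_mega_bytes(3 * 2**20 + 2**19) == 3   # 3.5 MB truncates to 3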
595
+
596
+ class Benchmark(ABC):
597
+ """
598
+ `Benchmark` is a simple but feature-complete benchmarking utility to compare the memory and time performance of models in
599
+ Transformers.
600
+ """
601
+
602
+ args: BenchmarkArguments
603
+ configs: PretrainedConfig
604
+ framework: str
605
+
606
+ def __init__(self, args: BenchmarkArguments = None, configs: PretrainedConfig = None):
607
+ self.args = args
608
+ if configs is None:
609
+ self.config_dict = {
610
+ model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names
611
+ }
612
+ else:
613
+ self.config_dict = dict(zip(self.args.model_names, configs))
614
+
615
+ warnings.warn(
616
+ f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
617
+ " are deprecated in general and it is advised to use external Benchmarking libraries "
618
+ " to benchmark Transformer models.",
619
+ FutureWarning,
620
+ )
621
+
622
+ if self.args.memory and os.getenv("TRANSFORMERS_USE_MULTIPROCESSING") == "0":  # env vars are strings, so compare against "0"
623
+ logger.warning(
624
+ "Memory consumption will not be measured accurately if `args.multi_process` is set to `False`. The"
625
+ " flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing."
626
+ )
627
+
628
+ self._print_fn = None
629
+ self._framework_version = None
630
+ self._environment_info = None
631
+
632
+ @property
633
+ def print_fn(self):
634
+ if self._print_fn is None:
635
+ if self.args.log_print:
636
+
637
+ def print_and_log(*args):
638
+ with open(self.args.log_filename, "a") as log_file:
639
+ log_file.write("".join(args) + "\n")
640
+ print(*args)
641
+
642
+ self._print_fn = print_and_log
643
+ else:
644
+ self._print_fn = print
645
+ return self._print_fn
646
+
647
+ @property
648
+ @abstractmethod
649
+ def framework_version(self):
650
+ pass
651
+
652
+ @abstractmethod
653
+ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
654
+ pass
655
+
656
+ @abstractmethod
657
+ def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
658
+ pass
659
+
660
+ @abstractmethod
661
+ def _inference_memory(
662
+ self, model_name: str, batch_size: int, sequence_length: int
663
+ ) -> [Memory, Optional[MemorySummary]]:
664
+ pass
665
+
666
+ @abstractmethod
667
+ def _train_memory(
668
+ self, model_name: str, batch_size: int, sequence_length: int
669
+ ) -> [Memory, Optional[MemorySummary]]:
670
+ pass
671
+
672
+ def inference_speed(self, *args, **kwargs) -> float:
673
+ return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs)
674
+
675
+ def train_speed(self, *args, **kwargs) -> float:
676
+ return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs)
677
+
678
+ def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
679
+ return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs)
680
+
681
+ def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
682
+ return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs)
683
+
684
+ def run(self):
685
+ result_dict = {model_name: {} for model_name in self.args.model_names}
686
+ inference_result_time = copy.deepcopy(result_dict)
687
+ inference_result_memory = copy.deepcopy(result_dict)
688
+ train_result_time = copy.deepcopy(result_dict)
689
+ train_result_memory = copy.deepcopy(result_dict)
690
+
691
+ for c, model_name in enumerate(self.args.model_names):
692
+ self.print_fn(f"{c + 1} / {len(self.args.model_names)}")
693
+
694
+ model_dict = {
695
+ "bs": self.args.batch_sizes,
696
+ "ss": self.args.sequence_lengths,
697
+ "result": {i: {} for i in self.args.batch_sizes},
698
+ }
699
+ inference_result_time[model_name] = copy.deepcopy(model_dict)
700
+ inference_result_memory[model_name] = copy.deepcopy(model_dict)
701
+ train_result_time[model_name] = copy.deepcopy(model_dict)
702
+ train_result_memory[model_name] = copy.deepcopy(model_dict)
703
+
704
+ inference_summary = train_summary = None
705
+
706
+ for batch_size in self.args.batch_sizes:
707
+ for sequence_length in self.args.sequence_lengths:
708
+ if self.args.inference:
709
+ if self.args.memory:
710
+ memory, inference_summary = self.inference_memory(model_name, batch_size, sequence_length)
711
+ inference_result_memory[model_name]["result"][batch_size][sequence_length] = memory
712
+ if self.args.speed:
713
+ time = self.inference_speed(model_name, batch_size, sequence_length)
714
+ inference_result_time[model_name]["result"][batch_size][sequence_length] = time
715
+
716
+ if self.args.training:
717
+ if self.args.memory:
718
+ memory, train_summary = self.train_memory(model_name, batch_size, sequence_length)
719
+ train_result_memory[model_name]["result"][batch_size][sequence_length] = memory
720
+ if self.args.speed:
721
+ time = self.train_speed(model_name, batch_size, sequence_length)
722
+ train_result_time[model_name]["result"][batch_size][sequence_length] = time
723
+
724
+ if self.args.inference:
725
+ if self.args.speed:
726
+ self.print_fn("\n" + 20 * "=" + ("INFERENCE - SPEED - RESULT").center(40) + 20 * "=")
727
+ self.print_results(inference_result_time, type_label="Time in s")
728
+ self.save_to_csv(inference_result_time, self.args.inference_time_csv_file)
729
+ if self.args.is_tpu:
730
+ self.print_fn(
731
+ "TPU was used for inference. Note that the time after compilation stabilized (after ~10"
732
+ " inferences model.forward(..) calls) was measured."
733
+ )
734
+
735
+ if self.args.memory:
736
+ self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - RESULT").center(40) + 20 * "=")
737
+ self.print_results(inference_result_memory, type_label="Memory in MB")
738
+ self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file)
739
+
740
+ if self.args.trace_memory_line_by_line:
741
+ self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
742
+ self.print_memory_trace_statistics(inference_summary)
743
+
744
+ if self.args.training:
745
+ if self.args.speed:
746
+ self.print_fn("\n" + 20 * "=" + ("TRAIN - SPEED - RESULTS").center(40) + 20 * "=")
747
+ self.print_results(train_result_time, "Time in s")
748
+ self.save_to_csv(train_result_time, self.args.train_time_csv_file)
749
+ if self.args.is_tpu:
750
+ self.print_fn(
751
+ "TPU was used for training. Note that the time after compilation stabilized (after ~10 train"
752
+ " loss=model.forward(...) + loss.backward() calls) was measured."
753
+ )
754
+
755
+ if self.args.memory:
756
+ self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - RESULTS").center(40) + 20 * "=")
757
+ self.print_results(train_result_memory, type_label="Memory in MB")
758
+ self.save_to_csv(train_result_memory, self.args.train_memory_csv_file)
759
+
760
+ if self.args.trace_memory_line_by_line:
761
+ self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
762
+ self.print_memory_trace_statistics(train_summary)
763
+
764
+ if self.args.env_print:
765
+ self.print_fn("\n" + 20 * "=" + ("ENVIRONMENT INFORMATION").center(40) + 20 * "=")
766
+ self.print_fn("\n".join([f"- {prop}: {val}" for prop, val in self.environment_info.items()]) + "\n")
767
+
768
+ if self.args.save_to_csv:
769
+ with open(self.args.env_info_csv_file, mode="w", newline="") as csv_file:
770
+ writer = csv.writer(csv_file)
771
+ for key, value in self.environment_info.items():
772
+ writer.writerow([key, value])
773
+
774
+ return BenchmarkOutput(
775
+ inference_result_time,
776
+ inference_result_memory,
777
+ train_result_time,
778
+ train_result_memory,
779
+ inference_summary,
780
+ train_summary,
781
+ )
782
+
783
+ @property
784
+ def environment_info(self):
785
+ if self._environment_info is None:
786
+ info = {}
787
+ info["transformers_version"] = version
788
+ info["framework"] = self.framework
789
+ if self.framework == "PyTorch":
790
+ info["use_torchscript"] = self.args.torchscript
791
+ if self.framework == "TensorFlow":
792
+ info["eager_mode"] = self.args.eager_mode
793
+ info["use_xla"] = self.args.use_xla
794
+ info["framework_version"] = self.framework_version
795
+ info["python_version"] = platform.python_version()
796
+ info["system"] = platform.system()
797
+ info["cpu"] = platform.processor()
798
+ info["architecture"] = platform.architecture()[0]
799
+ info["date"] = datetime.date(datetime.now())
800
+ info["time"] = datetime.time(datetime.now())
801
+ info["fp16"] = self.args.fp16
802
+ info["use_multiprocessing"] = self.args.do_multi_processing
803
+ info["only_pretrain_model"] = self.args.only_pretrain_model
804
+
805
+ if is_psutil_available():
806
+ info["cpu_ram_mb"] = bytes_to_mega_bytes(psutil.virtual_memory().total)
807
+ else:
808
+ logger.warning(
809
+ "Psutil not installed, we won't log available CPU memory. "
810
+ "Install psutil (pip install psutil) to log available CPU memory."
811
+ )
812
+ info["cpu_ram_mb"] = "N/A"
813
+
814
+ info["use_gpu"] = self.args.is_gpu
815
+ if self.args.is_gpu:
816
+ info["num_gpus"] = 1 # TODO(PVP) Currently only single GPU is supported
817
+ if is_py3nvml_available():
818
+ nvml.nvmlInit()
819
+ handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
820
+ info["gpu"] = nvml.nvmlDeviceGetName(handle)
821
+ info["gpu_ram_mb"] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total)
822
+ info["gpu_power_watts"] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000
823
+ info["gpu_performance_state"] = nvml.nvmlDeviceGetPerformanceState(handle)
824
+ nvml.nvmlShutdown()
825
+ else:
826
+ logger.warning(
827
+ "py3nvml not installed, we won't log GPU memory usage. "
828
+ "Install py3nvml (pip install py3nvml) to log information about GPU."
829
+ )
830
+ info["gpu"] = "N/A"
831
+ info["gpu_ram_mb"] = "N/A"
832
+ info["gpu_power_watts"] = "N/A"
833
+ info["gpu_performance_state"] = "N/A"
834
+
835
+ info["use_tpu"] = self.args.is_tpu
836
+ # TODO(PVP): See if we can add more information about TPU
837
+ # see: https://github.com/pytorch/xla/issues/2180
838
+
839
+ self._environment_info = info
840
+ return self._environment_info
841
+
842
+ def print_results(self, result_dict, type_label):
843
+ self.print_fn(80 * "-")
844
+ self.print_fn(
845
+ "Model Name".center(30) + "Batch Size".center(15) + "Seq Length".center(15) + type_label.center(15)
846
+ )
847
+ self.print_fn(80 * "-")
848
+ for model_name in self.args.model_names:
849
+ for batch_size in result_dict[model_name]["bs"]:
850
+ for sequence_length in result_dict[model_name]["ss"]:
851
+ result = result_dict[model_name]["result"][batch_size][sequence_length]
852
+ if isinstance(result, float):
853
+ result = round(1000 * result) / 1000
854
+ result = "< 0.001" if result == 0.0 else str(result)
855
+ else:
856
+ result = str(result)
857
+ self.print_fn(
858
+ model_name[:30].center(30) + str(batch_size).center(15),
859
+ str(sequence_length).center(15),
860
+ result.center(15),
861
+ )
862
+ self.print_fn(80 * "-")
863
+
864
+ def print_memory_trace_statistics(self, summary: MemorySummary):
865
+ self.print_fn(
866
+ "\nLine by line memory consumption:\n"
867
+ + "\n".join(
868
+ f"{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
869
+ for state in summary.sequential
870
+ )
871
+ )
872
+ self.print_fn(
873
+ "\nLines with top memory consumption:\n"
874
+ + "\n".join(
875
+ f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
876
+ for state in summary.cumulative[:6]
877
+ )
878
+ )
879
+ self.print_fn(
880
+ "\nLines with lowest memory consumption:\n"
881
+ + "\n".join(
882
+ f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
883
+ for state in summary.cumulative[-6:]
884
+ )
885
+ )
886
+ self.print_fn(f"\nTotal memory increase: {summary.total}")
887
+
888
+ def save_to_csv(self, result_dict, filename):
889
+ if not self.args.save_to_csv:
890
+ return
891
+ self.print_fn("Saving results to csv.")
892
+ with open(filename, mode="w") as csv_file:
893
+ if len(self.args.model_names) <= 0:
894
+ raise ValueError(f"At least 1 model should be defined, but got {self.args.model_names}")
895
+
896
+ fieldnames = ["model", "batch_size", "sequence_length"]
897
+ writer = csv.DictWriter(csv_file, fieldnames=fieldnames + ["result"])
898
+ writer.writeheader()
899
+
900
+ for model_name in self.args.model_names:
901
+ result_dict_model = result_dict[model_name]["result"]
902
+ for bs in result_dict_model:
903
+ for ss in result_dict_model[bs]:
904
+ result_model = result_dict_model[bs][ss]
905
+ writer.writerow(
906
+ {
907
+ "model": model_name,
908
+ "batch_size": bs,
909
+ "sequence_length": ss,
910
+ "result": ("{}" if not isinstance(result_model, float) else "{:.4f}").format(
911
+ result_model
912
+ ),
913
+ }
914
+ )
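# --- Illustrative usage sketch (added for clarity, not part of the diff above) ---
# The abstract `Benchmark` class is normally used through a framework-specific
# subclass. Assuming the PyTorch subclass shipped with this version of transformers,
# a minimal run could look like this (model name and sizes are examples only, and
# these benchmarking utilities are deprecated):
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

bench_args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[1],
    sequence_lengths=[8],
    inference=True,
    training=False,
    memory=False,   # skip memory measurement for a quick smoke test
)
results = PyTorchBenchmark(bench_args).run()
print(results.time_inference_result)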
env-llmeval/lib/python3.10/site-packages/transformers/cache_utils.py ADDED
@@ -0,0 +1,435 @@
1
+ from dataclasses import dataclass
2
+ from typing import Any, Dict, List, Optional, Tuple
3
+
4
+ import torch
5
+
6
+ from .configuration_utils import PretrainedConfig
7
+ from .utils import logging
8
+
9
+
10
+ logger = logging.get_logger(__name__)
11
+
12
+
13
+ @dataclass
14
+ class Cache:
15
+ """
16
+ Base, abstract class for all caches. The actual data structure is specific to each subclass.
17
+ """
18
+
19
+ def update(
20
+ self,
21
+ key_states: torch.Tensor,
22
+ value_states: torch.Tensor,
23
+ layer_idx: int,
24
+ cache_kwargs: Optional[Dict[str, Any]] = None,
25
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
26
+ """
27
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
28
+
29
+ Parameters:
30
+ key_states (`torch.Tensor`):
31
+ The new key states to cache.
32
+ value_states (`torch.Tensor`):
33
+ The new value states to cache.
34
+ layer_idx (`int`):
35
+ The index of the layer to cache the states for.
36
+ cache_kwargs (`Dict[str, Any]`, `optional`):
37
+ Additional arguments for the cache subclass. These are specific to each subclass and allow new types of
38
+ cache to be created.
39
+
40
+ Return:
41
+ A tuple containing the updated key and value states.
42
+ """
43
+ raise NotImplementedError("Make sure to implement `update` in a subclass.")
44
+
45
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
46
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
47
+ raise NotImplementedError("Make sure to implement `get_seq_length` in a subclass.")
48
+
49
+ def get_max_length(self) -> Optional[int]:
50
+ """Returns the maximum sequence length of the cached states, if there is any."""
51
+ raise NotImplementedError("Make sure to implement `get_max_length` in a subclass.")
52
+
53
+ def get_usable_length(self, new_seq_length: int, layer_idx: Optional[int] = 0) -> int:
54
+ """Given the sequence length of the new inputs, returns the usable length of the cache."""
55
+ # Cache without size limit -> all cache is usable
56
+ # Cache with size limit -> if the length of the cache plus the length of the new inputs is larger than the maximum
57
+ # cache length, we will need to evict part of the cache (and thus not all of the cache is usable)
58
+ max_length = self.get_max_length()
59
+ previous_seq_length = self.get_seq_length(layer_idx)
60
+ if max_length is not None and previous_seq_length + new_seq_length > max_length:
61
+ return max_length - new_seq_length
62
+ return previous_seq_length
63
+
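# Worked example (added for clarity): a minimal, hypothetical subclass showing how
# `get_usable_length` combines `get_seq_length` and `get_max_length`.
class _FixedSizeCache(Cache):
    def __init__(self, seq_length, max_length):
        self._seq_length, self._max_length = seq_length, max_length

    def get_seq_length(self, layer_idx=0):
        return self._seq_length

    def get_max_length(self):
        return self._max_length

# 8 cached tokens, a limit of 10 and 4 incoming tokens -> only 10 - 4 = 6 cached entries stay usable.
assert _FixedSizeCache(8, 10).get_usable_length(new_seq_length=4) == 6
# With no size limit, the whole cache is usable.
assert _FixedSizeCache(8, None).get_usable_length(new_seq_length=4) == 8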
64
+ @property
65
+ def seen_tokens(self):
66
+ logger.warning_once(
67
+ "The `seen_tokens` attribute is deprecated and will be removed in v4.41. Use the `cache_position` "
68
+ "model input instead."
69
+ )
70
+ if hasattr(self, "_seen_tokens"):
71
+ return self._seen_tokens
72
+ else:
73
+ return None
74
+
75
+
76
+ class DynamicCache(Cache):
77
+ """
78
+ A cache that grows dynamically as more tokens are generated. This is the default for generative models.
79
+
80
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
81
+ `[batch_size, num_heads, seq_len, head_dim]`.
82
+ """
83
+
84
+ def __init__(self) -> None:
85
+ self.key_cache: List[torch.Tensor] = []
86
+ self.value_cache: List[torch.Tensor] = []
87
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
88
+
89
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
90
+ """
91
+ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
92
+ sequence length.
93
+ """
94
+ if layer_idx < len(self):
95
+ return (self.key_cache[layer_idx], self.value_cache[layer_idx])
96
+ else:
97
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
98
+
99
+ def __iter__(self):
100
+ """
101
+ Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over
102
+ keys and values
103
+ """
104
+ for layer_idx in range(len(self)):
105
+ yield (self.key_cache[layer_idx], self.value_cache[layer_idx])
106
+
107
+ def __len__(self):
108
+ """
109
+ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds
110
+ to the number of layers in the model.
111
+ """
112
+ return len(self.key_cache)
113
+
114
+ def update(
115
+ self,
116
+ key_states: torch.Tensor,
117
+ value_states: torch.Tensor,
118
+ layer_idx: int,
119
+ cache_kwargs: Optional[Dict[str, Any]] = None,
120
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
121
+ """
122
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
123
+
124
+ Parameters:
125
+ key_states (`torch.Tensor`):
126
+ The new key states to cache.
127
+ value_states (`torch.Tensor`):
128
+ The new value states to cache.
129
+ layer_idx (`int`):
130
+ The index of the layer to cache the states for.
131
+ cache_kwargs (`Dict[str, Any]`, `optional`):
132
+ Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
133
+
134
+ Return:
135
+ A tuple containing the updated key and value states.
136
+ """
137
+ # Update the number of seen tokens
138
+ if layer_idx == 0:
139
+ self._seen_tokens += key_states.shape[-2]
140
+
141
+ # Update the cache
142
+ if len(self.key_cache) <= layer_idx:
143
+ self.key_cache.append(key_states)
144
+ self.value_cache.append(value_states)
145
+ else:
146
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
147
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
148
+
149
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
150
+
151
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
152
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
153
+ if len(self.key_cache) <= layer_idx:
154
+ return 0
155
+ return self.key_cache[layer_idx].shape[-2]
156
+
157
+ def get_max_length(self) -> Optional[int]:
158
+ """Returns the maximum sequence length of the cached states. DynamicCache does not have a maximum length."""
159
+ return None
160
+
161
+ def reorder_cache(self, beam_idx: torch.LongTensor):
162
+ """Reorders the cache for beam search, given the selected beam indices."""
163
+ for layer_idx in range(len(self.key_cache)):
164
+ device = self.key_cache[layer_idx].device
165
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
166
+ device = self.value_cache[layer_idx].device
167
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
168
+
169
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
170
+ """Converts the `DynamicCache` instance into its equivalent in the legacy cache format."""
171
+ legacy_cache = ()
172
+ for layer_idx in range(len(self)):
173
+ legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),)
174
+ return legacy_cache
175
+
176
+ @classmethod
177
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
178
+ """Converts a cache in the legacy cache format into an equivalent `DynamicCache`."""
179
+ cache = cls()
180
+ if past_key_values is not None:
181
+ for layer_idx in range(len(past_key_values)):
182
+ key_states, value_states = past_key_values[layer_idx]
183
+ cache.update(key_states, value_states, layer_idx)
184
+ return cache
185
+
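# --- Illustrative usage sketch (added for clarity, not part of the diff above) ---
# Minimal round trip with `DynamicCache`; tensor shapes are arbitrary
# (batch=1, heads=2, seq_len=3, head_dim=4).
cache = DynamicCache()
k = torch.randn(1, 2, 3, 4)
v = torch.randn(1, 2, 3, 4)
cache.update(k, v, layer_idx=0)
assert cache.get_seq_length(0) == 3
legacy = cache.to_legacy_cache()                 # tuple of (key, value) pairs, one per layer
assert DynamicCache.from_legacy_cache(legacy).get_seq_length(0) == 3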
186
+
187
+ class SinkCache(Cache):
188
+ A cache as described in the [Attention Sinks paper](https://arxiv.org/abs/2309.17453). It allows the model to
189
+ A cache that as described in the [Attention Sinks paper](https://arxiv.org/abs/2309.17453). It allows the model to
190
+ generate beyond the length of its context window, without losing fluency in the conversation. As it discards past
191
+ tokens, the model will lose the ability to generate tokens that depend on the context that was discarded.
192
+
193
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
194
+ `[batch_size, num_heads, seq_len, head_dim]`.
195
+
196
+ Parameters:
197
+ window_length (`int`):
198
+ The length of the context window.
199
+ num_sink_tokens (`int`):
200
+ The number of sink tokens. See the original paper for more information.
201
+ """
202
+
203
+ def __init__(self, window_length: int, num_sink_tokens: int) -> None:
204
+ self.key_cache: List[torch.Tensor] = []
205
+ self.value_cache: List[torch.Tensor] = []
206
+ self.window_length = window_length
207
+ self.num_sink_tokens = num_sink_tokens
208
+ self.cos_sin_cache = {}
209
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
210
+
211
+ @staticmethod
212
+ def _rotate_half(x):
213
+ x1 = x[..., : x.shape[-1] // 2]
214
+ x2 = x[..., x.shape[-1] // 2 :]
215
+ return torch.cat((-x2, x1), dim=-1)
216
+
217
+ def _apply_key_rotary_pos_emb(
218
+ self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
219
+ ) -> torch.Tensor:
220
+ rotated_key_states = (key_states * cos) + (self._rotate_half(key_states) * sin)
221
+ return rotated_key_states
222
+
223
+ def _get_rerotation_cos_sin(
224
+ self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
225
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
226
+ if key_states.shape[-2] not in self.cos_sin_cache:
227
+ # Upcast to float32 temporarily for better accuracy
228
+ cos = cos.to(torch.float32)
229
+ sin = sin.to(torch.float32)
230
+
231
+ # Compute the cos and sin required for back- and forward-rotating to one position earlier in the sequence
232
+ original_cos = cos[self.num_sink_tokens + key_states.shape[-2] :]
233
+ shifted_cos = cos[self.num_sink_tokens : -key_states.shape[-2]]
234
+ original_sin = sin[self.num_sink_tokens + key_states.shape[-2] :]
235
+ shifted_sin = sin[self.num_sink_tokens : -key_states.shape[-2]]
236
+ rerotation_cos = original_cos * shifted_cos + original_sin * shifted_sin
237
+ rerotation_sin = -original_sin * shifted_cos + original_cos * shifted_sin
238
+
239
+ self.cos_sin_cache[key_states.shape[-2]] = (
240
+ rerotation_cos.to(key_states.dtype).unsqueeze(0),
241
+ rerotation_sin.to(key_states.dtype).unsqueeze(0),
242
+ )
243
+ return self.cos_sin_cache[key_states.shape[-2]]
244
+
245
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
246
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
247
+ # Workaround to make 'key_states.shape[-2] + past_key_value.get_seq_length(self.layer_idx)' <= window_length
248
+ if len(self.key_cache) <= layer_idx:
249
+ return 0
250
+ return self.key_cache[layer_idx].shape[-2]
251
+
252
+ def get_max_length(self) -> Optional[int]:
253
+ """Returns the maximum sequence length of the cached states."""
254
+ return self.window_length
255
+
256
+ def update(
257
+ self,
258
+ key_states: torch.Tensor,
259
+ value_states: torch.Tensor,
260
+ layer_idx: int,
261
+ cache_kwargs: Optional[Dict[str, Any]] = None,
262
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
263
+ """
264
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
265
+
266
+ Parameters:
267
+ key_states (`torch.Tensor`):
268
+ The new key states to cache.
269
+ value_states (`torch.Tensor`):
270
+ The new value states to cache.
271
+ layer_idx (`int`):
272
+ The index of the layer to cache the states for.
273
+ cache_kwargs (`Dict[str, Any]`, `optional`):
274
+ Additional arguments for the cache subclass. The following arguments can be used in `SinkCache`: `sin`,
275
+ `cos` and `partial_rotation_size`. These arguments are used with models using RoPE, to recompute the
276
+ rotation as the tokens are shifted.
277
+
278
+ Return:
279
+ A tuple containing the updated key and value states.
280
+ """
281
+ # Optional kwargs for `SinkCache` -- needed on models using RoPE. `partial_rotation_size` is used on models
282
+ # with partially rotated position embeddings, like Phi or Persimmon.
283
+ sin = cache_kwargs.get("sin")
284
+ cos = cache_kwargs.get("cos")
285
+ partial_rotation_size = cache_kwargs.get("partial_rotation_size")
286
+ using_rope = cos is not None and sin is not None
287
+
288
+ # Update the number of seen tokens
289
+ if layer_idx == 0:
290
+ self._seen_tokens += key_states.shape[-2]
291
+
292
+ # [bsz, num_heads, seq_len, head_dim]
293
+ if len(self.key_cache) <= layer_idx:
294
+ # Empty cache
295
+ self.key_cache.append(key_states)
296
+ self.value_cache.append(value_states)
297
+
298
+ elif key_states.shape[-2] + self.get_seq_length(layer_idx) < self.window_length:
299
+ # Growing cache
300
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
301
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
302
+
303
+ else:
304
+ # Shifting cache
305
+ keys_to_keep = self.key_cache[layer_idx][
306
+ :, :, -self.window_length + self.num_sink_tokens + key_states.shape[-2] :
307
+ ]
308
+
309
+ # On RoPE models, we need to recompute the Key rotation as the tokens are shifted
310
+ if using_rope:
311
+ rerotation_cos, rerotation_sin = self._get_rerotation_cos_sin(
312
+ key_states, cos[: self.window_length], sin[: self.window_length]
313
+ )
314
+ if partial_rotation_size is not None:
315
+ keys_to_keep, keys_pass = (
316
+ keys_to_keep[..., :partial_rotation_size],
317
+ keys_to_keep[..., partial_rotation_size:],
318
+ )
319
+ keys_to_keep = self._apply_key_rotary_pos_emb(keys_to_keep, rerotation_cos, rerotation_sin)
320
+ if partial_rotation_size is not None:
321
+ keys_to_keep = torch.cat((keys_to_keep, keys_pass), dim=-1)
322
+
323
+ # Concatenate sink tokens, shifted & rotated tokens (if needed), and new tokens
324
+ sink_keys = self.key_cache[layer_idx][:, :, : self.num_sink_tokens]
325
+ self.key_cache[layer_idx] = torch.cat([sink_keys, keys_to_keep, key_states], dim=-2)
326
+
327
+ sink_values = self.value_cache[layer_idx][:, :, : self.num_sink_tokens]
328
+ values_to_keep = self.value_cache[layer_idx][
329
+ :, :, -self.window_length + self.num_sink_tokens + value_states.shape[-2] :
330
+ ]
331
+ self.value_cache[layer_idx] = torch.cat([sink_values, values_to_keep, value_states], dim=-2)
332
+
333
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
334
+
335
+ def reorder_cache(self, beam_idx: torch.LongTensor):
336
+ """Reorders the cache for beam search, given the selected beam indices."""
337
+ for layer_idx in range(len(self.key_cache)):
338
+ device = self.key_cache[layer_idx].device
339
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
340
+ device = self.value_cache[layer_idx].device
341
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
342
+
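# --- Illustrative usage sketch (added for clarity, not part of the diff above) ---
# A `SinkCache` keeps `num_sink_tokens` early keys/values plus a sliding window, so its
# length never exceeds `window_length`. The shapes and sizes below are arbitrary, and an
# empty `cache_kwargs` dict means no RoPE re-rotation is applied (plain concatenation).
sink_cache = SinkCache(window_length=256, num_sink_tokens=4)
assert sink_cache.get_max_length() == 256
k = torch.randn(1, 2, 8, 16)
v = torch.randn(1, 2, 8, 16)
sink_cache.update(k, v, layer_idx=0, cache_kwargs={})
assert sink_cache.get_seq_length(0) == 8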
343
+
344
+ class StaticCache(Cache):
345
+ """
346
+ Static Cache class to be used with `torch.compile(model)`.
347
+
348
+ Parameters:
349
+ config (`PretrainedConfig`):
350
+ The configuration file defining the `max_position_embeddings`, `hidden_size` and `num_attention_heads`
351
+ required to initialize the static cache.
352
+ max_batch_size (`int`):
353
+ The maximum batch size with which the model will be used.
354
+ max_cache_len (`int`):
355
+ The maximum sequence length with which the model will be used.
356
+ device (`torch.device`):
357
+ The device on which the cache should be initialized. Should be the same as the layer.
358
+ dtype (*optional*, defaults to `torch.float32`):
359
+ The default `dtype` to use when initializing the layer.
360
+ """
361
+
362
+ def __init__(self, config: PretrainedConfig, max_batch_size: int, max_cache_len: int, device, dtype=None) -> None:
363
+ super().__init__()
364
+ self.max_batch_size = max_batch_size
365
+ self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len
366
+ # Some models define a custom `head_dim` != config.hidden_size // config.num_attention_heads
367
+ self.head_dim = (
368
+ config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
369
+ )
370
+
371
+ self.dtype = dtype if dtype is not None else torch.float32
372
+ self.num_key_value_heads = (
373
+ config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads
374
+ )
375
+
376
+ cache_shape = (max_batch_size, self.num_key_value_heads, self.max_cache_len, self.head_dim)
377
+ self.key_cache: torch.Tensor = torch.zeros(cache_shape, dtype=self.dtype, device=device)
378
+ self.value_cache: torch.Tensor = torch.zeros(cache_shape, dtype=self.dtype, device=device)
379
+
380
+ def update(
381
+ self,
382
+ key_states: torch.Tensor,
383
+ value_states: torch.Tensor,
384
+ layer_idx: int,
385
+ cache_kwargs: Optional[Dict[str, Any]] = None,
386
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
387
+ """
388
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
389
+ It is VERY important to index using a tensor, otherwise you introduce a copy to the device.
390
+
391
+ Parameters:
392
+ key_states (`torch.Tensor`):
393
+ The new key states to cache.
394
+ value_states (`torch.Tensor`):
395
+ The new value states to cache.
396
+ layer_idx (`int`):
397
+ The index of the layer to cache the states for. Kept for backward compatibility.
398
+ cache_kwargs (`Dict[str, Any]`, `optional`):
399
+ Additional arguments for the cache subclass. The `StaticCache` needs the `cache_position` input
400
+ to know which slots of the cache it should overwrite.
401
+
402
+ Return:
403
+ A tuple containing the updated key and value states.
404
+ """
405
+ new_cache_positions = cache_kwargs.get("cache_position")
406
+ k_out = self.key_cache
407
+ v_out = self.value_cache
408
+
409
+ k_out[:, :, new_cache_positions] = key_states
410
+ v_out[:, :, new_cache_positions] = value_states
411
+
412
+ return k_out, v_out
413
+
414
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
415
+ """Returns the sequence length of the cached states that were seen by the model. `layer_idx` kept for BC"""
416
+ # Occupied cache == any slot in the 3rd dim (sequence length) holds a non-zero value. To save on compute, let's
417
+ # limit the check to the first batch member and head dimension.
418
+ # TODO: This is error prone, a filled cache may be `0.0`. Let's use a stateless integer instead, after
419
+ # https://github.com/pytorch/pytorch/issues/120248 is fixed
420
+ return (self.key_cache[0, 0].any(dim=-1)).sum()
421
+
422
+ def get_max_length(self) -> Optional[int]:
423
+ """Returns the maximum sequence length of the cached states. For `StaticCache` this is `max_cache_len`."""
424
+ return self.max_cache_len
425
+
426
+ def reorder_cache(self, beam_idx: torch.LongTensor):
427
+ """Reorders the cache for beam search, given the selected beam indices."""
428
+ device = self.key_cache.device
429
+ self.key_cache = self.key_cache.index_select(0, beam_idx.to(device))
430
+ device = self.value_cache.device
431
+ self.value_cache = self.value_cache.index_select(0, beam_idx.to(device))
432
+
433
+ def to_legacy_cache(self):
434
+ """Dummy function for BC. We have to keep it because otherwise the call in the forward of models would break."""
435
+ return None
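# --- Illustrative usage sketch (added for clarity, not part of the diff above) ---
# Pre-allocating a `StaticCache`. `LlamaConfig` is used only as an example of a config
# that exposes `hidden_size`, `num_attention_heads` and `num_key_value_heads`.
from transformers import LlamaConfig

cfg = LlamaConfig(hidden_size=64, num_attention_heads=4, num_key_value_heads=4, num_hidden_layers=2)
static_cache = StaticCache(cfg, max_batch_size=1, max_cache_len=16, device="cpu", dtype=torch.float16)
k = torch.randn(1, 4, 3, 16, dtype=torch.float16)   # head_dim = 64 / 4 = 16
v = torch.randn(1, 4, 3, 16, dtype=torch.float16)
cache_position = torch.arange(3)                     # write the new states into slots 0..2
static_cache.update(k, v, layer_idx=0, cache_kwargs={"cache_position": cache_position})
assert static_cache.get_max_length() == 16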
env-llmeval/lib/python3.10/site-packages/transformers/configuration_utils.py ADDED
@@ -0,0 +1,1133 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Configuration base class and utilities."""
17
+
18
+
19
+ import copy
20
+ import json
21
+ import os
22
+ import re
23
+ import warnings
24
+ from typing import Any, Dict, List, Optional, Tuple, Union
25
+
26
+ from packaging import version
27
+
28
+ from . import __version__
29
+ from .dynamic_module_utils import custom_object_save
30
+ from .utils import (
31
+ CONFIG_NAME,
32
+ PushToHubMixin,
33
+ add_model_info_to_auto_map,
34
+ cached_file,
35
+ copy_func,
36
+ download_url,
37
+ extract_commit_hash,
38
+ is_remote_url,
39
+ is_torch_available,
40
+ logging,
41
+ )
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _re_configuration_file = re.compile(r"config\.(.*)\.json")
47
+
48
+
49
+ class PretrainedConfig(PushToHubMixin):
50
+ # no-format
51
+ r"""
52
+ Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
53
+ methods for loading/downloading/saving configurations.
54
+
55
+ <Tip>
56
+
57
+ A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
58
+ initialize a model does **not** load the model weights. It only affects the model's configuration.
59
+
60
+ </Tip>
61
+
62
+ Class attributes (overridden by derived classes):
63
+
64
+ - **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate
65
+ the correct object in [`~transformers.AutoConfig`].
66
+ - **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the
67
+ config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:
68
+ [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].
69
+ - **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary
70
+ outputs of the model during inference.
71
+ - **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized
72
+ naming of attributes.
73
+
74
+ Common attributes (present in all subclasses):
75
+
76
+ - **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the
77
+ embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
78
+ - **hidden_size** (`int`) -- The hidden size of the model.
79
+ - **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the
80
+ model.
81
+ - **num_hidden_layers** (`int`) -- The number of blocks in the model.
82
+
83
+ Args:
84
+ name_or_path (`str`, *optional*, defaults to `""`):
85
+ Store the string that was passed to [`PreTrainedModel.from_pretrained`] or
86
+ [`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created
87
+ with such a method.
88
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
89
+ Whether or not the model should return all hidden-states.
90
+ output_attentions (`bool`, *optional*, defaults to `False`):
91
+ Whether or not the model should return all attentions.
92
+ return_dict (`bool`, *optional*, defaults to `True`):
93
+ Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.
94
+ is_encoder_decoder (`bool`, *optional*, defaults to `False`):
95
+ Whether the model is used as an encoder/decoder or not.
96
+ is_decoder (`bool`, *optional*, defaults to `False`):
97
+ Whether the model is used as decoder or not (in which case it's used as an encoder).
98
+ cross_attention_hidden_size (`int`, *optional*):
99
+ The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
100
+ setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
101
+ add_cross_attention (`bool`, *optional*, defaults to `False`):
102
+ Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
103
+ that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
104
+ in `AUTO_MODELS_FOR_CAUSAL_LM`.
105
+ tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
106
+ Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
107
+ and decoder model to have the exact same parameter names.
108
+ prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):
109
+ Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
110
+ heads to prune in said layer.
111
+
112
+ For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
113
+ chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
114
+ The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
115
+ the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <
116
+ sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed
117
+ Forward Chunking work?](../glossary.html#feed-forward-chunking).
118
+
119
+ > Parameters for sequence generation
120
+
121
+ max_length (`int`, *optional*, defaults to 20):
122
+ Maximum length that will be used by default in the `generate` method of the model.
123
+ min_length (`int`, *optional*, defaults to 0):
124
+ Minimum length that will be used by default in the `generate` method of the model.
125
+ do_sample (`bool`, *optional*, defaults to `False`):
126
+ Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling;
127
+ use greedy decoding otherwise.
128
+ early_stopping (`bool`, *optional*, defaults to `False`):
129
+ Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
130
+ when at least `num_beams` sentences are finished per batch or not.
131
+ num_beams (`int`, *optional*, defaults to 1):
132
+ Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
133
+ no beam search.
134
+ num_beam_groups (`int`, *optional*, defaults to 1):
135
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams
136
+ that will be used by default in the `generate` method of the model. 1 means no group beam search.
137
+ diversity_penalty (`float`, *optional*, defaults to 0.0):
138
+ Value to control diversity for group beam search. that will be used by default in the `generate` method of
139
+ Value to control diversity for group beam search that will be used by default in the `generate` method of
140
+ temperature (`float`, *optional*, defaults to 1.0):
141
+ The value used to module the next token probabilities that will be used by default in the `generate` method
142
+ of the model. Must be strictly positive.
143
+ top_k (`int`, *optional*, defaults to 50):
144
+ Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in
145
+ the `generate` method of the model.
146
+ top_p (`float`, *optional*, defaults to 1):
147
+ Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1,
148
+ only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
149
+ typical_p (`float`, *optional*, defaults to 1):
150
+ Local typicality measures how similar the conditional probability of predicting a target token next is to
151
+ the expected conditional probability of predicting a random token next, given the partial text already
152
+ generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
153
+ add up to `typical_p` or higher are kept for generation. See [this
154
+ paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
155
+ repetition_penalty (`float`, *optional*, defaults to 1):
156
+ Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0
157
+ means no penalty.
158
+ length_penalty (`float`, *optional*, defaults to 1):
159
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
160
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
161
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
162
+ `length_penalty` < 0.0 encourages shorter sequences.
163
+ no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the
164
+ `generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can
165
+ only occur once.
166
+ encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by
167
+ Value that will be used by default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all
168
+ ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.
169
+ bad_words_ids (`List[int]`, *optional*):
170
+ List of token ids that are not allowed to be generated that will be used by default in the `generate`
171
+ method of the model. In order to get the tokens of the words that should not appear in the generated text,
172
+ use `tokenizer.encode(bad_word, add_prefix_space=True)`.
173
+ num_return_sequences (`int`, *optional*, defaults to 1):
174
+ Number of independently computed returned sequences for each element in the batch that will be used by
175
+ default in the `generate` method of the model.
176
+ output_scores (`bool`, *optional*, defaults to `False`):
177
+ Whether the model should return the logits when used for generation.
178
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
179
+ Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.
180
+ forced_bos_token_id (`int`, *optional*):
181
+ The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
182
+ multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
183
+ language token.
184
+ forced_eos_token_id (`int`, *optional*):
185
+ The id of the token to force as the last generated token when `max_length` is reached.
186
+ remove_invalid_values (`bool`, *optional*):
187
+ Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method from crashing.
188
+ Note that using `remove_invalid_values` can slow down generation.
189
+
190
+ > Parameters for fine-tuning tasks
191
+
192
+ architectures (`List[str]`, *optional*):
193
+ Model architectures that can be used with the model's pretrained weights.
194
+ finetuning_task (`str`, *optional*):
195
+ Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow
196
+ or PyTorch) checkpoint.
197
+ id2label (`Dict[int, str]`, *optional*):
198
+ A map from index (for instance prediction index, or target index) to label.
199
+ label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.
200
+ num_labels (`int`, *optional*):
201
+ Number of labels to use in the last layer added to the model, typically for a classification task.
202
+ task_specific_params (`Dict[str, Any]`, *optional*):
203
+ Additional keyword arguments to store for the current task.
204
+ problem_type (`str`, *optional*):
205
+ Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
206
+ `"single_label_classification"` or `"multi_label_classification"`.
207
+
208
+ > Parameters linked to the tokenizer
209
+
210
+ tokenizer_class (`str`, *optional*):
211
+ The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated with the
212
+ model by default).
213
+ prefix (`str`, *optional*):
214
+ A specific prompt that should be added at the beginning of each text before calling the model.
215
+ bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.
216
+ pad_token_id (`int`, *optional*): The id of the _padding_ token.
217
+ eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.
218
+ decoder_start_token_id (`int`, *optional*):
219
+ If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
220
+ sep_token_id (`int`, *optional*): The id of the _separation_ token.
221
+
222
+ > PyTorch specific parameters
223
+
224
+ torchscript (`bool`, *optional*, defaults to `False`):
225
+ Whether or not the model should be used with Torchscript.
226
+ tie_word_embeddings (`bool`, *optional*, defaults to `True`):
227
+ Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
228
+ model has an output word embedding layer.
229
+ torch_dtype (`str`, *optional*):
230
+ The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
231
+ (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
232
+ model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
233
+ `float16` weights. Since the config object is stored in plain text, this attribute contains just the
234
+ floating type string without the `torch.` prefix. For example, for `torch.float16`, `torch_dtype` is the
235
+ `"float16"` string.
236
+
237
+ This attribute is currently not used during model loading, but this may change in future
238
+ versions. We can already start preparing for that by saving the dtype with `save_pretrained`.
239
+
240
+ > TensorFlow specific parameters
241
+
242
+ use_bfloat16 (`bool`, *optional*, defaults to `False`):
243
+ Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).
244
+ tf_legacy_loss (`bool`, *optional*, defaults to `False`):
245
+ Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may
246
+ not be XLA-compatible. This option is here for backward compatibility and will be removed in Transformers
247
+ v5.
248
+ """
249
+
250
+ model_type: str = ""
251
+ is_composition: bool = False
252
+ attribute_map: Dict[str, str] = {}
253
+ _auto_class: Optional[str] = None
254
+
255
+ def __setattr__(self, key, value):
256
+ if key in super().__getattribute__("attribute_map"):
257
+ key = super().__getattribute__("attribute_map")[key]
258
+ super().__setattr__(key, value)
259
+
260
+ def __getattribute__(self, key):
261
+ if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
262
+ key = super().__getattribute__("attribute_map")[key]
263
+ return super().__getattribute__(key)
264
+
265
+ def __init__(self, **kwargs):
266
+ # Attributes with defaults
267
+ self.return_dict = kwargs.pop("return_dict", True)
268
+ self.output_hidden_states = kwargs.pop("output_hidden_states", False)
269
+ self.output_attentions = kwargs.pop("output_attentions", False)
270
+ self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
271
+ self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
272
+ self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
273
+ self.tf_legacy_loss = kwargs.pop("tf_legacy_loss", False) # Only used by TensorFlow models
274
+ self.pruned_heads = kwargs.pop("pruned_heads", {})
275
+ self.tie_word_embeddings = kwargs.pop(
276
+ "tie_word_embeddings", True
277
+ ) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
278
+ self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
279
+
280
+ # `is_decoder` is used in encoder-decoder models to differentiate the encoder from the decoder
281
+ self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
282
+ self.is_decoder = kwargs.pop("is_decoder", False)
283
+ self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None)
284
+ self.add_cross_attention = kwargs.pop("add_cross_attention", False)
285
+ self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
286
+
287
+ # Retrocompatibility: Parameters for sequence generation. While we will keep the ability to load these
288
+ # parameters, saving them will be deprecated. In a distant future, we won't need to load them.
289
+ for parameter_name, default_value in self._get_generation_defaults().items():
290
+ setattr(self, parameter_name, kwargs.pop(parameter_name, default_value))
291
+
292
+ # Fine-tuning task arguments
293
+ self.architectures = kwargs.pop("architectures", None)
294
+ self.finetuning_task = kwargs.pop("finetuning_task", None)
295
+ self.id2label = kwargs.pop("id2label", None)
296
+ self.label2id = kwargs.pop("label2id", None)
297
+ if self.label2id is not None and not isinstance(self.label2id, dict):
298
+ raise ValueError("Argument label2id should be a dictionary.")
299
+ if self.id2label is not None:
300
+ if not isinstance(self.id2label, dict):
301
+ raise ValueError("Argument id2label should be a dictionary.")
302
+ num_labels = kwargs.pop("num_labels", None)
303
+ if num_labels is not None and len(self.id2label) != num_labels:
304
+ logger.warning(
305
+ f"You passed along `num_labels={num_labels}` with an incompatible id to label map: "
306
+ f"{self.id2label}. The number of labels wil be overwritten to {self.num_labels}."
307
+ )
308
+ self.id2label = {int(key): value for key, value in self.id2label.items()}
309
+ # Keys are always strings in JSON so convert ids to int here.
310
+ else:
311
+ self.num_labels = kwargs.pop("num_labels", 2)
312
+
313
+ if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
314
+ # we will start using self.torch_dtype in v5, but to be consistent with
315
+ # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
316
+ if is_torch_available():
317
+ import torch
318
+
319
+ self.torch_dtype = getattr(torch, self.torch_dtype)
320
+
321
+ # Tokenizer arguments TODO: eventually tokenizer and models should share the same config
322
+ self.tokenizer_class = kwargs.pop("tokenizer_class", None)
323
+ self.prefix = kwargs.pop("prefix", None)
324
+ self.bos_token_id = kwargs.pop("bos_token_id", None)
325
+ self.pad_token_id = kwargs.pop("pad_token_id", None)
326
+ self.eos_token_id = kwargs.pop("eos_token_id", None)
327
+ self.sep_token_id = kwargs.pop("sep_token_id", None)
328
+
329
+ self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
330
+
331
+ # task specific arguments
332
+ self.task_specific_params = kwargs.pop("task_specific_params", None)
333
+
334
+ # regression / multi-label classification
335
+ self.problem_type = kwargs.pop("problem_type", None)
336
+ allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
337
+ if self.problem_type is not None and self.problem_type not in allowed_problem_types:
338
+ raise ValueError(
339
+ f"The config parameter `problem_type` was not understood: received {self.problem_type} "
340
+ "but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
341
+ )
342
+
343
+ # TPU arguments
344
+ if kwargs.pop("xla_device", None) is not None:
345
+ logger.warning(
346
+ "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
347
+ "safely remove it from your `config.json` file."
348
+ )
349
+
350
+ # Name or path to the pretrained checkpoint
351
+ self._name_or_path = str(kwargs.pop("name_or_path", ""))
352
+ # Config hash
353
+ self._commit_hash = kwargs.pop("_commit_hash", None)
354
+
355
+ # Attention implementation to use, if relevant.
356
+ self._attn_implementation_internal = kwargs.pop("attn_implementation", None)
357
+
358
+ # Drop the transformers version info
359
+ self.transformers_version = kwargs.pop("transformers_version", None)
360
+
361
+ # Deal with gradient checkpointing
362
+ if kwargs.get("gradient_checkpointing", False):
363
+ warnings.warn(
364
+ "Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
365
+ "Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
366
+ "`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
367
+ )
368
+
369
+ # Additional attributes without default values
370
+ for key, value in kwargs.items():
371
+ try:
372
+ setattr(self, key, value)
373
+ except AttributeError as err:
374
+ logger.error(f"Can't set {key} with value {value} for {self}")
375
+ raise err
376
+
377
+ @property
378
+ def name_or_path(self) -> str:
379
+ return getattr(self, "_name_or_path", None)
380
+
381
+ @name_or_path.setter
382
+ def name_or_path(self, value):
383
+ self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
384
+
385
+ @property
386
+ def use_return_dict(self) -> bool:
387
+ """
388
+ `bool`: Whether or not to return [`~utils.ModelOutput`] instead of tuples.
389
+ """
390
+ # If torchscript is set, force `return_dict=False` to avoid jit errors
391
+ return self.return_dict and not self.torchscript
392
+
393
+ @property
394
+ def num_labels(self) -> int:
395
+ """
396
+ `int`: The number of labels for classification models.
397
+ """
398
+ return len(self.id2label)
399
+
400
+ @num_labels.setter
401
+ def num_labels(self, num_labels: int):
402
+ if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
403
+ self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
404
+ self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
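+ # Illustrative sketch of the interplay above (hypothetical values): constructing a config with
+ # `num_labels=3` and no `id2label` yields `id2label == {0: "LABEL_0", 1: "LABEL_1", 2: "LABEL_2"}`
+ # and `label2id == {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}`; passing an explicit `id2label`
+ # instead makes `num_labels` follow its length.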
405
+
406
+ @property
407
+ def _attn_implementation(self):
408
+ # This property is made private for now (as it cannot be changed and a PreTrainedModel.use_attn_implementation method needs to be implemented).
409
+ if hasattr(self, "_attn_implementation_internal"):
410
+ if self._attn_implementation_internal is None:
411
+ # `config.attn_implementation` should never be None, for backward compatibility.
412
+ return "eager"
413
+ else:
414
+ return self._attn_implementation_internal
415
+ else:
416
+ return "eager"
417
+
418
+ @_attn_implementation.setter
419
+ def _attn_implementation(self, value):
420
+ self._attn_implementation_internal = value
421
+
422
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
423
+ """
424
+ Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
425
+ [`~PretrainedConfig.from_pretrained`] class method.
426
+
427
+ Args:
428
+ save_directory (`str` or `os.PathLike`):
429
+ Directory where the configuration JSON file will be saved (will be created if it does not exist).
430
+ push_to_hub (`bool`, *optional*, defaults to `False`):
431
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
432
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
433
+ namespace).
434
+ kwargs (`Dict[str, Any]`, *optional*):
435
+ Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
436
+ """
437
+ self._set_token_in_kwargs(kwargs)
438
+
439
+ if os.path.isfile(save_directory):
440
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
441
+
442
+ non_default_generation_parameters = {}
443
+ for parameter_name, default_value in self._get_generation_defaults().items():
444
+ if hasattr(self, parameter_name) and getattr(self, parameter_name) != default_value:
445
+ non_default_generation_parameters[parameter_name] = getattr(self, parameter_name)
446
+ if len(non_default_generation_parameters) > 0:
447
+ logger.warning(
448
+ "Some non-default generation parameters are set in the model config. These should go into a "
449
+ "GenerationConfig file (https://huggingface.co/docs/transformers/generation_strategies#save-a-custom-decoding-strategy-with-your-model) "
450
+ "instead. This warning will be raised to an exception in v4.41.\n"
451
+ f"Non-default generation parameters: {str(non_default_generation_parameters)}"
452
+ )
453
+
454
+ os.makedirs(save_directory, exist_ok=True)
455
+
456
+ if push_to_hub:
457
+ commit_message = kwargs.pop("commit_message", None)
458
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
459
+ repo_id = self._create_repo(repo_id, **kwargs)
460
+ files_timestamps = self._get_files_timestamps(save_directory)
461
+
462
+ # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
463
+ # loaded from the Hub.
464
+ if self._auto_class is not None:
465
+ custom_object_save(self, save_directory, config=self)
466
+
467
+ # If we save using the predefined names, we can load using `from_pretrained`
468
+ output_config_file = os.path.join(save_directory, CONFIG_NAME)
469
+
470
+ self.to_json_file(output_config_file, use_diff=True)
471
+ logger.info(f"Configuration saved in {output_config_file}")
472
+
473
+ if push_to_hub:
474
+ self._upload_modified_files(
475
+ save_directory,
476
+ repo_id,
477
+ files_timestamps,
478
+ commit_message=commit_message,
479
+ token=kwargs.get("token"),
480
+ )
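+ # Usage sketch for `save_pretrained` (hypothetical paths and repo id, not part of the library source):
+ #   config.save_pretrained("./my_model_dir")  # writes ./my_model_dir/config.json
+ #   config.save_pretrained("./my_model_dir", push_to_hub=True, repo_id="my-user/my-model")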
481
+
482
+ @staticmethod
483
+ def _set_token_in_kwargs(kwargs, token=None):
484
+ """Temporary method to deal with `token` and `use_auth_token`.
485
+
486
+ This method avoids applying the same changes in every model config class that overrides `from_pretrained`.
487
+
488
+ Need to clean up `use_auth_token` in a follow-up PR.
489
+ """
490
+ # Some model config classes like CLIP define their own `from_pretrained` without the new argument `token` yet.
491
+ if token is None:
492
+ token = kwargs.pop("token", None)
493
+ use_auth_token = kwargs.pop("use_auth_token", None)
494
+
495
+ if use_auth_token is not None:
496
+ warnings.warn(
497
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
498
+ FutureWarning,
499
+ )
500
+ if token is not None:
501
+ raise ValueError(
502
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
503
+ )
504
+ token = use_auth_token
505
+
506
+ if token is not None:
507
+ kwargs["token"] = token
508
+
509
+ @classmethod
510
+ def from_pretrained(
511
+ cls,
512
+ pretrained_model_name_or_path: Union[str, os.PathLike],
513
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
514
+ force_download: bool = False,
515
+ local_files_only: bool = False,
516
+ token: Optional[Union[str, bool]] = None,
517
+ revision: str = "main",
518
+ **kwargs,
519
+ ) -> "PretrainedConfig":
520
+ r"""
521
+ Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
522
+
523
+ Args:
524
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
525
+ This can be either:
526
+
527
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
528
+ huggingface.co.
529
+ - a path to a *directory* containing a configuration file saved using the
530
+ [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
531
+ - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
532
+ cache_dir (`str` or `os.PathLike`, *optional*):
533
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
534
+ standard cache should not be used.
535
+ force_download (`bool`, *optional*, defaults to `False`):
536
+ Whether or not to force to (re-)download the configuration files and override the cached versions if
537
+ they exist.
538
+ resume_download (`bool`, *optional*, defaults to `False`):
539
+ Whether or not to delete an incompletely received file. Attempts to resume the download if such a file
540
+ exists.
541
+ proxies (`Dict[str, str]`, *optional*):
542
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
543
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
544
+ token (`str` or `bool`, *optional*):
545
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
546
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
547
+ revision (`str`, *optional*, defaults to `"main"`):
548
+ The specific model version to use. It can be a branch name, a tag name, or a commit id. Since we use a
549
+ git-based system for storing models and other artifacts on huggingface.co, `revision` can be any
550
+ identifier allowed by git.
551
+
552
+ <Tip>
553
+
554
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
555
+
556
+ </Tip>
557
+
558
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
559
+ If `False`, then this function returns just the final configuration object.
560
+
561
+ If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
562
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
563
+ part of `kwargs` which has not been used to update `config` and is otherwise ignored.
564
+ subfolder (`str`, *optional*, defaults to `""`):
565
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
566
+ specify the folder name here.
567
+ kwargs (`Dict[str, Any]`, *optional*):
568
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
569
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
570
+ by the `return_unused_kwargs` keyword parameter.
571
+
572
+ Returns:
573
+ [`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
574
+
575
+ Examples:
576
+
577
+ ```python
578
+ # We can't directly instantiate the base class *PretrainedConfig*, so let's show the examples on a
579
+ # derived class: BertConfig
580
+ config = BertConfig.from_pretrained(
581
+ "google-bert/bert-base-uncased"
582
+ ) # Download configuration from huggingface.co and cache.
583
+ config = BertConfig.from_pretrained(
584
+ "./test/saved_model/"
585
+ ) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
586
+ config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
587
+ config = BertConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
588
+ assert config.output_attentions == True
589
+ config, unused_kwargs = BertConfig.from_pretrained(
590
+ "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
591
+ )
592
+ assert config.output_attentions == True
593
+ assert unused_kwargs == {"foo": False}
594
+ ```"""
595
+ kwargs["cache_dir"] = cache_dir
596
+ kwargs["force_download"] = force_download
597
+ kwargs["local_files_only"] = local_files_only
598
+ kwargs["revision"] = revision
599
+
600
+ cls._set_token_in_kwargs(kwargs, token)
601
+
602
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
603
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
604
+ logger.warning(
605
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
606
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
607
+ )
608
+
609
+ return cls.from_dict(config_dict, **kwargs)
610
+
611
+ @classmethod
612
+ def get_config_dict(
613
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
614
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
615
+ """
616
+ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
617
+ [`PretrainedConfig`] using `from_dict`.
618
+
619
+ Parameters:
620
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
621
+ The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
622
+
623
+ Returns:
624
+ `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
625
+
626
+ """
627
+ cls._set_token_in_kwargs(kwargs)
628
+
629
+ original_kwargs = copy.deepcopy(kwargs)
630
+ # Get config dict associated with the base config file
631
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
632
+ if "_commit_hash" in config_dict:
633
+ original_kwargs["_commit_hash"] = config_dict["_commit_hash"]
634
+
635
+ # That config file may point us toward another config file to use.
636
+ if "configuration_files" in config_dict:
637
+ configuration_file = get_configuration_file(config_dict["configuration_files"])
638
+ config_dict, kwargs = cls._get_config_dict(
639
+ pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
640
+ )
641
+
642
+ return config_dict, kwargs
643
+
644
+ @classmethod
645
+ def _get_config_dict(
646
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
647
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
648
+ cache_dir = kwargs.pop("cache_dir", None)
649
+ force_download = kwargs.pop("force_download", False)
650
+ resume_download = kwargs.pop("resume_download", False)
651
+ proxies = kwargs.pop("proxies", None)
652
+ token = kwargs.pop("token", None)
653
+ local_files_only = kwargs.pop("local_files_only", False)
654
+ revision = kwargs.pop("revision", None)
655
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
656
+ subfolder = kwargs.pop("subfolder", "")
657
+ from_pipeline = kwargs.pop("_from_pipeline", None)
658
+ from_auto_class = kwargs.pop("_from_auto", False)
659
+ commit_hash = kwargs.pop("_commit_hash", None)
660
+
661
+ if trust_remote_code is True:
662
+ logger.warning(
663
+ "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
664
+ " ignored."
665
+ )
666
+
667
+ user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
668
+ if from_pipeline is not None:
669
+ user_agent["using_pipeline"] = from_pipeline
670
+
671
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
672
+
673
+ is_local = os.path.isdir(pretrained_model_name_or_path)
674
+ if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
675
+ # Special case when pretrained_model_name_or_path is a local file
676
+ resolved_config_file = pretrained_model_name_or_path
677
+ is_local = True
678
+ elif is_remote_url(pretrained_model_name_or_path):
679
+ configuration_file = pretrained_model_name_or_path
680
+ resolved_config_file = download_url(pretrained_model_name_or_path)
681
+ else:
682
+ configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
683
+
684
+ try:
685
+ # Load from local folder or from cache or download from model Hub and cache
686
+ resolved_config_file = cached_file(
687
+ pretrained_model_name_or_path,
688
+ configuration_file,
689
+ cache_dir=cache_dir,
690
+ force_download=force_download,
691
+ proxies=proxies,
692
+ resume_download=resume_download,
693
+ local_files_only=local_files_only,
694
+ token=token,
695
+ user_agent=user_agent,
696
+ revision=revision,
697
+ subfolder=subfolder,
698
+ _commit_hash=commit_hash,
699
+ )
700
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
701
+ except EnvironmentError:
702
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
703
+ # the original exception.
704
+ raise
705
+ except Exception:
706
+ # For any other exception, we throw a generic error.
707
+ raise EnvironmentError(
708
+ f"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it"
709
+ " from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
710
+ f" name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory"
711
+ f" containing a {configuration_file} file"
712
+ )
713
+
714
+ try:
715
+ # Load config dict
716
+ config_dict = cls._dict_from_json_file(resolved_config_file)
717
+ config_dict["_commit_hash"] = commit_hash
718
+ except (json.JSONDecodeError, UnicodeDecodeError):
719
+ raise EnvironmentError(
720
+ f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
721
+ )
722
+
723
+ if is_local:
724
+ logger.info(f"loading configuration file {resolved_config_file}")
725
+ else:
726
+ logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
727
+
728
+ if "auto_map" in config_dict and not is_local:
729
+ config_dict["auto_map"] = add_model_info_to_auto_map(
730
+ config_dict["auto_map"], pretrained_model_name_or_path
731
+ )
732
+ return config_dict, kwargs
733
+
734
+ @classmethod
735
+ def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
736
+ """
737
+ Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
738
+
739
+ Args:
740
+ config_dict (`Dict[str, Any]`):
741
+ Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
742
+ retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
743
+ kwargs (`Dict[str, Any]`):
744
+ Additional parameters from which to initialize the configuration object.
745
+
746
+ Returns:
747
+ [`PretrainedConfig`]: The configuration object instantiated from those parameters.
748
+ """
749
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
750
+ # Those arguments may be passed along for our internal telemetry.
751
+ # We remove them so they don't appear in `return_unused_kwargs`.
752
+ kwargs.pop("_from_auto", None)
753
+ kwargs.pop("_from_pipeline", None)
754
+ # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
755
+ if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
756
+ kwargs["_commit_hash"] = config_dict["_commit_hash"]
757
+
758
+ # We remove it from kwargs so that it does not appear in `return_unused_kwargs`.
759
+ config_dict["attn_implementation"] = kwargs.pop("attn_implementation", None)
760
+
761
+ config = cls(**config_dict)
762
+
763
+ if hasattr(config, "pruned_heads"):
764
+ config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}
765
+
766
+ # Update config with kwargs if needed
767
+ if "num_labels" in kwargs and "id2label" in kwargs:
768
+ num_labels = kwargs["num_labels"]
769
+ id2label = kwargs["id2label"] if kwargs["id2label"] is not None else []
770
+ if len(id2label) != num_labels:
771
+ raise ValueError(
772
+ f"You passed along `num_labels={num_labels }` with an incompatible id to label map: "
773
+ f"{kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove "
774
+ "one of them."
775
+ )
776
+ to_remove = []
777
+ for key, value in kwargs.items():
778
+ if hasattr(config, key):
779
+ current_attr = getattr(config, key)
780
+ # To allow passing a custom subconfig as a kwarg in models that have nested configs.
781
+ if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):
782
+ value = current_attr.__class__(**value)
783
+ setattr(config, key, value)
784
+ if key != "torch_dtype":
785
+ to_remove.append(key)
786
+ for key in to_remove:
787
+ kwargs.pop(key, None)
788
+
789
+ logger.info(f"Model config {config}")
790
+ if return_unused_kwargs:
791
+ return config, kwargs
792
+ else:
793
+ return config
794
+
795
+ @classmethod
796
+ def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
797
+ """
798
+ Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
799
+
800
+ Args:
801
+ json_file (`str` or `os.PathLike`):
802
+ Path to the JSON file containing the parameters.
803
+
804
+ Returns:
805
+ [`PretrainedConfig`]: The configuration object instantiated from that JSON file.
806
+
807
+ """
808
+ config_dict = cls._dict_from_json_file(json_file)
809
+ return cls(**config_dict)
810
+
811
+ @classmethod
812
+ def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
813
+ with open(json_file, "r", encoding="utf-8") as reader:
814
+ text = reader.read()
815
+ return json.loads(text)
816
+
817
+ def __eq__(self, other):
818
+ return isinstance(other, PretrainedConfig) and (self.__dict__ == other.__dict__)
819
+
820
+ def __repr__(self):
821
+ return f"{self.__class__.__name__} {self.to_json_string()}"
822
+
823
+ def to_diff_dict(self) -> Dict[str, Any]:
824
+ """
825
+ Removes all attributes from config which correspond to the default config attributes for better readability and
826
+ serializes to a Python dictionary.
827
+
828
+ Returns:
829
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
830
+ """
831
+ config_dict = self.to_dict()
832
+
833
+ # get the default config dict
834
+ default_config_dict = PretrainedConfig().to_dict()
835
+
836
+ # get class specific config dict
837
+ class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
838
+
839
+ serializable_config_dict = {}
840
+
841
+ # only serialize values that differ from the default config
842
+ for key, value in config_dict.items():
843
+ if (
844
+ isinstance(getattr(self, key, None), PretrainedConfig)
845
+ and key in class_config_dict
846
+ and isinstance(class_config_dict[key], dict)
847
+ ):
848
+ # For nested configs we need to clean the diff recursively
849
+ diff = recursive_diff_dict(value, class_config_dict[key], config_obj=getattr(self, key, None))
850
+ if "model_type" in value:
851
+ # Needs to be set even if it's not in the diff
852
+ diff["model_type"] = value["model_type"]
853
+ if len(diff) > 0:
854
+ serializable_config_dict[key] = diff
855
+ elif (
856
+ key not in default_config_dict
857
+ or key == "transformers_version"
858
+ or value != default_config_dict[key]
859
+ or (key in class_config_dict and value != class_config_dict[key])
860
+ ):
861
+ serializable_config_dict[key] = value
862
+
863
+ if hasattr(self, "quantization_config"):
864
+ serializable_config_dict["quantization_config"] = (
865
+ self.quantization_config.to_dict()
866
+ if not isinstance(self.quantization_config, dict)
867
+ else self.quantization_config
868
+ )
869
+
870
+ # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
871
+ _ = serializable_config_dict.pop("_pre_quantization_dtype", None)
872
+
873
+ self.dict_torch_dtype_to_str(serializable_config_dict)
874
+
875
+ if "_attn_implementation_internal" in serializable_config_dict:
876
+ del serializable_config_dict["_attn_implementation_internal"]
877
+
878
+ return serializable_config_dict
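+ # Sketch of the diff-vs-full serialization described above (hypothetical attribute): for a config where
+ # only `hidden_dropout_prob` differs from the class defaults, `to_diff_dict()` keeps roughly just that
+ # key (plus `model_type` and `transformers_version`), while `to_dict()` returns every attribute.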
879
+
880
+ def to_dict(self) -> Dict[str, Any]:
881
+ """
882
+ Serializes this instance to a Python dictionary.
883
+
884
+ Returns:
885
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
886
+ """
887
+ output = copy.deepcopy(self.__dict__)
888
+ if hasattr(self.__class__, "model_type"):
889
+ output["model_type"] = self.__class__.model_type
890
+ if "_auto_class" in output:
891
+ del output["_auto_class"]
892
+ if "_commit_hash" in output:
893
+ del output["_commit_hash"]
894
+ if "_attn_implementation_internal" in output:
895
+ del output["_attn_implementation_internal"]
896
+
897
+ # Transformers version when serializing the model
898
+ output["transformers_version"] = __version__
899
+
900
+ for key, value in output.items():
901
+ # Deal with nested configs like CLIP
902
+ if isinstance(value, PretrainedConfig):
903
+ value = value.to_dict()
904
+ del value["transformers_version"]
905
+
906
+ output[key] = value
907
+
908
+ if hasattr(self, "quantization_config"):
909
+ output["quantization_config"] = (
910
+ self.quantization_config.to_dict()
911
+ if not isinstance(self.quantization_config, dict)
912
+ else self.quantization_config
913
+ )
914
+
915
+ # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
916
+ _ = output.pop("_pre_quantization_dtype", None)
917
+
918
+ self.dict_torch_dtype_to_str(output)
919
+
920
+ return output
921
+
922
+ def to_json_string(self, use_diff: bool = True) -> str:
923
+ """
924
+ Serializes this instance to a JSON string.
925
+
926
+ Args:
927
+ use_diff (`bool`, *optional*, defaults to `True`):
928
+ If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
929
+ is serialized to JSON string.
930
+
931
+ Returns:
932
+ `str`: String containing all the attributes that make up this configuration instance in JSON format.
933
+ """
934
+ if use_diff is True:
935
+ config_dict = self.to_diff_dict()
936
+ else:
937
+ config_dict = self.to_dict()
938
+ return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
939
+
940
+ def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
941
+ """
942
+ Save this instance to a JSON file.
943
+
944
+ Args:
945
+ json_file_path (`str` or `os.PathLike`):
946
+ Path to the JSON file in which this configuration instance's parameters will be saved.
947
+ use_diff (`bool`, *optional*, defaults to `True`):
948
+ If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
949
+ is serialized to JSON file.
950
+ """
951
+ with open(json_file_path, "w", encoding="utf-8") as writer:
952
+ writer.write(self.to_json_string(use_diff=use_diff))
953
+
954
+ def update(self, config_dict: Dict[str, Any]):
955
+ """
956
+ Updates attributes of this class with attributes from `config_dict`.
957
+
958
+ Args:
959
+ config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
960
+ """
961
+ for key, value in config_dict.items():
962
+ setattr(self, key, value)
963
+
964
+ def update_from_string(self, update_str: str):
965
+ """
966
+ Updates attributes of this class with attributes from `update_str`.
967
+
968
+ The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
969
+ "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
970
+
971
+ The keys to change have to already exist in the config object.
972
+
973
+ Args:
974
+ update_str (`str`): String with attributes that should be updated for this class.
975
+
976
+ """
977
+
978
+ d = dict(x.split("=") for x in update_str.split(","))
979
+ for k, v in d.items():
980
+ if not hasattr(self, k):
981
+ raise ValueError(f"key {k} isn't in the original config dict")
982
+
983
+ old_v = getattr(self, k)
984
+ if isinstance(old_v, bool):
985
+ if v.lower() in ["true", "1", "y", "yes"]:
986
+ v = True
987
+ elif v.lower() in ["false", "0", "n", "no"]:
988
+ v = False
989
+ else:
990
+ raise ValueError(f"can't derive true or false from {v} (key {k})")
991
+ elif isinstance(old_v, int):
992
+ v = int(v)
993
+ elif isinstance(old_v, float):
994
+ v = float(v)
995
+ elif not isinstance(old_v, str):
996
+ raise ValueError(
997
+ f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
998
+ )
999
+
1000
+ setattr(self, k, v)
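+ # Usage sketch, reusing the format shown in the docstring above (the keys must already exist on the config):
+ #   config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")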
1001
+
1002
+ def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
1003
+ """
1004
+ Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
1005
+ converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into *"float32"*
1006
+ string, which can then be stored in the json format.
1007
+ """
1008
+ if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
1009
+ d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
1010
+ for value in d.values():
1011
+ if isinstance(value, dict):
1012
+ self.dict_torch_dtype_to_str(value)
1013
+
1014
+ @classmethod
1015
+ def register_for_auto_class(cls, auto_class="AutoConfig"):
1016
+ """
1017
+ Register this class with a given auto class. This should only be used for custom configurations as the ones in
1018
+ the library are already mapped with `AutoConfig`.
1019
+
1020
+ <Tip warning={true}>
1021
+
1022
+ This API is experimental and may have some slight breaking changes in the next releases.
1023
+
1024
+ </Tip>
1025
+
1026
+ Args:
1027
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
1028
+ The auto class to register this new configuration with.
1029
+ """
1030
+ if not isinstance(auto_class, str):
1031
+ auto_class = auto_class.__name__
1032
+
1033
+ import transformers.models.auto as auto_module
1034
+
1035
+ if not hasattr(auto_module, auto_class):
1036
+ raise ValueError(f"{auto_class} is not a valid auto class.")
1037
+
1038
+ cls._auto_class = auto_class
1039
+
1040
+ @staticmethod
1041
+ def _get_generation_defaults() -> Dict[str, Any]:
1042
+ return {
1043
+ "max_length": 20,
1044
+ "min_length": 0,
1045
+ "do_sample": False,
1046
+ "early_stopping": False,
1047
+ "num_beams": 1,
1048
+ "num_beam_groups": 1,
1049
+ "diversity_penalty": 0.0,
1050
+ "temperature": 1.0,
1051
+ "top_k": 50,
1052
+ "top_p": 1.0,
1053
+ "typical_p": 1.0,
1054
+ "repetition_penalty": 1.0,
1055
+ "length_penalty": 1.0,
1056
+ "no_repeat_ngram_size": 0,
1057
+ "encoder_no_repeat_ngram_size": 0,
1058
+ "bad_words_ids": None,
1059
+ "num_return_sequences": 1,
1060
+ "output_scores": False,
1061
+ "return_dict_in_generate": False,
1062
+ "forced_bos_token_id": None,
1063
+ "forced_eos_token_id": None,
1064
+ "remove_invalid_values": False,
1065
+ "exponential_decay_length_penalty": None,
1066
+ "suppress_tokens": None,
1067
+ "begin_suppress_tokens": None,
1068
+ }
1069
+
1070
+ def _has_non_default_generation_parameters(self) -> bool:
1071
+ """
1072
+ Whether or not this instance holds non-default generation parameters.
1073
+ """
1074
+ for parameter_name, default_value in self._get_generation_defaults().items():
1075
+ if hasattr(self, parameter_name) and getattr(self, parameter_name) != default_value:
1076
+ return True
1077
+ return False
1078
+
1079
+
1080
+ def get_configuration_file(configuration_files: List[str]) -> str:
1081
+ """
1082
+ Get the configuration file to use for this version of transformers.
1083
+
1084
+ Args:
1085
+ configuration_files (`List[str]`): The list of available configuration files.
1086
+
1087
+ Returns:
1088
+ `str`: The configuration file to use.
1089
+ """
1090
+ configuration_files_map = {}
1091
+ for file_name in configuration_files:
1092
+ search = _re_configuration_file.search(file_name)
1093
+ if search is not None:
1094
+ v = search.groups()[0]
1095
+ configuration_files_map[v] = file_name
1096
+ available_versions = sorted(configuration_files_map.keys())
1097
+
1098
+ # Defaults to CONFIG_NAME and then tries to look at some newer versions.
1099
+ configuration_file = CONFIG_NAME
1100
+ transformers_version = version.parse(__version__)
1101
+ for v in available_versions:
1102
+ if version.parse(v) <= transformers_version:
1103
+ configuration_file = configuration_files_map[v]
1104
+ else:
1105
+ # No point going further since the versions are sorted.
1106
+ break
1107
+
1108
+ return configuration_file
1109
+
1110
+
1111
+ def recursive_diff_dict(dict_a, dict_b, config_obj=None):
1112
+ """
1113
+ Helper function to recursively take the diff between two nested dictionaries. The resulting diff only contains the
1114
+ values from `dict_a` that are different from values in `dict_b`.
1115
+ """
1116
+ diff = {}
1117
+ default = config_obj.__class__().to_dict() if config_obj is not None else {}
1118
+ for key, value in dict_a.items():
1119
+ obj_value = getattr(config_obj, str(key), None)
1120
+ if isinstance(obj_value, PretrainedConfig) and key in dict_b and isinstance(dict_b[key], dict):
1121
+ diff_value = recursive_diff_dict(value, dict_b[key], config_obj=obj_value)
1122
+ if len(diff_value) > 0:
1123
+ diff[key] = diff_value
1124
+ elif key not in dict_b or value != dict_b[key] or key not in default or value != default[key]:
1125
+ diff[key] = value
1126
+ return diff
1127
+
1128
+
1129
+ PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
1130
+ if PretrainedConfig.push_to_hub.__doc__ is not None:
1131
+ PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
1132
+ object="config", object_class="AutoConfig", object_files="configuration file"
1133
+ )
env-llmeval/lib/python3.10/site-packages/transformers/convert_pytorch_checkpoint_to_tf2.py ADDED
@@ -0,0 +1,498 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Convert pytorch checkpoints to TensorFlow"""
16
+
17
+
18
+ import argparse
19
+ import os
20
+
21
+ from . import (
22
+ ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
23
+ BART_PRETRAINED_MODEL_ARCHIVE_LIST,
24
+ BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
25
+ CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
26
+ CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
27
+ DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
28
+ DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
29
+ DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
30
+ DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
31
+ ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
32
+ FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
33
+ GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
34
+ LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
35
+ LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
36
+ OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
37
+ ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
38
+ T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
39
+ TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
40
+ WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
41
+ XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
42
+ XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
43
+ XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
44
+ AlbertConfig,
45
+ BartConfig,
46
+ BertConfig,
47
+ CamembertConfig,
48
+ CTRLConfig,
49
+ DistilBertConfig,
50
+ DPRConfig,
51
+ ElectraConfig,
52
+ FlaubertConfig,
53
+ GPT2Config,
54
+ LayoutLMConfig,
55
+ LxmertConfig,
56
+ OpenAIGPTConfig,
57
+ RobertaConfig,
58
+ T5Config,
59
+ TFAlbertForPreTraining,
60
+ TFBartForConditionalGeneration,
61
+ TFBartForSequenceClassification,
62
+ TFBertForPreTraining,
63
+ TFBertForQuestionAnswering,
64
+ TFBertForSequenceClassification,
65
+ TFCamembertForMaskedLM,
66
+ TFCTRLLMHeadModel,
67
+ TFDistilBertForMaskedLM,
68
+ TFDistilBertForQuestionAnswering,
69
+ TFDPRContextEncoder,
70
+ TFDPRQuestionEncoder,
71
+ TFDPRReader,
72
+ TFElectraForPreTraining,
73
+ TFFlaubertWithLMHeadModel,
74
+ TFGPT2LMHeadModel,
75
+ TFLayoutLMForMaskedLM,
76
+ TFLxmertForPreTraining,
77
+ TFLxmertVisualFeatureEncoder,
78
+ TFOpenAIGPTLMHeadModel,
79
+ TFRobertaForCausalLM,
80
+ TFRobertaForMaskedLM,
81
+ TFRobertaForSequenceClassification,
82
+ TFT5ForConditionalGeneration,
83
+ TFTransfoXLLMHeadModel,
84
+ TFWav2Vec2Model,
85
+ TFXLMRobertaForMaskedLM,
86
+ TFXLMWithLMHeadModel,
87
+ TFXLNetLMHeadModel,
88
+ TransfoXLConfig,
89
+ Wav2Vec2Config,
90
+ Wav2Vec2Model,
91
+ XLMConfig,
92
+ XLMRobertaConfig,
93
+ XLNetConfig,
94
+ is_torch_available,
95
+ load_pytorch_checkpoint_in_tf2_model,
96
+ )
97
+ from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
98
+
99
+
100
+ if is_torch_available():
101
+ import numpy as np
102
+ import torch
103
+
104
+ from . import (
105
+ AlbertForPreTraining,
106
+ BartForConditionalGeneration,
107
+ BertForPreTraining,
108
+ BertForQuestionAnswering,
109
+ BertForSequenceClassification,
110
+ CamembertForMaskedLM,
111
+ CTRLLMHeadModel,
112
+ DistilBertForMaskedLM,
113
+ DistilBertForQuestionAnswering,
114
+ DPRContextEncoder,
115
+ DPRQuestionEncoder,
116
+ DPRReader,
117
+ ElectraForPreTraining,
118
+ FlaubertWithLMHeadModel,
119
+ GPT2LMHeadModel,
120
+ LayoutLMForMaskedLM,
121
+ LxmertForPreTraining,
122
+ LxmertVisualFeatureEncoder,
123
+ OpenAIGPTLMHeadModel,
124
+ RobertaForMaskedLM,
125
+ RobertaForSequenceClassification,
126
+ T5ForConditionalGeneration,
127
+ TransfoXLLMHeadModel,
128
+ XLMRobertaForMaskedLM,
129
+ XLMWithLMHeadModel,
130
+ XLNetLMHeadModel,
131
+ )
132
+ from .pytorch_utils import is_torch_greater_or_equal_than_1_13
133
+
134
+
135
+ logging.set_verbosity_info()
136
+
137
+ MODEL_CLASSES = {
138
+ "bart": (
139
+ BartConfig,
140
+ TFBartForConditionalGeneration,
141
+ TFBartForSequenceClassification,
142
+ BartForConditionalGeneration,
143
+ BART_PRETRAINED_MODEL_ARCHIVE_LIST,
144
+ ),
145
+ "bert": (
146
+ BertConfig,
147
+ TFBertForPreTraining,
148
+ BertForPreTraining,
149
+ BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
150
+ ),
151
+ "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad": (
152
+ BertConfig,
153
+ TFBertForQuestionAnswering,
154
+ BertForQuestionAnswering,
155
+ BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
156
+ ),
157
+ "google-bert/bert-large-cased-whole-word-masking-finetuned-squad": (
158
+ BertConfig,
159
+ TFBertForQuestionAnswering,
160
+ BertForQuestionAnswering,
161
+ BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
162
+ ),
163
+ "google-bert/bert-base-cased-finetuned-mrpc": (
164
+ BertConfig,
165
+ TFBertForSequenceClassification,
166
+ BertForSequenceClassification,
167
+ BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
168
+ ),
169
+ "dpr": (
170
+ DPRConfig,
171
+ TFDPRQuestionEncoder,
172
+ TFDPRContextEncoder,
173
+ TFDPRReader,
174
+ DPRQuestionEncoder,
175
+ DPRContextEncoder,
176
+ DPRReader,
177
+ DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
178
+ DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
179
+ DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
180
+ ),
181
+ "openai-community/gpt2": (
182
+ GPT2Config,
183
+ TFGPT2LMHeadModel,
184
+ GPT2LMHeadModel,
185
+ GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
186
+ ),
187
+ "xlnet": (
188
+ XLNetConfig,
189
+ TFXLNetLMHeadModel,
190
+ XLNetLMHeadModel,
191
+ XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
192
+ ),
193
+ "xlm": (
194
+ XLMConfig,
195
+ TFXLMWithLMHeadModel,
196
+ XLMWithLMHeadModel,
197
+ XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
198
+ ),
199
+ "xlm-roberta": (
200
+ XLMRobertaConfig,
201
+ TFXLMRobertaForMaskedLM,
202
+ XLMRobertaForMaskedLM,
203
+ XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
204
+ ),
205
+ "transfo-xl": (
206
+ TransfoXLConfig,
207
+ TFTransfoXLLMHeadModel,
208
+ TransfoXLLMHeadModel,
209
+ TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
210
+ ),
211
+ "openai-community/openai-gpt": (
212
+ OpenAIGPTConfig,
213
+ TFOpenAIGPTLMHeadModel,
214
+ OpenAIGPTLMHeadModel,
215
+ OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
216
+ ),
217
+ "roberta": (
218
+ RobertaConfig,
219
+ TFRobertaForCausalLM,
220
+ TFRobertaForMaskedLM,
221
+ RobertaForMaskedLM,
222
+ ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
223
+ ),
224
+ "layoutlm": (
225
+ LayoutLMConfig,
226
+ TFLayoutLMForMaskedLM,
227
+ LayoutLMForMaskedLM,
228
+ LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
229
+ ),
230
+ "FacebookAI/roberta-large-mnli": (
231
+ RobertaConfig,
232
+ TFRobertaForSequenceClassification,
233
+ RobertaForSequenceClassification,
234
+ ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
235
+ ),
236
+ "camembert": (
237
+ CamembertConfig,
238
+ TFCamembertForMaskedLM,
239
+ CamembertForMaskedLM,
240
+ CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
241
+ ),
242
+ "flaubert": (
243
+ FlaubertConfig,
244
+ TFFlaubertWithLMHeadModel,
245
+ FlaubertWithLMHeadModel,
246
+ FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
247
+ ),
248
+ "distilbert": (
249
+ DistilBertConfig,
250
+ TFDistilBertForMaskedLM,
251
+ DistilBertForMaskedLM,
252
+ DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
253
+ ),
254
+ "distilbert-base-distilled-squad": (
255
+ DistilBertConfig,
256
+ TFDistilBertForQuestionAnswering,
257
+ DistilBertForQuestionAnswering,
258
+ DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
259
+ ),
260
+ "lxmert": (
261
+ LxmertConfig,
262
+ TFLxmertForPreTraining,
263
+ LxmertForPreTraining,
264
+ LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
265
+ ),
266
+ "lxmert-visual-feature-encoder": (
267
+ LxmertConfig,
268
+ TFLxmertVisualFeatureEncoder,
269
+ LxmertVisualFeatureEncoder,
270
+ LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
271
+ ),
272
+ "Salesforce/ctrl": (
273
+ CTRLConfig,
274
+ TFCTRLLMHeadModel,
275
+ CTRLLMHeadModel,
276
+ CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
277
+ ),
278
+ "albert": (
279
+ AlbertConfig,
280
+ TFAlbertForPreTraining,
281
+ AlbertForPreTraining,
282
+ ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
283
+ ),
284
+ "t5": (
285
+ T5Config,
286
+ TFT5ForConditionalGeneration,
287
+ T5ForConditionalGeneration,
288
+ T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
289
+ ),
290
+ "electra": (
291
+ ElectraConfig,
292
+ TFElectraForPreTraining,
293
+ ElectraForPreTraining,
294
+ ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
295
+ ),
296
+ "wav2vec2": (
297
+ Wav2Vec2Config,
298
+ TFWav2Vec2Model,
299
+ Wav2Vec2Model,
300
+ WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
301
+ ),
302
+ }
303
+
304
+
305
+ def convert_pt_checkpoint_to_tf(
306
+ model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
307
+ ):
308
+ if model_type not in MODEL_CLASSES:
309
+ raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")
310
+
311
+ config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
312
+
313
+ # Initialise TF model
314
+ if config_file in aws_config_map:
315
+ config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
316
+ config = config_class.from_json_file(config_file)
317
+ config.output_hidden_states = True
318
+ config.output_attentions = True
319
+ print(f"Building TensorFlow model from configuration: {config}")
320
+ tf_model = model_class(config)
321
+
322
+ # Load weights from tf checkpoint
323
+ if pytorch_checkpoint_path in aws_config_map.keys():
324
+ pytorch_checkpoint_path = cached_file(
325
+ pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
326
+ )
327
+ # Load PyTorch checkpoint in tf2 model:
328
+ tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)
329
+
330
+ if compare_with_pt_model:
331
+ tfo = tf_model(tf_model.dummy_inputs, training=False) # build the network
332
+
333
+ weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {}
334
+ state_dict = torch.load(
335
+ pytorch_checkpoint_path,
336
+ map_location="cpu",
337
+ **weights_only_kwarg,
338
+ )
339
+ pt_model = pt_model_class.from_pretrained(
340
+ pretrained_model_name_or_path=None, config=config, state_dict=state_dict
341
+ )
342
+
343
+ with torch.no_grad():
344
+ pto = pt_model(**pt_model.dummy_inputs)
345
+
346
+ np_pt = pto[0].numpy()
347
+ np_tf = tfo[0].numpy()
348
+ diff = np.amax(np.abs(np_pt - np_tf))
349
+ print(f"Max absolute difference between models outputs {diff}")
350
+ assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
351
+
352
+ # Save pytorch-model
353
+ print(f"Save TensorFlow model to {tf_dump_path}")
354
+ tf_model.save_weights(tf_dump_path, save_format="h5")
355
+
356
+
357
+ def convert_all_pt_checkpoints_to_tf(
358
+ args_model_type,
359
+ tf_dump_path,
360
+ model_shortcut_names_or_path=None,
361
+ config_shortcut_names_or_path=None,
362
+ compare_with_pt_model=False,
363
+ use_cached_models=False,
364
+ remove_cached_files=False,
365
+ only_convert_finetuned_models=False,
366
+ ):
367
+ if args_model_type is None:
368
+ model_types = list(MODEL_CLASSES.keys())
369
+ else:
370
+ model_types = [args_model_type]
371
+
372
+ for j, model_type in enumerate(model_types, start=1):
373
+ print("=" * 100)
374
+ print(f" Converting model type {j}/{len(model_types)}: {model_type}")
375
+ print("=" * 100)
376
+ if model_type not in MODEL_CLASSES:
377
+ raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")
378
+
379
+ config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
380
+
381
+ if model_shortcut_names_or_path is None:
382
+ model_shortcut_names_or_path = list(aws_model_maps.keys())
383
+ if config_shortcut_names_or_path is None:
384
+ config_shortcut_names_or_path = model_shortcut_names_or_path
385
+
386
+ for i, (model_shortcut_name, config_shortcut_name) in enumerate(
387
+ zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
388
+ ):
389
+ print("-" * 100)
390
+ if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
391
+ if not only_convert_finetuned_models:
392
+ print(f" Skipping finetuned checkpoint {model_shortcut_name}")
393
+ continue
394
+ model_type = model_shortcut_name
395
+ elif only_convert_finetuned_models:
396
+ print(f" Skipping not finetuned checkpoint {model_shortcut_name}")
397
+ continue
398
+ print(
399
+ f" Converting checkpoint {i}/{len(aws_config_map)}: {model_shortcut_name} - model_type {model_type}"
400
+ )
401
+ print("-" * 100)
402
+
403
+ if config_shortcut_name in aws_config_map:
404
+ config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
405
+ else:
406
+ config_file = config_shortcut_name
407
+
408
+ if model_shortcut_name in aws_model_maps:
409
+ model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
410
+ else:
411
+ model_file = model_shortcut_name
412
+
413
+ if os.path.isfile(model_shortcut_name):
414
+ model_shortcut_name = "converted_model"
415
+
416
+ convert_pt_checkpoint_to_tf(
417
+ model_type=model_type,
418
+ pytorch_checkpoint_path=model_file,
419
+ config_file=config_file,
420
+ tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
421
+ compare_with_pt_model=compare_with_pt_model,
422
+ )
423
+ if remove_cached_files:
424
+ os.remove(config_file)
425
+ os.remove(model_file)
426
+
427
+
428
+ if __name__ == "__main__":
429
+ parser = argparse.ArgumentParser()
430
+ # Required parameters
431
+ parser.add_argument(
432
+ "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
433
+ )
434
+ parser.add_argument(
435
+ "--model_type",
436
+ default=None,
437
+ type=str,
438
+ help=(
439
+ f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
440
+ "convert all the models from AWS."
441
+ ),
442
+ )
443
+ parser.add_argument(
444
+ "--pytorch_checkpoint_path",
445
+ default=None,
446
+ type=str,
447
+ help=(
448
+ "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
449
+ "If not given, will download and convert all the checkpoints from AWS."
450
+ ),
451
+ )
452
+ parser.add_argument(
453
+ "--config_file",
454
+ default=None,
455
+ type=str,
456
+ help=(
457
+ "The config json file corresponding to the pre-trained model. \n"
458
+ "This specifies the model architecture. If not given and "
459
+ "--pytorch_checkpoint_path is not given or is a shortcut name "
460
+ "use the configuration associated to the shortcut name on the AWS"
461
+ ),
462
+ )
463
+ parser.add_argument(
464
+ "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
465
+ )
466
+ parser.add_argument(
467
+ "--use_cached_models",
468
+ action="store_true",
469
+ help="Use cached models if possible instead of updating to latest checkpoint versions.",
470
+ )
471
+ parser.add_argument(
472
+ "--remove_cached_files",
473
+ action="store_true",
474
+ help="Remove pytorch models after conversion (save memory when converting in batches).",
475
+ )
476
+ parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
477
+ args = parser.parse_args()
478
+
479
+ # if args.pytorch_checkpoint_path is not None:
480
+ # convert_pt_checkpoint_to_tf(args.model_type.lower(),
481
+ # args.pytorch_checkpoint_path,
482
+ # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
483
+ # args.tf_dump_path,
484
+ # compare_with_pt_model=args.compare_with_pt_model,
485
+ # use_cached_models=args.use_cached_models)
486
+ # else:
487
+ convert_all_pt_checkpoints_to_tf(
488
+ args.model_type.lower() if args.model_type is not None else None,
489
+ args.tf_dump_path,
490
+ model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
491
+ if args.pytorch_checkpoint_path is not None
492
+ else None,
493
+ config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
494
+ compare_with_pt_model=args.compare_with_pt_model,
495
+ use_cached_models=args.use_cached_models,
496
+ remove_cached_files=args.remove_cached_files,
497
+ only_convert_finetuned_models=args.only_convert_finetuned_models,
498
+ )
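For reference, here is a minimal sketch of calling the conversion entry point defined above from Python instead of the CLI. It assumes both torch and tensorflow are installed; the checkpoint, config, and output paths are hypothetical placeholders, not files shipped with this commit.

# Minimal usage sketch (placeholder local paths; adjust to your own checkpoint).
from transformers.convert_pytorch_checkpoint_to_tf2 import convert_pt_checkpoint_to_tf

convert_pt_checkpoint_to_tf(
    model_type="electra",                                   # any key of MODEL_CLASSES
    pytorch_checkpoint_path="./electra/pytorch_model.bin",  # local PyTorch weights (placeholder)
    config_file="./electra/config.json",                    # matching config file (placeholder)
    tf_dump_path="./electra/tf_model.h5",                   # output .h5 written via save_weights
    compare_with_pt_model=True,                             # re-runs both models and asserts max abs diff <= 2e-2
)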
env-llmeval/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py ADDED
@@ -0,0 +1,1525 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Utilities to convert slow tokenizers in their fast tokenizers counterparts.
17
+
18
+ All the conversions are grouped here to gather SentencePiece dependencies outside of the fast tokenizers files and
19
+ keep our dependency on SentencePiece optional.
20
+ """
21
+
22
+ import warnings
23
+ from typing import Dict, List, Tuple
24
+
25
+ from packaging import version
26
+ from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
27
+ from tokenizers.models import BPE, Unigram, WordPiece
28
+
29
+ from .utils import is_protobuf_available, requires_backends
30
+ from .utils.import_utils import PROTOBUF_IMPORT_ERROR
31
+
32
+
33
+ def import_protobuf(error_message=""):
34
+ if is_protobuf_available():
35
+ import google.protobuf
36
+
37
+ if version.parse(google.protobuf.__version__) < version.parse("4.0.0"):
38
+ from transformers.utils import sentencepiece_model_pb2
39
+ else:
40
+ from transformers.utils import sentencepiece_model_pb2_new as sentencepiece_model_pb2
41
+ return sentencepiece_model_pb2
42
+ else:
43
+ raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message))
44
+
45
+
46
+ class SentencePieceExtractor:
47
+ """
48
+ Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece
49
+ """
50
+
51
+ def __init__(self, model: str):
52
+ requires_backends(self, "sentencepiece")
53
+ from sentencepiece import SentencePieceProcessor
54
+
55
+ self.sp = SentencePieceProcessor()
56
+ self.sp.Load(model)
57
+
58
+ def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]:
59
+ """
60
+ By default, returns the vocab and merges in their original order; if `vocab_scores` is provided, the
62
+ merges are ordered by the piece scores instead.
62
+ """
63
+ sp = self.sp
64
+ vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())}
65
+
66
+ if vocab_scores is not None:
67
+ vocab_scores, reverse = dict(vocab_scores), True
68
+ else:
69
+ vocab_scores, reverse = vocab, False
70
+
71
+ # Merges
72
+ merges = []
73
+ for merge, piece_score in vocab_scores.items():
74
+ local = []
75
+ for index in range(1, len(merge)):
76
+ piece_l, piece_r = merge[:index], merge[index:]
77
+ if piece_l in vocab and piece_r in vocab:
78
+ local.append((piece_l, piece_r, piece_score))
79
+ local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]]))
80
+ merges.extend(local)
81
+
82
+ merges = sorted(merges, key=lambda val: val[2], reverse=reverse)
83
+ merges = [(val[0], val[1]) for val in merges]
84
+ return vocab, merges
85
+
86
+
87
+ class GemmaSentencePieceExtractor(SentencePieceExtractor):
88
+ def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]:
89
+ """
90
+ By default, returns the vocab and merges in their original order; if `vocab_scores` is provided, the
92
+ merges are ordered by the piece scores instead.
92
+ """
93
+ sp = self.sp
94
+ vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())}
95
+
96
+ # there is a missing token in the vocab. We have to do this to support merges
97
+ # "<0x09>" is the bytefallback for `\t`
98
+ vocab["\t"] = vocab.pop("<0x09>")
99
+
100
+ if vocab_scores is not None:
101
+ vocab_scores, reverse = dict(vocab_scores), True
102
+ else:
103
+ vocab_scores, reverse = vocab, False
104
+
105
+ # Merges
106
+ merges = []
107
+ for merge, piece_score in vocab_scores.items():
108
+ local = []
109
+ for index in range(1, len(merge)):
110
+ piece_l, piece_r = merge[:index], merge[index:]
111
+ if piece_l in vocab and piece_r in vocab:
112
+ local.append((piece_l, piece_r, piece_score))
113
+ local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]]))
114
+ merges.extend(local)
115
+
116
+ merges = sorted(merges, key=lambda val: val[2], reverse=reverse)
117
+ merges = [(val[0], val[1]) for val in merges]
118
+ return vocab, merges
119
+
120
+
121
+ def check_number_comma(piece: str) -> bool:
122
+ return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit()
123
+
124
+
125
+ class Converter:
126
+ def __init__(self, original_tokenizer):
127
+ self.original_tokenizer = original_tokenizer
128
+
129
+ def converted(self) -> Tokenizer:
130
+ raise NotImplementedError()
131
+
132
+
133
+ class BertConverter(Converter):
134
+ def converted(self) -> Tokenizer:
135
+ vocab = self.original_tokenizer.vocab
136
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
137
+
138
+ tokenize_chinese_chars = False
139
+ strip_accents = False
140
+ do_lower_case = False
141
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
142
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
143
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
144
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
145
+
146
+ tokenizer.normalizer = normalizers.BertNormalizer(
147
+ clean_text=True,
148
+ handle_chinese_chars=tokenize_chinese_chars,
149
+ strip_accents=strip_accents,
150
+ lowercase=do_lower_case,
151
+ )
152
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
153
+
154
+ cls = str(self.original_tokenizer.cls_token)
155
+ sep = str(self.original_tokenizer.sep_token)
156
+ cls_token_id = self.original_tokenizer.cls_token_id
157
+ sep_token_id = self.original_tokenizer.sep_token_id
158
+
159
+ tokenizer.post_processor = processors.TemplateProcessing(
160
+ single=f"{cls}:0 $A:0 {sep}:0",
161
+ pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1",
162
+ special_tokens=[
163
+ (cls, cls_token_id),
164
+ (sep, sep_token_id),
165
+ ],
166
+ )
167
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
168
+
169
+ return tokenizer
170
+
171
+
172
+ class SplinterConverter(Converter):
173
+ def converted(self) -> Tokenizer:
174
+ vocab = self.original_tokenizer.vocab
175
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
176
+
177
+ tokenize_chinese_chars = False
178
+ strip_accents = False
179
+ do_lower_case = False
180
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
181
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
182
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
183
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
184
+
185
+ tokenizer.normalizer = normalizers.BertNormalizer(
186
+ clean_text=True,
187
+ handle_chinese_chars=tokenize_chinese_chars,
188
+ strip_accents=strip_accents,
189
+ lowercase=do_lower_case,
190
+ )
191
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
192
+
193
+ cls = str(self.original_tokenizer.cls_token)
194
+ sep = str(self.original_tokenizer.sep_token)
195
+ question = str(self.original_tokenizer.question_token)
196
+ dot = "."
197
+ cls_token_id = self.original_tokenizer.cls_token_id
198
+ sep_token_id = self.original_tokenizer.sep_token_id
199
+ question_token_id = self.original_tokenizer.question_token_id
200
+ dot_token_id = self.original_tokenizer.convert_tokens_to_ids(".")
201
+
202
+ if self.original_tokenizer.padding_side == "right":
203
+ pair = f"{cls}:0 $A:0 {question} {dot} {sep}:0 $B:1 {sep}:1"
204
+ else:
205
+ pair = f"{cls}:0 $A:0 {sep}:0 $B:1 {question} {dot} {sep}:1"
206
+
207
+ tokenizer.post_processor = processors.TemplateProcessing(
208
+ single=f"{cls}:0 $A:0 {sep}:0",
209
+ pair=pair,
210
+ special_tokens=[
211
+ (cls, cls_token_id),
212
+ (sep, sep_token_id),
213
+ (question, question_token_id),
214
+ (dot, dot_token_id),
215
+ ],
216
+ )
217
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
218
+
219
+ return tokenizer
220
+
221
+
222
+ class FunnelConverter(Converter):
223
+ def converted(self) -> Tokenizer:
224
+ vocab = self.original_tokenizer.vocab
225
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
226
+
227
+ tokenize_chinese_chars = False
228
+ strip_accents = False
229
+ do_lower_case = False
230
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
231
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
232
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
233
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
234
+
235
+ tokenizer.normalizer = normalizers.BertNormalizer(
236
+ clean_text=True,
237
+ handle_chinese_chars=tokenize_chinese_chars,
238
+ strip_accents=strip_accents,
239
+ lowercase=do_lower_case,
240
+ )
241
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
242
+
243
+ cls = str(self.original_tokenizer.cls_token)
244
+ sep = str(self.original_tokenizer.sep_token)
245
+ cls_token_id = self.original_tokenizer.cls_token_id
246
+ sep_token_id = self.original_tokenizer.sep_token_id
247
+
248
+ tokenizer.post_processor = processors.TemplateProcessing(
249
+ single=f"{cls}:2 $A:0 {sep}:0", # token_type_id is 2 for Funnel transformer
250
+ pair=f"{cls}:2 $A:0 {sep}:0 $B:1 {sep}:1",
251
+ special_tokens=[
252
+ (cls, cls_token_id),
253
+ (sep, sep_token_id),
254
+ ],
255
+ )
256
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
257
+
258
+ return tokenizer
259
+
260
+
261
+ class MPNetConverter(Converter):
262
+ def converted(self) -> Tokenizer:
263
+ vocab = self.original_tokenizer.vocab
264
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
265
+
266
+ tokenize_chinese_chars = False
267
+ strip_accents = False
268
+ do_lower_case = False
269
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
270
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
271
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
272
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
273
+
274
+ tokenizer.normalizer = normalizers.BertNormalizer(
275
+ clean_text=True,
276
+ handle_chinese_chars=tokenize_chinese_chars,
277
+ strip_accents=strip_accents,
278
+ lowercase=do_lower_case,
279
+ )
280
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
281
+
282
+ cls = str(self.original_tokenizer.cls_token)
283
+ sep = str(self.original_tokenizer.sep_token)
284
+ cls_token_id = self.original_tokenizer.cls_token_id
285
+ sep_token_id = self.original_tokenizer.sep_token_id
286
+
287
+ tokenizer.post_processor = processors.TemplateProcessing(
288
+ single=f"{cls}:0 $A:0 {sep}:0",
289
+ pair=f"{cls}:0 $A:0 {sep}:0 {sep}:0 $B:1 {sep}:1", # MPNet uses two [SEP] tokens
290
+ special_tokens=[
291
+ (cls, cls_token_id),
292
+ (sep, sep_token_id),
293
+ ],
294
+ )
295
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
296
+
297
+ return tokenizer
298
+
299
+
300
+ class OpenAIGPTConverter(Converter):
301
+ def converted(self) -> Tokenizer:
302
+ vocab = self.original_tokenizer.encoder
303
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
304
+ unk_token = self.original_tokenizer.unk_token
305
+
306
+ tokenizer = Tokenizer(
307
+ BPE(
308
+ vocab=vocab,
309
+ merges=merges,
310
+ dropout=None,
311
+ unk_token=str(unk_token),
312
+ end_of_word_suffix="</w>",
313
+ fuse_unk=False,
314
+ )
315
+ )
316
+
317
+ if tokenizer.token_to_id(str(unk_token)) is not None:
318
+ tokenizer.add_special_tokens([str(unk_token)])
319
+
320
+ tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True)
321
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
322
+ tokenizer.decoder = decoders.BPEDecoder(suffix="</w>")
323
+
324
+ return tokenizer
325
+
326
+
327
+ class GPT2Converter(Converter):
328
+ def converted(self) -> Tokenizer:
329
+ vocab = self.original_tokenizer.encoder
330
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
331
+
332
+ tokenizer = Tokenizer(
333
+ BPE(
334
+ vocab=vocab,
335
+ merges=merges,
336
+ dropout=None,
337
+ continuing_subword_prefix="",
338
+ end_of_word_suffix="",
339
+ fuse_unk=False,
340
+ )
341
+ )
342
+
343
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space)
344
+ tokenizer.decoder = decoders.ByteLevel()
345
+ if self.original_tokenizer.add_bos_token:
346
+ bos = self.original_tokenizer.bos_token
347
+ bos_token_id = self.original_tokenizer.bos_token_id
348
+ tokenizer.post_processor = processors.TemplateProcessing(
349
+ single=f"{bos}:0 $A:0",
350
+ pair=f"{bos}:0 $A:0 $B:1",
351
+ special_tokens=[
352
+ (bos, bos_token_id),
353
+ ],
354
+ )
355
+ else:
356
+ # XXX trim_offsets=False actually means this post_processor doesn't
357
+ # really do anything.
358
+ tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
359
+ return tokenizer
360
+
361
+
362
+ class HerbertConverter(Converter):
363
+ def converted(self) -> Tokenizer:
364
+ tokenizer_info_str = "#version:"
365
+ token_suffix = "</w>"
366
+
367
+ vocab = self.original_tokenizer.encoder
368
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
369
+ if tokenizer_info_str in merges[0][0]:
370
+ merges = merges[1:]
371
+
372
+ tokenizer = Tokenizer(
373
+ BPE(
374
+ vocab,
375
+ merges,
376
+ dropout=None,
377
+ unk_token=self.original_tokenizer.unk_token,
378
+ end_of_word_suffix=token_suffix,
379
+ )
380
+ )
381
+
382
+ tokenizer.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False)
383
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
384
+ tokenizer.decoder = decoders.BPEDecoder(suffix=token_suffix)
385
+ tokenizer.post_processor = processors.BertProcessing(
386
+ sep=(self.original_tokenizer.sep_token, self.original_tokenizer.sep_token_id),
387
+ cls=(self.original_tokenizer.cls_token, self.original_tokenizer.cls_token_id),
388
+ )
389
+
390
+ return tokenizer
391
+
392
+
393
+ class Qwen2Converter(Converter):
394
+ def converted(self) -> Tokenizer:
395
+ vocab = self.original_tokenizer.encoder
396
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
397
+
398
+ tokenizer = Tokenizer(
399
+ BPE(
400
+ vocab=vocab,
401
+ merges=merges,
402
+ dropout=None,
403
+ unk_token=None,
404
+ continuing_subword_prefix="",
405
+ end_of_word_suffix="",
406
+ fuse_unk=False,
407
+ byte_fallback=False,
408
+ )
409
+ )
410
+
411
+ tokenizer.normalizer = normalizers.NFC()
412
+
413
+ tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
414
+ [
415
+ pre_tokenizers.Split(
416
+ Regex(
417
+ r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
418
+ ),
419
+ behavior="isolated",
420
+ invert=False,
421
+ ),
422
+ pre_tokenizers.ByteLevel(
423
+ add_prefix_space=getattr(self.original_tokenizer, "add_prefix_space", False),
424
+ use_regex=False,
425
+ ),
426
+ ]
427
+ )
428
+
429
+ tokenizer.decoder = decoders.ByteLevel()
430
+ tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
431
+
432
+ return tokenizer
433
+
434
+
435
+ class RobertaConverter(Converter):
436
+ def converted(self) -> Tokenizer:
437
+ ot = self.original_tokenizer
438
+ vocab = ot.encoder
439
+ merges = list(ot.bpe_ranks.keys())
440
+
441
+ tokenizer = Tokenizer(
442
+ BPE(
443
+ vocab=vocab,
444
+ merges=merges,
445
+ dropout=None,
446
+ continuing_subword_prefix="",
447
+ end_of_word_suffix="",
448
+ fuse_unk=False,
449
+ )
450
+ )
451
+
452
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
453
+ tokenizer.decoder = decoders.ByteLevel()
454
+ tokenizer.post_processor = processors.RobertaProcessing(
455
+ sep=(ot.sep_token, ot.sep_token_id),
456
+ cls=(ot.cls_token, ot.cls_token_id),
457
+ add_prefix_space=ot.add_prefix_space,
458
+ trim_offsets=True, # True by default on Roberta (historical)
459
+ )
460
+
461
+ return tokenizer
462
+
463
+
464
+ class RoFormerConverter(Converter):
465
+ def converted(self) -> Tokenizer:
466
+ from .models.roformer.tokenization_utils import JiebaPreTokenizer
467
+
468
+ vocab = self.original_tokenizer.vocab
469
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
470
+
471
+ strip_accents = False
472
+ do_lower_case = False
473
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
474
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
475
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
476
+
477
+ tokenizer.normalizer = normalizers.BertNormalizer(
478
+ clean_text=True,
479
+ handle_chinese_chars=False,
480
+ strip_accents=strip_accents,
481
+ lowercase=do_lower_case,
482
+ )
483
+ tokenizer.pre_tokenizer = pre_tokenizers.PreTokenizer.custom(JiebaPreTokenizer(vocab))
484
+
485
+ cls = str(self.original_tokenizer.cls_token)
486
+ sep = str(self.original_tokenizer.sep_token)
487
+ cls_token_id = self.original_tokenizer.cls_token_id
488
+ sep_token_id = self.original_tokenizer.sep_token_id
489
+
490
+ tokenizer.post_processor = processors.TemplateProcessing(
491
+ single=f"{cls}:0 $A:0 {sep}:0",
492
+ pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1",
493
+ special_tokens=[
494
+ (cls, cls_token_id),
495
+ (sep, sep_token_id),
496
+ ],
497
+ )
498
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
499
+
500
+ return tokenizer
501
+
502
+
503
+ class DebertaConverter(Converter):
504
+ def converted(self) -> Tokenizer:
505
+ ot = self.original_tokenizer
506
+ vocab = ot.encoder
507
+ merges = list(ot.bpe_ranks.keys())
508
+
509
+ tokenizer = Tokenizer(
510
+ BPE(
511
+ vocab=vocab,
512
+ merges=merges,
513
+ dropout=None,
514
+ continuing_subword_prefix="",
515
+ end_of_word_suffix="",
516
+ fuse_unk=False,
517
+ )
518
+ )
519
+
520
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
521
+ tokenizer.decoder = decoders.ByteLevel()
522
+ tokenizer.post_processor = processors.TemplateProcessing(
523
+ single="[CLS]:0 $A:0 [SEP]:0",
524
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
525
+ special_tokens=[
526
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
527
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
528
+ ],
529
+ )
530
+
531
+ return tokenizer
532
+
533
+
534
+ class SpmConverter(Converter):
535
+ def __init__(self, *args):
536
+ requires_backends(self, "protobuf")
537
+
538
+ super().__init__(*args)
539
+
540
+ # from .utils import sentencepiece_model_pb2 as model_pb2
541
+ model_pb2 = import_protobuf()
542
+
543
+ m = model_pb2.ModelProto()
544
+ with open(self.original_tokenizer.vocab_file, "rb") as f:
545
+ m.ParseFromString(f.read())
546
+ self.proto = m
547
+
548
+ if self.proto.trainer_spec.byte_fallback:
549
+ if not getattr(self, "handle_byte_fallback", None):
550
+ warnings.warn(
551
+ "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option"
552
+ " which is not implemented in the fast tokenizers. In practice this means that the fast version of the"
553
+ " tokenizer can produce unknown tokens whereas the sentencepiece version would have converted these "
554
+ "unknown tokens into a sequence of byte tokens matching the original piece of text."
555
+ )
556
+
557
+ def vocab(self, proto):
558
+ return [(piece.piece, piece.score) for piece in proto.pieces]
559
+
560
+ def unk_id(self, proto):
561
+ return proto.trainer_spec.unk_id
562
+
563
+ def tokenizer(self, proto):
564
+ model_type = proto.trainer_spec.model_type
565
+ vocab_scores = self.vocab(proto)
566
+ unk_id = self.unk_id(proto)
567
+
568
+ if model_type == 1:
569
+ tokenizer = Tokenizer(Unigram(vocab_scores, unk_id))
570
+ elif model_type == 2:
571
+ _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract()
572
+ bpe_vocab = {word: i for i, (word, score) in enumerate(vocab_scores)}
573
+ tokenizer = Tokenizer(
574
+ BPE(
575
+ bpe_vocab,
576
+ merges,
577
+ unk_token=proto.trainer_spec.unk_piece,
578
+ fuse_unk=True,
579
+ )
580
+ )
581
+ else:
582
+ raise Exception(
583
+ "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
584
+ )
585
+
586
+ return tokenizer
587
+
588
+ def normalizer(self, proto):
589
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
590
+ _normalizers = [
591
+ normalizers.Strip(left=False, right=True), # stripping is important
592
+ normalizers.Replace(Regex(" {2,}"), "▁"),
593
+ ]
594
+ if not precompiled_charsmap:
595
+ return normalizers.Sequence(_normalizers)
596
+ else:
597
+ return normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap)] + _normalizers)
598
+
599
+ def pre_tokenizer(self, replacement, add_prefix_space):
600
+ prepend_scheme = "always"
601
+ if hasattr(self.original_tokenizer, "legacy") and not self.original_tokenizer.legacy:
602
+ prepend_scheme = "first"
603
+ return pre_tokenizers.Metaspace(
604
+ replacement=replacement, add_prefix_space=add_prefix_space, prepend_scheme=prepend_scheme
605
+ )
606
+
607
+ def post_processor(self):
608
+ return None
609
+
610
+ def decoder(self, replacement, add_prefix_space):
611
+ return decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
612
+
613
+ def converted(self) -> Tokenizer:
614
+ tokenizer = self.tokenizer(self.proto)
615
+
616
+ # Tokenizer assemble
617
+ normalizer = self.normalizer(self.proto)
618
+ if normalizer is not None:
619
+ tokenizer.normalizer = normalizer
620
+
621
+ replacement = "▁"
622
+ add_prefix_space = True
623
+ if hasattr(self.original_tokenizer, "add_prefix_space"):
624
+ add_prefix_space = self.original_tokenizer.add_prefix_space
625
+
626
+ pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space)
627
+ if pre_tokenizer is not None:
628
+ tokenizer.pre_tokenizer = pre_tokenizer
629
+
630
+ tokenizer.decoder = self.decoder(replacement, add_prefix_space)
631
+ post_processor = self.post_processor()
632
+ if post_processor:
633
+ tokenizer.post_processor = post_processor
634
+
635
+ return tokenizer
636
+
637
+
638
+ class AlbertConverter(SpmConverter):
639
+ def vocab(self, proto):
640
+ return [
641
+ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100)
642
+ for piece in proto.pieces
643
+ ]
644
+
645
+ def normalizer(self, proto):
646
+ list_normalizers = [
647
+ normalizers.Replace("``", '"'),
648
+ normalizers.Replace("''", '"'),
649
+ ]
650
+ if not self.original_tokenizer.keep_accents:
651
+ list_normalizers.append(normalizers.NFKD())
652
+ list_normalizers.append(normalizers.StripAccents())
653
+ if self.original_tokenizer.do_lower_case:
654
+ list_normalizers.append(normalizers.Lowercase())
655
+
656
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
657
+
658
+ if precompiled_charsmap:
659
+ list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
660
+
661
+ list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " "))
662
+ return normalizers.Sequence(list_normalizers)
663
+
664
+ def post_processor(self):
665
+ return processors.TemplateProcessing(
666
+ single="[CLS]:0 $A:0 [SEP]:0",
667
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
668
+ special_tokens=[
669
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
670
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
671
+ ],
672
+ )
673
+
674
+
675
+ class BarthezConverter(SpmConverter):
676
+ def unk_id(self, proto):
677
+ unk_id = 3
678
+ return unk_id
679
+
680
+ def post_processor(self):
681
+ return processors.TemplateProcessing(
682
+ single="<s> $A </s>",
683
+ pair="<s> $A </s> </s> $B </s>",
684
+ special_tokens=[
685
+ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
686
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
687
+ ],
688
+ )
689
+
690
+
691
+ class CamembertConverter(SpmConverter):
692
+ def vocab(self, proto):
693
+ vocab = [
694
+ ("<s>NOTUSED", 0.0),
695
+ ("<pad>", 0.0),
696
+ ("</s>NOTUSED", 0.0),
697
+ ("<unk>", 0.0),
698
+ ("<unk>NOTUSED", -100),
699
+ ]
700
+ # We downgrade the original SentencePiece token with a score of -100 so it is never used, and use our added token instead
701
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[1:]]
702
+ vocab += [("<mask>", 0.0)]
703
+ return vocab
704
+
705
+ def unk_id(self, proto):
706
+ # See vocab unk position
707
+ return 3
708
+
709
+ def post_processor(self):
710
+ return processors.TemplateProcessing(
711
+ single="<s> $A </s>",
712
+ pair="<s> $A </s> </s> $B </s>",
713
+ special_tokens=[
714
+ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
715
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
716
+ ],
717
+ )
718
+
719
+
720
+ class DebertaV2Converter(SpmConverter):
721
+ def pre_tokenizer(self, replacement, add_prefix_space):
722
+ list_pretokenizers = []
723
+ if self.original_tokenizer.split_by_punct:
724
+ list_pretokenizers.append(pre_tokenizers.Punctuation(behavior="isolated"))
725
+ list_pretokenizers.append(pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space))
726
+ return pre_tokenizers.Sequence(list_pretokenizers)
727
+
728
+ def normalizer(self, proto):
729
+ list_normalizers = []
730
+ if self.original_tokenizer.do_lower_case:
731
+ list_normalizers.append(normalizers.Lowercase())
732
+ list_normalizers.append(normalizers.Strip())
733
+
734
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
735
+ if precompiled_charsmap:
736
+ list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
737
+ list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " "))
738
+
739
+ return normalizers.Sequence(list_normalizers)
740
+
741
+ def post_processor(self):
742
+ return processors.TemplateProcessing(
743
+ single="[CLS]:0 $A:0 [SEP]:0",
744
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
745
+ special_tokens=[
746
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
747
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
748
+ ],
749
+ )
750
+
751
+
752
+ class MBartConverter(SpmConverter):
753
+ def vocab(self, proto):
754
+ vocab = [
755
+ ("<s>", 0.0),
756
+ ("<pad>", 0.0),
757
+ ("</s>", 0.0),
758
+ ("<unk>", 0.0),
759
+ ]
760
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
761
+ vocab += [
762
+ ("ar_AR", 0.0),
763
+ ("cs_CZ", 0.0),
764
+ ("de_DE", 0.0),
765
+ ("en_XX", 0.0),
766
+ ("es_XX", 0.0),
767
+ ("et_EE", 0.0),
768
+ ("fi_FI", 0.0),
769
+ ("fr_XX", 0.0),
770
+ ("gu_IN", 0.0),
771
+ ("hi_IN", 0.0),
772
+ ("it_IT", 0.0),
773
+ ("ja_XX", 0.0),
774
+ ("kk_KZ", 0.0),
775
+ ("ko_KR", 0.0),
776
+ ("lt_LT", 0.0),
777
+ ("lv_LV", 0.0),
778
+ ("my_MM", 0.0),
779
+ ("ne_NP", 0.0),
780
+ ("nl_XX", 0.0),
781
+ ("ro_RO", 0.0),
782
+ ("ru_RU", 0.0),
783
+ ("si_LK", 0.0),
784
+ ("tr_TR", 0.0),
785
+ ("vi_VN", 0.0),
786
+ ("zh_CN", 0.0),
787
+ ]
788
+ vocab += [("<mask>", 0.0)]
789
+ return vocab
790
+
791
+ def unk_id(self, proto):
792
+ return 3
793
+
794
+ def post_processor(self):
795
+ return processors.TemplateProcessing(
796
+ single="$A </s> en_XX",
797
+ pair="$A $B </s> en_XX",
798
+ special_tokens=[
799
+ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")),
800
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
801
+ ],
802
+ )
803
+
804
+
805
+ class MBart50Converter(SpmConverter):
806
+ def vocab(self, proto):
807
+ vocab = [
808
+ ("<s>", 0.0),
809
+ ("<pad>", 0.0),
810
+ ("</s>", 0.0),
811
+ ("<unk>", 0.0),
812
+ ]
813
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
814
+ vocab += [("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ("af_ZA", 0.0), ("az_AZ", 0.0), ("bn_IN", 0.0), ("fa_IR", 0.0), ("he_IL", 0.0), ("hr_HR", 0.0), ("id_ID", 0.0), ("ka_GE", 0.0), ("km_KH", 0.0), ("mk_MK", 0.0), ("ml_IN", 0.0), ("mn_MN", 0.0), ("mr_IN", 0.0), ("pl_PL", 0.0), ("ps_AF", 0.0), ("pt_XX", 0.0), ("sv_SE", 0.0), ("sw_KE", 0.0), ("ta_IN", 0.0), ("te_IN", 0.0), ("th_TH", 0.0), ("tl_XX", 0.0), ("uk_UA", 0.0), ("ur_PK", 0.0), ("xh_ZA", 0.0), ("gl_ES", 0.0), ("sl_SI", 0.0)] # fmt: skip
815
+ vocab += [("<mask>", 0.0)]
816
+ return vocab
817
+
818
+ def unk_id(self, proto):
819
+ return 3
820
+
821
+ def post_processor(self):
822
+ return processors.TemplateProcessing(
823
+ single="en_XX $A </s>",
824
+ pair="en_XX $A $B </s>",
825
+ special_tokens=[
826
+ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")),
827
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
828
+ ],
829
+ )
830
+
831
+
832
+ class NllbConverter(SpmConverter):
833
+ def vocab(self, proto):
834
+ vocab = [
835
+ ("<s>", 0.0),
836
+ ("<pad>", 0.0),
837
+ ("</s>", 0.0),
838
+ ("<unk>", 0.0),
839
+ ]
840
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
841
+ return vocab
842
+
843
+ def unk_id(self, proto):
844
+ return 3
845
+
846
+ def post_processor(self):
847
+ return processors.TemplateProcessing(
848
+ single="eng_Latn $A </s>",
849
+ pair="eng_Latn $A $B </s>",
850
+ special_tokens=[
851
+ ("eng_Latn", self.original_tokenizer.convert_tokens_to_ids("eng_Latn")),
852
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
853
+ ],
854
+ )
855
+
856
+
857
+ class SeamlessM4TConverter(SpmConverter):
858
+ def vocab(self, proto):
859
+ vocab = [
860
+ ("<pad>", 0.0),
861
+ ("<unk>", 0.0),
862
+ ("<s>", 0.0),
863
+ ("</s>", 0.0),
864
+ ]
865
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
866
+ return vocab
867
+
868
+ def unk_id(self, proto):
869
+ return self.original_tokenizer.unk_token_id
870
+
871
+ def post_processor(self):
872
+ return processors.TemplateProcessing(
873
+ single="__eng__ $A </s>",
874
+ pair="__eng__ $A $B </s>",
875
+ special_tokens=[
876
+ ("__eng__", self.original_tokenizer.convert_tokens_to_ids("__eng__")),
877
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
878
+ ],
879
+ )
880
+
881
+
882
+ class XLMRobertaConverter(SpmConverter):
883
+ def vocab(self, proto):
884
+ vocab = [
885
+ ("<s>", 0.0),
886
+ ("<pad>", 0.0),
887
+ ("</s>", 0.0),
888
+ ("<unk>", 0.0),
889
+ ]
890
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
891
+ vocab += [("<mask>", 0.0)]
892
+ return vocab
893
+
894
+ def unk_id(self, proto):
895
+ unk_id = 3
896
+ return unk_id
897
+
898
+ def post_processor(self):
899
+ return processors.TemplateProcessing(
900
+ single="<s> $A </s>",
901
+ pair="<s> $A </s> </s> $B </s>",
902
+ special_tokens=[
903
+ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
904
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
905
+ ],
906
+ )
907
+
908
+
909
+ class XLNetConverter(SpmConverter):
910
+ def vocab(self, proto):
911
+ return [
912
+ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100)
913
+ for piece in proto.pieces
914
+ ]
915
+
916
+ def normalizer(self, proto):
917
+ list_normalizers = [
918
+ normalizers.Replace("``", '"'),
919
+ normalizers.Replace("''", '"'),
920
+ ]
921
+ if not self.original_tokenizer.keep_accents:
922
+ list_normalizers.append(normalizers.NFKD())
923
+ list_normalizers.append(normalizers.StripAccents())
924
+ if self.original_tokenizer.do_lower_case:
925
+ list_normalizers.append(normalizers.Lowercase())
926
+
927
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
928
+
929
+ if precompiled_charsmap:
930
+ list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
931
+
932
+ list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " "))
933
+ return normalizers.Sequence(list_normalizers)
934
+
935
+ def post_processor(self):
936
+ return processors.TemplateProcessing(
937
+ single="$A:0 <sep>:0 <cls>:2",
938
+ pair="$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2",
939
+ special_tokens=[
940
+ ("<sep>", self.original_tokenizer.convert_tokens_to_ids("<sep>")),
941
+ ("<cls>", self.original_tokenizer.convert_tokens_to_ids("<cls>")),
942
+ ],
943
+ )
944
+
945
+
946
+ class ReformerConverter(SpmConverter):
947
+ pass
948
+
949
+
950
+ class RemBertConverter(SpmConverter):
951
+ # Inspired from AlbertConverter
952
+ def normalizer(self, proto):
953
+ list_normalizers = [
954
+ normalizers.Replace("``", '"'),
955
+ normalizers.Replace("''", '"'),
956
+ normalizers.Replace(Regex(" {2,}"), " "),
957
+ ]
958
+ if not self.original_tokenizer.keep_accents:
959
+ list_normalizers.append(normalizers.NFKD())
960
+ list_normalizers.append(normalizers.StripAccents())
961
+ if self.original_tokenizer.do_lower_case:
962
+ list_normalizers.append(normalizers.Lowercase())
963
+
964
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
965
+
966
+ if precompiled_charsmap:
967
+ list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
968
+
969
+ return normalizers.Sequence(list_normalizers)
970
+
971
+ def post_processor(self):
972
+ return processors.TemplateProcessing(
973
+ single="[CLS]:0 $A:0 [SEP]:0",
974
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
975
+ special_tokens=[
976
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
977
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
978
+ ],
979
+ )
980
+
981
+
982
+ class BertGenerationConverter(SpmConverter):
983
+ pass
984
+
985
+
986
+ class PegasusConverter(SpmConverter):
987
+ def vocab(self, proto):
988
+ vocab = [
989
+ (self.original_tokenizer.pad_token, 0.0),
990
+ (self.original_tokenizer.eos_token, 0.0),
991
+ ]
992
+
993
+ if self.original_tokenizer.mask_token_sent is not None:
994
+ vocab += [(self.original_tokenizer.mask_token_sent, 0.0)]
995
+
996
+ if (
997
+ self.original_tokenizer.mask_token is not None
998
+ and self.original_tokenizer.mask_token_id < self.original_tokenizer.offset
999
+ ):
1000
+ vocab += [(self.original_tokenizer.mask_token, 0.0)]
1001
+
1002
+ vocab += [(f"<unk_{i}>", -100.0) for i in range(2, self.original_tokenizer.offset)]
1003
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]]
1004
+ return vocab
1005
+
1006
+ def unk_id(self, proto):
1007
+ return proto.trainer_spec.unk_id + self.original_tokenizer.offset
1008
+
1009
+ def pre_tokenizer(self, replacement, add_prefix_space):
1010
+ return pre_tokenizers.Sequence(
1011
+ [
1012
+ pre_tokenizers.WhitespaceSplit(),
1013
+ pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
1014
+ ]
1015
+ )
1016
+
1017
+ def post_processor(self):
1018
+ eos = self.original_tokenizer.eos_token
1019
+ special_tokens = [
1020
+ (eos, self.original_tokenizer.eos_token_id),
1021
+ ]
1022
+ return processors.TemplateProcessing(single=["$A", eos], pair=["$A", "$B", eos], special_tokens=special_tokens)
1023
+
1024
+
1025
+ class T5Converter(SpmConverter):
1026
+ def vocab(self, proto):
1027
+ num_extra_ids = self.original_tokenizer._extra_ids
1028
+ vocab = [(piece.piece, piece.score) for piece in proto.pieces]
1029
+ vocab += [(f"<extra_id_{i}>", 0.0) for i in range(num_extra_ids - 1, -1, -1)]
1030
+ return vocab
1031
+
1032
+ def post_processor(self):
1033
+ return processors.TemplateProcessing(
1034
+ single=["$A", "</s>"],
1035
+ pair=["$A", "</s>", "$B", "</s>"],
1036
+ special_tokens=[
1037
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
1038
+ ],
1039
+ )
1040
+
1041
+
1042
+ class UdopConverter(SpmConverter):
1043
+ def post_processor(self):
1044
+ return processors.TemplateProcessing(
1045
+ single=["$A", "</s>"],
1046
+ pair=["$A", "</s>", "$B", "</s>"],
1047
+ special_tokens=[
1048
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
1049
+ ],
1050
+ )
1051
+
1052
+
1053
+ class WhisperConverter(Converter):
1054
+ def converted(self) -> Tokenizer:
1055
+ vocab = self.original_tokenizer.encoder
1056
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
1057
+
1058
+ tokenizer = Tokenizer(
1059
+ BPE(
1060
+ vocab=vocab,
1061
+ merges=merges,
1062
+ dropout=None,
1063
+ continuing_subword_prefix="",
1064
+ end_of_word_suffix="",
1065
+ fuse_unk=False,
1066
+ )
1067
+ )
1068
+
1069
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space)
1070
+ tokenizer.decoder = decoders.ByteLevel()
1071
+
1072
+ prefix_token_ids = self.original_tokenizer.prefix_tokens
1073
+ prefixes = self.original_tokenizer.convert_ids_to_tokens(prefix_token_ids)
1074
+ eos = self.original_tokenizer.eos_token
1075
+ eos_token_id = self.original_tokenizer.eos_token_id
1076
+ prefix_template = " ".join([f"{token}:0" for token in prefixes])
1077
+ tokenizer.post_processor = processors.TemplateProcessing(
1078
+ single=f"{prefix_template} $A:0 {eos}:0",
1079
+ pair=f"{prefix_template} $A:0 $B:1 {eos}:1",
1080
+ special_tokens=[
1081
+ (eos, eos_token_id),
1082
+ *zip(prefixes, prefix_token_ids),
1083
+ ],
1084
+ )
1085
+
1086
+ return tokenizer
1087
+
1088
+
1089
+ class BigBirdConverter(SpmConverter):
1090
+ def post_processor(self):
1091
+ return processors.TemplateProcessing(
1092
+ single="[CLS]:0 $A:0 [SEP]:0",
1093
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
1094
+ special_tokens=[
1095
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
1096
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
1097
+ ],
1098
+ )
1099
+
1100
+
1101
+ class CLIPConverter(Converter):
1102
+ def converted(self) -> Tokenizer:
1103
+ vocab = self.original_tokenizer.encoder
1104
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
1105
+ unk_token = self.original_tokenizer.unk_token
1106
+
1107
+ tokenizer = Tokenizer(
1108
+ BPE(
1109
+ vocab=vocab,
1110
+ merges=merges,
1111
+ dropout=None,
1112
+ continuing_subword_prefix="",
1113
+ end_of_word_suffix="</w>",
1114
+ fuse_unk=False,
1115
+ unk_token=str(unk_token),
1116
+ )
1117
+ )
1118
+
1119
+ tokenizer.normalizer = normalizers.Sequence(
1120
+ [normalizers.NFC(), normalizers.Replace(Regex(r"\s+"), " "), normalizers.Lowercase()]
1121
+ )
1122
+ tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
1123
+ [
1124
+ pre_tokenizers.Split(
1125
+ Regex(r"""'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+"""),
1126
+ behavior="removed",
1127
+ invert=True,
1128
+ ),
1129
+ pre_tokenizers.ByteLevel(add_prefix_space=False),
1130
+ ]
1131
+ )
1132
+ tokenizer.decoder = decoders.ByteLevel()
1133
+
1134
+ # Hack to get both ByteLevel and TemplateProcessing-style post-processing
1135
+ tokenizer.post_processor = processors.RobertaProcessing(
1136
+ sep=(self.original_tokenizer.eos_token, self.original_tokenizer.eos_token_id),
1137
+ cls=(self.original_tokenizer.bos_token, self.original_tokenizer.bos_token_id),
1138
+ add_prefix_space=False,
1139
+ trim_offsets=False,
1140
+ )
1141
+ return tokenizer
1142
+
1143
+
1144
+ class LayoutLMv2Converter(Converter):
1145
+ def converted(self) -> Tokenizer:
1146
+ vocab = self.original_tokenizer.vocab
1147
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
1148
+
1149
+ tokenize_chinese_chars = False
1150
+ strip_accents = False
1151
+ do_lower_case = True
1152
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
1153
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
1154
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
1155
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
1156
+
1157
+ tokenizer.normalizer = normalizers.BertNormalizer(
1158
+ clean_text=True,
1159
+ handle_chinese_chars=tokenize_chinese_chars,
1160
+ strip_accents=strip_accents,
1161
+ lowercase=do_lower_case,
1162
+ )
1163
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
1164
+
1165
+ cls = str(self.original_tokenizer.cls_token)
1166
+ sep = str(self.original_tokenizer.sep_token)
1167
+ cls_token_id = self.original_tokenizer.cls_token_id
1168
+ sep_token_id = self.original_tokenizer.sep_token_id
1169
+
1170
+ tokenizer.post_processor = processors.TemplateProcessing(
1171
+ single=f"{cls}:0 $A:0 {sep}:0",
1172
+ pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1",
1173
+ special_tokens=[
1174
+ (cls, cls_token_id),
1175
+ (sep, sep_token_id),
1176
+ ],
1177
+ )
1178
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
1179
+
1180
+ return tokenizer
1181
+
1182
+
1183
+ class BlenderbotConverter(Converter):
1184
+ def converted(self) -> Tokenizer:
1185
+ ot = self.original_tokenizer
1186
+ vocab = ot.encoder
1187
+ merges = list(ot.bpe_ranks.keys())
1188
+
1189
+ tokenizer = Tokenizer(
1190
+ BPE(
1191
+ vocab=vocab,
1192
+ merges=merges,
1193
+ dropout=None,
1194
+ continuing_subword_prefix="",
1195
+ end_of_word_suffix="",
1196
+ fuse_unk=False,
1197
+ )
1198
+ )
1199
+
1200
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
1201
+ tokenizer.decoder = decoders.ByteLevel()
1202
+ tokenizer.post_processor = processors.TemplateProcessing(
1203
+ single=f"$A:0 {ot.eos_token}:0",
1204
+ special_tokens=[
1205
+ (ot.eos_token, ot.eos_token_id),
1206
+ ],
1207
+ )
1208
+
1209
+ return tokenizer
1210
+
1211
+
1212
+ class XGLMConverter(SpmConverter):
1213
+ def vocab(self, proto):
1214
+ vocab = [
1215
+ ("<s>", 0.0),
1216
+ ("<pad>", 0.0),
1217
+ ("</s>", 0.0),
1218
+ ("<unk>", 0.0),
1219
+ ]
1220
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
1221
+ vocab += [("<madeupword0>", 0.0), ("<madeupword1>", 0.0), ("<madeupword2>", 0.0), ("<madeupword3>", 0.0), ("<madeupword4>", 0.0), ("<madeupword5>", 0.0), ("<madeupword6>", 0.0)] # fmt: skip
1222
+ return vocab
1223
+
1224
+ def unk_id(self, proto):
1225
+ unk_id = 3
1226
+ return unk_id
1227
+
1228
+ def post_processor(self):
1229
+ return processors.TemplateProcessing(
1230
+ single="</s> $A",
1231
+ pair="</s> $A </s> </s> $B",
1232
+ special_tokens=[
1233
+ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
1234
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
1235
+ ],
1236
+ )
1237
+
1238
+
1239
+ class GemmaConvert(SpmConverter):
1240
+ handle_byte_fallback = True
1241
+
1242
+ """"
1243
+ split_by_unicode_script: true
1244
+ split_by_number: true
1245
+ split_by_whitespace: true
1246
+ treat_whitespace_as_suffix: false
1247
+ allow_whitespace_only_pieces: true
1248
+ split_digits: true
1249
+ byte_fallback: true
1250
+ """
1251
+
1252
+ def normalizer(self, proto):
1253
+ return normalizers.Replace(" ", "▁")
1254
+
1255
+ def vocab(self, proto):
1256
+ vocab = [
1257
+ (self.original_tokenizer.pad_token, 0.0),
1258
+ (self.original_tokenizer.eos_token, 0.0),
1259
+ (self.original_tokenizer.bos_token, 0.0),
1260
+ ]
1261
+ for piece in proto.pieces[3:]:
1262
+ if piece.piece == "<0x09>":
1263
+ vocab += [("\t", piece.score)]
1264
+ else:
1265
+ vocab += [(piece.piece, piece.score)]
1266
+ # vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
1267
+ return vocab
1268
+
1269
+ def pre_tokenizer(self, replacement, add_prefix_space):
1270
+ return None
1271
+
1272
+ def unk_id(self, proto):
1273
+ unk_id = 3
1274
+ return unk_id
1275
+
1276
+ def decoder(self, replacement, add_prefix_space):
1277
+ return decoders.Sequence(
1278
+ [
1279
+ decoders.Replace("▁", " "),
1280
+ decoders.ByteFallback(),
1281
+ decoders.Fuse(),
1282
+ ]
1283
+ )
1284
+
1285
+ def tokenizer(self, proto):
1286
+ model_type = proto.trainer_spec.model_type
1287
+ vocab_scores = self.vocab(proto)
1288
+ if model_type == 1:
1289
+ import tokenizers
1290
+
1291
+ if version.parse(tokenizers.__version__) < version.parse("0.14.0"):
1292
+ tokenizer = Tokenizer(Unigram(vocab_scores, 0))
1293
+ else:
1294
+ tokenizer = Tokenizer(Unigram(vocab_scores, 0, byte_fallback=True))
1295
+
1296
+ elif model_type == 2:
1297
+ _, merges = GemmaSentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
1298
+ bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
1299
+
1300
+ tokenizer = Tokenizer(
1301
+ BPE(
1302
+ bpe_vocab,
1303
+ merges,
1304
+ unk_token=proto.trainer_spec.unk_piece,
1305
+ fuse_unk=True,
1306
+ byte_fallback=True,
1307
+ dropout=None,
1308
+ )
1309
+ )
1310
+ tokenizer.add_special_tokens(
1311
+ [
1312
+ AddedToken("<pad>", normalized=False, special=True),
1313
+ AddedToken("<eos>", normalized=False, special=True),
1314
+ AddedToken("<bos>", normalized=False, special=True),
1315
+ AddedToken("<unk>", normalized=False, special=True),
1316
+ ]
1317
+ )
1318
+ else:
1319
+ raise Exception(
1320
+ "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
1321
+ )
1322
+ user_defined_symbols = [
1323
+ AddedToken(token, normalized=False, special=False) for token in proto.trainer_spec.user_defined_symbols
1324
+ ]
1325
+ tokenizer.add_tokens(user_defined_symbols)
1326
+ return tokenizer
1327
+
1328
+
1329
+ class LlamaConverter(SpmConverter):
1330
+ handle_byte_fallback = True
1331
+
1332
+ def vocab(self, proto):
1333
+ vocab = [
1334
+ (self.original_tokenizer.convert_ids_to_tokens(0), 0.0),
1335
+ (self.original_tokenizer.convert_ids_to_tokens(1), 0.0),
1336
+ (self.original_tokenizer.convert_ids_to_tokens(2), 0.0),
1337
+ ]
1338
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
1339
+ return vocab
1340
+
1341
+ def unk_id(self, proto):
1342
+ unk_id = 0
1343
+ return unk_id
1344
+
1345
+ def decoder(self, replacement, add_prefix_space):
1346
+ sequence = [
1347
+ decoders.Replace("▁", " "),
1348
+ decoders.ByteFallback(),
1349
+ decoders.Fuse(),
1350
+ ]
1351
+ if add_prefix_space:
1352
+ sequence += [decoders.Strip(content=" ", left=1)]
1353
+ return decoders.Sequence(sequence)
1354
+
1355
+ def tokenizer(self, proto):
1356
+ model_type = proto.trainer_spec.model_type
1357
+ vocab_scores = self.vocab(proto)
1358
+ if model_type == 1:
1359
+ import tokenizers
1360
+
1361
+ if version.parse(tokenizers.__version__) < version.parse("0.14.0"):
1362
+ tokenizer = Tokenizer(Unigram(vocab_scores, 0))
1363
+ else:
1364
+ tokenizer = Tokenizer(Unigram(vocab_scores, 0, byte_fallback=True))
1365
+
1366
+ elif model_type == 2:
1367
+ _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
1368
+ bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
1369
+ tokenizer = Tokenizer(
1370
+ BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=True)
1371
+ )
1372
+ tokenizer.add_special_tokens(
1373
+ [
1374
+ AddedToken(self.original_tokenizer.convert_ids_to_tokens(0), normalized=False, special=True),
1375
+ AddedToken(self.original_tokenizer.convert_ids_to_tokens(1), normalized=False, special=True),
1376
+ AddedToken(self.original_tokenizer.convert_ids_to_tokens(2), normalized=False, special=True),
1377
+ ]
1378
+ )
1379
+ else:
1380
+ raise Exception(
1381
+ "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
1382
+ )
1383
+
1384
+ return tokenizer
1385
+
1386
+ def normalizer(self, proto):
1387
+ sequence = []
1388
+ if hasattr(self.original_tokenizer, "add_prefix_space"):
1389
+ if self.original_tokenizer.add_prefix_space:
1390
+ sequence += [normalizers.Prepend(prepend="▁")]
1391
+ sequence += [normalizers.Replace(pattern=" ", content="▁")]
1392
+ return normalizers.Sequence(sequence)
1393
+
1394
+ def pre_tokenizer(self, replacement, add_prefix_space):
1395
+ return None
1396
+
1397
+ def post_processor(self):
1398
+ # the processor is defined in the LlamaTokenizerFast class.
1399
+ return None
1400
+
1401
+
1402
+ class MarkupLMConverter(Converter):
1403
+ def converted(self) -> Tokenizer:
1404
+ ot = self.original_tokenizer
1405
+ vocab = ot.encoder
1406
+ merges = list(ot.bpe_ranks.keys())
1407
+
1408
+ tokenizer = Tokenizer(
1409
+ BPE(
1410
+ vocab=vocab,
1411
+ merges=merges,
1412
+ dropout=None,
1413
+ continuing_subword_prefix="",
1414
+ end_of_word_suffix="",
1415
+ fuse_unk=False,
1416
+ unk_token=self.original_tokenizer.unk_token,
1417
+ )
1418
+ )
1419
+
1420
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
1421
+ tokenizer.decoder = decoders.ByteLevel()
1422
+
1423
+ cls = str(self.original_tokenizer.cls_token)
1424
+ sep = str(self.original_tokenizer.sep_token)
1425
+ cls_token_id = self.original_tokenizer.cls_token_id
1426
+ sep_token_id = self.original_tokenizer.sep_token_id
1427
+
1428
+ tokenizer.post_processor = processors.TemplateProcessing(
1429
+ single=f"{cls} $A {sep}",
1430
+ pair=f"{cls} $A {sep} $B {sep}",
1431
+ special_tokens=[
1432
+ (cls, cls_token_id),
1433
+ (sep, sep_token_id),
1434
+ ],
1435
+ )
1436
+
1437
+ return tokenizer
1438
+
1439
+
1440
+ SLOW_TO_FAST_CONVERTERS = {
1441
+ "AlbertTokenizer": AlbertConverter,
1442
+ "BartTokenizer": RobertaConverter,
1443
+ "BarthezTokenizer": BarthezConverter,
1444
+ "BertTokenizer": BertConverter,
1445
+ "BigBirdTokenizer": BigBirdConverter,
1446
+ "BlenderbotTokenizer": BlenderbotConverter,
1447
+ "CamembertTokenizer": CamembertConverter,
1448
+ "CLIPTokenizer": CLIPConverter,
1449
+ "CodeGenTokenizer": GPT2Converter,
1450
+ "ConvBertTokenizer": BertConverter,
1451
+ "DebertaTokenizer": DebertaConverter,
1452
+ "DebertaV2Tokenizer": DebertaV2Converter,
1453
+ "DistilBertTokenizer": BertConverter,
1454
+ "DPRReaderTokenizer": BertConverter,
1455
+ "DPRQuestionEncoderTokenizer": BertConverter,
1456
+ "DPRContextEncoderTokenizer": BertConverter,
1457
+ "ElectraTokenizer": BertConverter,
1458
+ "FNetTokenizer": AlbertConverter,
1459
+ "FunnelTokenizer": FunnelConverter,
1460
+ "GPT2Tokenizer": GPT2Converter,
1461
+ "HerbertTokenizer": HerbertConverter,
1462
+ "LayoutLMTokenizer": BertConverter,
1463
+ "LayoutLMv2Tokenizer": BertConverter,
1464
+ "LayoutLMv3Tokenizer": RobertaConverter,
1465
+ "LayoutXLMTokenizer": XLMRobertaConverter,
1466
+ "LongformerTokenizer": RobertaConverter,
1467
+ "LEDTokenizer": RobertaConverter,
1468
+ "LxmertTokenizer": BertConverter,
1469
+ "MarkupLMTokenizer": MarkupLMConverter,
1470
+ "MBartTokenizer": MBartConverter,
1471
+ "MBart50Tokenizer": MBart50Converter,
1472
+ "MPNetTokenizer": MPNetConverter,
1473
+ "MobileBertTokenizer": BertConverter,
1474
+ "MvpTokenizer": RobertaConverter,
1475
+ "NllbTokenizer": NllbConverter,
1476
+ "OpenAIGPTTokenizer": OpenAIGPTConverter,
1477
+ "PegasusTokenizer": PegasusConverter,
1478
+ "Qwen2Tokenizer": Qwen2Converter,
1479
+ "RealmTokenizer": BertConverter,
1480
+ "ReformerTokenizer": ReformerConverter,
1481
+ "RemBertTokenizer": RemBertConverter,
1482
+ "RetriBertTokenizer": BertConverter,
1483
+ "RobertaTokenizer": RobertaConverter,
1484
+ "RoFormerTokenizer": RoFormerConverter,
1485
+ "SeamlessM4TTokenizer": SeamlessM4TConverter,
1486
+ "SqueezeBertTokenizer": BertConverter,
1487
+ "T5Tokenizer": T5Converter,
1488
+ "UdopTokenizer": UdopConverter,
1489
+ "WhisperTokenizer": WhisperConverter,
1490
+ "XLMRobertaTokenizer": XLMRobertaConverter,
1491
+ "XLNetTokenizer": XLNetConverter,
1492
+ "SplinterTokenizer": SplinterConverter,
1493
+ "XGLMTokenizer": XGLMConverter,
1494
+ "LlamaTokenizer": LlamaConverter,
1495
+ "CodeLlamaTokenizer": LlamaConverter,
1496
+ "GemmaTokenizer": GemmaConvert,
1497
+ }
1498
+
1499
+
1500
+ def convert_slow_tokenizer(transformer_tokenizer) -> Tokenizer:
1501
+ """
1502
+ Utilities to convert a slow tokenizer instance into a fast tokenizer instance.
1503
+
1504
+ Args:
1505
+ transformer_tokenizer ([`~tokenization_utils_base.PreTrainedTokenizer`]):
1506
+ Instance of a slow tokenizer to convert into the backend tokenizer for
1507
+ [`~tokenization_utils_base.PreTrainedTokenizerFast`].
1508
+
1509
+ Return:
1510
+ An instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a
1511
+ [`~tokenization_utils_base.PreTrainedTokenizerFast`]
1512
+ """
1513
+
1514
+ tokenizer_class_name = transformer_tokenizer.__class__.__name__
1515
+
1516
+ if tokenizer_class_name not in SLOW_TO_FAST_CONVERTERS:
1517
+ raise ValueError(
1518
+ f"An instance of tokenizer class {tokenizer_class_name} cannot be converted in a Fast tokenizer instance."
1519
+ " No converter was found. Currently available slow->fast convertors:"
1520
+ f" {list(SLOW_TO_FAST_CONVERTERS.keys())}"
1521
+ )
1522
+
1523
+ converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name]
1524
+
1525
+ return converter_class(transformer_tokenizer).converted()
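A minimal usage sketch of the converter entry point above, under the assumption that the `sentencepiece` extra is installed and the illustrative `albert-base-v2` checkpoint can be downloaded; the returned `tokenizers.Tokenizer` is then wrapped in a `PreTrainedTokenizerFast`:

from transformers import AlbertTokenizer, PreTrainedTokenizerFast
from transformers.convert_slow_tokenizer import convert_slow_tokenizer

# Load a slow (SentencePiece-backed) tokenizer, convert it, and wrap the backend.
slow_tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
backend = convert_slow_tokenizer(slow_tokenizer)  # a tokenizers.Tokenizer instance
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=backend)
print(fast_tokenizer.tokenize("Hello world"))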
env-llmeval/lib/python3.10/site-packages/transformers/convert_slow_tokenizers_checkpoints_to_fast.py ADDED
@@ -0,0 +1,126 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Convert slow tokenizers checkpoints in fast (serialization format of the `tokenizers` library)"""
16
+
17
+ import argparse
18
+ import os
19
+
20
+ import transformers
21
+
22
+ from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
23
+ from .utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
32
+
33
+
34
+ def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
35
+ if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
36
+ raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
37
+
38
+ if tokenizer_name is None:
39
+ tokenizer_names = TOKENIZER_CLASSES
40
+ else:
41
+ tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
42
+
43
+ logger.info(f"Loading tokenizer classes: {tokenizer_names}")
44
+
45
+ for tokenizer_name in tokenizer_names:
46
+ tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
47
+
48
+ add_prefix = True
49
+ if checkpoint_name is None:
50
+ checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
51
+ else:
52
+ checkpoint_names = [checkpoint_name]
53
+
54
+ logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
55
+
56
+ for checkpoint in checkpoint_names:
57
+ logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
58
+
59
+ # Load tokenizer
60
+ tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
61
+
62
+ # Save fast tokenizer
63
+ logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
64
+
65
+ # For organization names we create sub-directories
66
+ if "/" in checkpoint:
67
+ checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
68
+ dump_path_full = os.path.join(dump_path, checkpoint_directory)
69
+ elif add_prefix:
70
+ checkpoint_prefix_name = checkpoint
71
+ dump_path_full = dump_path
72
+ else:
73
+ checkpoint_prefix_name = None
74
+ dump_path_full = dump_path
75
+
76
+ logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
77
+
78
+ if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
79
+ file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
80
+ next_char = file_path.split(checkpoint)[-1][0]
81
+ if next_char == "/":
82
+ dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
83
+ checkpoint_prefix_name = None
84
+
85
+ logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
86
+
87
+ file_names = tokenizer.save_pretrained(
88
+ dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
89
+ )
90
+ logger.info(f"=> File names {file_names}")
91
+
92
+ for file_name in file_names:
93
+ if not file_name.endswith("tokenizer.json"):
94
+ os.remove(file_name)
95
+ logger.info(f"=> removing {file_name}")
96
+
97
+
98
+ if __name__ == "__main__":
99
+ parser = argparse.ArgumentParser()
100
+ # Required parameters
101
+ parser.add_argument(
102
+ "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
103
+ )
104
+ parser.add_argument(
105
+ "--tokenizer_name",
106
+ default=None,
107
+ type=str,
108
+ help=(
109
+ f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
110
+ "download and convert all the checkpoints from AWS."
111
+ ),
112
+ )
113
+ parser.add_argument(
114
+ "--checkpoint_name",
115
+ default=None,
116
+ type=str,
117
+ help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
118
+ )
119
+ parser.add_argument(
120
+ "--force_download",
121
+ action="store_true",
122
+ help="Re-download checkpoints.",
123
+ )
124
+ args = parser.parse_args()
125
+
126
+ convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
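A hedged sketch of calling the script above from Python instead of the CLI; the checkpoint name and dump path are illustrative and a network connection is assumed for the download:

from transformers.convert_slow_tokenizers_checkpoints_to_fast import convert_slow_checkpoint_to_fast

# Convert a single slow checkpoint and write the resulting tokenizer.json under the dump path.
# The keyword arguments mirror the --tokenizer_name / --checkpoint_name / --dump_path CLI flags.
convert_slow_checkpoint_to_fast(
    tokenizer_name="BertTokenizer",
    checkpoint_name="bert-base-uncased",
    dump_path="/tmp/fast_tokenizers",
    force_download=False,
)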
env-llmeval/lib/python3.10/site-packages/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py ADDED
@@ -0,0 +1,88 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Seq2Seq TF Hub checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ from . import (
21
+ BertConfig,
22
+ BertGenerationConfig,
23
+ BertGenerationDecoder,
24
+ BertGenerationEncoder,
25
+ load_tf_weights_in_bert_generation,
26
+ logging,
27
+ )
28
+
29
+
30
+ logging.set_verbosity_info()
31
+
32
+
33
+ def convert_tf_checkpoint_to_pytorch(tf_hub_path, pytorch_dump_path, is_encoder_named_decoder, vocab_size, is_encoder):
34
+ # Initialise PyTorch model
35
+ bert_config = BertConfig.from_pretrained(
36
+ "google-bert/bert-large-cased",
37
+ vocab_size=vocab_size,
38
+ max_position_embeddings=512,
39
+ is_decoder=True,
40
+ add_cross_attention=True,
41
+ )
42
+ bert_config_dict = bert_config.to_dict()
43
+ del bert_config_dict["type_vocab_size"]
44
+ config = BertGenerationConfig(**bert_config_dict)
45
+ if is_encoder:
46
+ model = BertGenerationEncoder(config)
47
+ else:
48
+ model = BertGenerationDecoder(config)
49
+ print(f"Building PyTorch model from configuration: {config}")
50
+
51
+ # Load weights from tf checkpoint
52
+ load_tf_weights_in_bert_generation(
53
+ model,
54
+ tf_hub_path,
55
+ model_class="bert",
56
+ is_encoder_named_decoder=is_encoder_named_decoder,
57
+ is_encoder=is_encoder,
58
+ )
59
+
60
+ # Save pytorch-model
61
+ print(f"Save PyTorch model and config to {pytorch_dump_path}")
62
+ model.save_pretrained(pytorch_dump_path)
63
+
64
+
65
+ if __name__ == "__main__":
66
+ parser = argparse.ArgumentParser()
67
+ # Required parameters
68
+ parser.add_argument(
69
+ "--tf_hub_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
70
+ )
71
+ parser.add_argument(
72
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
73
+ )
74
+ parser.add_argument(
75
+ "--is_encoder_named_decoder",
76
+ action="store_true",
77
+ help="If decoder has to be renamed to encoder in PyTorch model.",
78
+ )
79
+ parser.add_argument("--is_encoder", action="store_true", help="If model is an encoder.")
80
+ parser.add_argument("--vocab_size", default=50358, type=int, help="Vocab size of model")
81
+ args = parser.parse_args()
82
+ convert_tf_checkpoint_to_pytorch(
83
+ args.tf_hub_path,
84
+ args.pytorch_dump_path,
85
+ args.is_encoder_named_decoder,
86
+ args.vocab_size,
87
+ is_encoder=args.is_encoder,
88
+ )
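A hedged usage sketch of the conversion function above; it assumes TensorFlow is installed, a TF Hub seq2seq BERT checkpoint exists at the (placeholder) local path, and the default vocab size applies:

from transformers.convert_tf_hub_seq_to_seq_bert_to_pytorch import convert_tf_checkpoint_to_pytorch

# Paths are illustrative placeholders; the TF Hub checkpoint must exist locally.
convert_tf_checkpoint_to_pytorch(
    tf_hub_path="/path/to/tf_hub_checkpoint",
    pytorch_dump_path="/tmp/bert_generation_encoder",
    is_encoder_named_decoder=False,
    vocab_size=50358,
    is_encoder=True,
)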
env-llmeval/lib/python3.10/site-packages/transformers/dependency_versions_table.py ADDED
@@ -0,0 +1,92 @@
1
+ # THIS FILE HAS BEEN AUTOGENERATED. To update:
2
+ # 1. modify the `_deps` dict in setup.py
3
+ # 2. run `make deps_table_update`
4
+ deps = {
5
+ "Pillow": "Pillow>=10.0.1,<=15.0",
6
+ "accelerate": "accelerate>=0.21.0",
7
+ "av": "av==9.2.0",
8
+ "beautifulsoup4": "beautifulsoup4",
9
+ "codecarbon": "codecarbon==1.2.0",
10
+ "cookiecutter": "cookiecutter==1.7.3",
11
+ "dataclasses": "dataclasses",
12
+ "datasets": "datasets!=2.5.0",
13
+ "decord": "decord==0.6.0",
14
+ "deepspeed": "deepspeed>=0.9.3",
15
+ "diffusers": "diffusers",
16
+ "dill": "dill<0.3.5",
17
+ "evaluate": "evaluate>=0.2.0",
18
+ "faiss-cpu": "faiss-cpu",
19
+ "fastapi": "fastapi",
20
+ "filelock": "filelock",
21
+ "flax": "flax>=0.4.1,<=0.7.0",
22
+ "fsspec": "fsspec<2023.10.0",
23
+ "ftfy": "ftfy",
24
+ "fugashi": "fugashi>=1.0",
25
+ "GitPython": "GitPython<3.1.19",
26
+ "hf-doc-builder": "hf-doc-builder>=0.3.0",
27
+ "huggingface-hub": "huggingface-hub>=0.19.3,<1.0",
28
+ "importlib_metadata": "importlib_metadata",
29
+ "ipadic": "ipadic>=1.0.0,<2.0",
30
+ "isort": "isort>=5.5.4",
31
+ "jax": "jax>=0.4.1,<=0.4.13",
32
+ "jaxlib": "jaxlib>=0.4.1,<=0.4.13",
33
+ "jieba": "jieba",
34
+ "kenlm": "kenlm",
35
+ "keras": "keras<2.16",
36
+ "keras-nlp": "keras-nlp>=0.3.1",
37
+ "librosa": "librosa",
38
+ "nltk": "nltk",
39
+ "natten": "natten>=0.14.6,<0.15.0",
40
+ "numpy": "numpy>=1.17",
41
+ "onnxconverter-common": "onnxconverter-common",
42
+ "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
43
+ "onnxruntime": "onnxruntime>=1.4.0",
44
+ "opencv-python": "opencv-python",
45
+ "optuna": "optuna",
46
+ "optax": "optax>=0.0.8,<=0.1.4",
47
+ "packaging": "packaging>=20.0",
48
+ "parameterized": "parameterized",
49
+ "phonemizer": "phonemizer",
50
+ "protobuf": "protobuf",
51
+ "psutil": "psutil",
52
+ "pyyaml": "pyyaml>=5.1",
53
+ "pydantic": "pydantic",
54
+ "pytest": "pytest>=7.2.0,<8.0.0",
55
+ "pytest-timeout": "pytest-timeout",
56
+ "pytest-xdist": "pytest-xdist",
57
+ "python": "python>=3.8.0",
58
+ "ray[tune]": "ray[tune]>=2.7.0",
59
+ "regex": "regex!=2019.12.17",
60
+ "requests": "requests",
61
+ "rhoknp": "rhoknp>=1.1.0,<1.3.1",
62
+ "rjieba": "rjieba",
63
+ "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
64
+ "ruff": "ruff==0.1.5",
65
+ "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
66
+ "sacremoses": "sacremoses",
67
+ "safetensors": "safetensors>=0.4.1",
68
+ "sagemaker": "sagemaker>=2.31.0",
69
+ "scikit-learn": "scikit-learn",
70
+ "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
71
+ "sigopt": "sigopt",
72
+ "starlette": "starlette",
73
+ "sudachipy": "sudachipy>=0.6.6",
74
+ "sudachidict_core": "sudachidict_core>=20220729",
75
+ "tensorboard": "tensorboard",
76
+ "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.16",
77
+ "tensorflow": "tensorflow>=2.6,<2.16",
78
+ "tensorflow-text": "tensorflow-text<2.16",
79
+ "tf2onnx": "tf2onnx",
80
+ "timeout-decorator": "timeout-decorator",
81
+ "timm": "timm",
82
+ "tokenizers": "tokenizers>=0.14,<0.19",
83
+ "torch": "torch",
84
+ "torchaudio": "torchaudio",
85
+ "torchvision": "torchvision",
86
+ "pyctcdecode": "pyctcdecode>=0.4.0",
87
+ "tqdm": "tqdm>=4.27",
88
+ "unidic": "unidic>=1.0.2",
89
+ "unidic_lite": "unidic_lite>=1.0.7",
90
+ "urllib3": "urllib3<2.0.0",
91
+ "uvicorn": "uvicorn",
92
+ }
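A small sketch of how the pins above can be inspected at runtime; this is not the library's own dependency_versions_check logic, and it assumes `packaging` and `tokenizers` are installed in the environment:

import importlib.metadata

from packaging.requirements import Requirement

from transformers.dependency_versions_table import deps

# Parse one pinned requirement string (e.g. "tokenizers>=0.14,<0.19") and
# compare it against the version actually installed.
requirement = Requirement(deps["tokenizers"])
installed = importlib.metadata.version(requirement.name)
print(requirement.name, installed, requirement.specifier.contains(installed))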
env-llmeval/lib/python3.10/site-packages/transformers/feature_extraction_sequence_utils.py ADDED
@@ -0,0 +1,371 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Sequence feature extraction class for common feature extractors to preprocess sequences.
17
+ """
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+
22
+ from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
23
+ from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class SequenceFeatureExtractor(FeatureExtractionMixin):
30
+ """
31
+ This is a general feature extraction class for speech recognition.
32
+
33
+ Args:
34
+ feature_size (`int`):
35
+ The feature dimension of the extracted features.
36
+ sampling_rate (`int`):
37
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
38
+ padding_value (`float`):
39
+ The value that is used to fill the padding values / vectors.
40
+ """
41
+
42
+ def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
43
+ self.feature_size = feature_size
44
+ self.sampling_rate = sampling_rate
45
+ self.padding_value = padding_value
46
+
47
+ self.padding_side = kwargs.pop("padding_side", "right")
48
+ self.return_attention_mask = kwargs.pop("return_attention_mask", True)
49
+
50
+ super().__init__(**kwargs)
51
+
52
+ def pad(
53
+ self,
54
+ processed_features: Union[
55
+ BatchFeature,
56
+ List[BatchFeature],
57
+ Dict[str, BatchFeature],
58
+ Dict[str, List[BatchFeature]],
59
+ List[Dict[str, BatchFeature]],
60
+ ],
61
+ padding: Union[bool, str, PaddingStrategy] = True,
62
+ max_length: Optional[int] = None,
63
+ truncation: bool = False,
64
+ pad_to_multiple_of: Optional[int] = None,
65
+ return_attention_mask: Optional[bool] = None,
66
+ return_tensors: Optional[Union[str, TensorType]] = None,
67
+ ) -> BatchFeature:
68
+ """
69
+ Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the
70
+ max sequence length in the batch.
71
+
72
+ The padding side (left/right) and the padding value are defined at the feature extractor level (with `self.padding_side`,
73
+ `self.padding_value`)
74
+
75
+ <Tip>
76
+
77
+ If the `processed_features` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
78
+ result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
79
+ PyTorch tensors, you will lose the specific device of your tensors however.
80
+
81
+ </Tip>
82
+
83
+ Args:
84
+ processed_features ([`BatchFeature`], list of [`BatchFeature`], `Dict[str, List[float]]`, `Dict[str, List[List[float]]` or `List[Dict[str, List[float]]]`):
85
+ Processed inputs. Can represent one input ([`BatchFeature`] or `Dict[str, List[float]]`) or a batch of
86
+ input values / vectors (list of [`BatchFeature`], *Dict[str, List[List[float]]]* or *List[Dict[str,
87
+ List[float]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
88
+ collate function.
89
+
90
+ Instead of `List[float]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors),
91
+ see the note above for the return type.
92
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
93
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
94
+ index) among:
95
+
96
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
97
+ sequence is provided).
98
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
99
+ acceptable input length for the model if that argument is not provided.
100
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
101
+ lengths).
102
+ max_length (`int`, *optional*):
103
+ Maximum length of the returned list and optionally padding length (see above).
104
+ truncation (`bool`):
105
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
106
+ pad_to_multiple_of (`int`, *optional*):
107
+ If set, will pad the sequence to a multiple of the provided value.
108
+
109
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
110
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
111
+ return_attention_mask (`bool`, *optional*):
112
+ Whether to return the attention mask. If left to the default, will return the attention mask according
113
+ to the specific feature_extractor's default.
114
+
115
+ [What are attention masks?](../glossary#attention-mask)
116
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
117
+ If set, will return tensors instead of list of python integers. Acceptable values are:
118
+
119
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
120
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
121
+ - `'np'`: Return Numpy `np.ndarray` objects.
122
+ """
123
+ # If we have a list of dicts, let's convert it in a dict of lists
124
+ # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
125
+ if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
126
+ processed_features = {
127
+ key: [example[key] for example in processed_features] for key in processed_features[0].keys()
128
+ }
129
+
130
+ # The model's main input name, usually `input_values`, has to be passed for padding
131
+ if self.model_input_names[0] not in processed_features:
132
+ raise ValueError(
133
+ "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
134
+ f" to this method that includes {self.model_input_names[0]}, but you provided"
135
+ f" {list(processed_features.keys())}"
136
+ )
137
+
138
+ required_input = processed_features[self.model_input_names[0]]
139
+ return_attention_mask = (
140
+ return_attention_mask if return_attention_mask is not None else self.return_attention_mask
141
+ )
142
+
143
+ if len(required_input) == 0:
144
+ if return_attention_mask:
145
+ processed_features["attention_mask"] = []
146
+ return processed_features
147
+
148
+ # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
149
+ # and rebuild them afterwards if no return_tensors is specified
150
+ # Note that we lose the specific device the tensor may be on for PyTorch
151
+
152
+ first_element = required_input[0]
153
+ if isinstance(first_element, (list, tuple)):
154
+ # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
155
+ index = 0
156
+ while len(required_input[index]) == 0:
157
+ index += 1
158
+ if index < len(required_input):
159
+ first_element = required_input[index][0]
160
+
161
+ if return_tensors is None:
162
+ if is_tf_tensor(first_element):
163
+ return_tensors = "tf"
164
+ elif is_torch_tensor(first_element):
165
+ return_tensors = "pt"
166
+ elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
167
+ return_tensors = "np"
168
+ else:
169
+ raise ValueError(
170
+ f"type of {first_element} unknown: {type(first_element)}. "
171
+ "Should be one of a python, numpy, pytorch or tensorflow object."
172
+ )
173
+
174
+ for key, value in processed_features.items():
175
+ if isinstance(value[0], (int, float)):
176
+ processed_features[key] = to_numpy(value)
177
+ else:
178
+ processed_features[key] = [to_numpy(v) for v in value]
179
+
180
+ # Convert padding_strategy in PaddingStrategy
181
+ padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
182
+
183
+ required_input = processed_features[self.model_input_names[0]]
184
+
185
+ batch_size = len(required_input)
186
+ if not all(len(v) == batch_size for v in processed_features.values()):
187
+ raise ValueError("Some items in the output dictionary have a different batch size than others.")
188
+
189
+ truncated_inputs = []
190
+ for i in range(batch_size):
191
+ inputs = {k: v[i] for k, v in processed_features.items()}
192
+ # truncation
193
+ inputs_slice = self._truncate(
194
+ inputs,
195
+ max_length=max_length,
196
+ pad_to_multiple_of=pad_to_multiple_of,
197
+ truncation=truncation,
198
+ )
199
+ truncated_inputs.append(inputs_slice)
200
+
201
+ if padding_strategy == PaddingStrategy.LONGEST:
202
+ # make sure that `max_length` cannot be longer than the longest truncated length
203
+ max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
204
+ padding_strategy = PaddingStrategy.MAX_LENGTH
205
+
206
+ batch_outputs = {}
207
+ for i in range(batch_size):
208
+ # padding
209
+ outputs = self._pad(
210
+ truncated_inputs[i],
211
+ max_length=max_length,
212
+ padding_strategy=padding_strategy,
213
+ pad_to_multiple_of=pad_to_multiple_of,
214
+ return_attention_mask=return_attention_mask,
215
+ )
216
+
217
+ for key, value in outputs.items():
218
+ if key not in batch_outputs:
219
+ batch_outputs[key] = []
220
+ if value.dtype is np.dtype(np.float64):
221
+ value = value.astype(np.float32)
222
+ batch_outputs[key].append(value)
223
+
224
+ return BatchFeature(batch_outputs, tensor_type=return_tensors)
225
+
226
+ def _pad(
227
+ self,
228
+ processed_features: Union[Dict[str, np.ndarray], BatchFeature],
229
+ max_length: Optional[int] = None,
230
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
231
+ pad_to_multiple_of: Optional[int] = None,
232
+ return_attention_mask: Optional[bool] = None,
233
+ ) -> dict:
234
+ """
235
+ Pad inputs (on left/right and up to predefined length or max length in the batch)
236
+
237
+ Args:
238
+ processed_features (`Union[Dict[str, np.ndarray], BatchFeature]`):
239
+ Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch
240
+ of inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)
241
+ max_length (`int`, *optional*):
242
+ Maximum length of the returned list and optionally padding length (see below)
243
+ padding_strategy (`PaddingStrategy`, *optional*, default to `PaddingStrategy.DO_NOT_PAD`):
244
+ PaddingStrategy to use for padding.
245
+
246
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
247
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
248
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
249
+ The feature_extractor padding sides are defined in self.padding_side:
250
+
251
+ - 'left': pads on the left of the sequences
252
+ - 'right': pads on the right of the sequences
253
+ pad_to_multiple_of (`int`, *optional*):
254
+ If set, will pad the sequence to a multiple of the provided value. This is especially useful to
255
+ enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
256
+ which benefit from having sequence lengths be a multiple of 128.
257
+ return_attention_mask (`bool`, *optional*):
258
+ Set to False to avoid returning attention mask (default: set to model specifics)
259
+ """
260
+ required_input = processed_features[self.model_input_names[0]]
261
+
262
+ if padding_strategy == PaddingStrategy.LONGEST:
263
+ max_length = len(required_input)
264
+
265
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
266
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
267
+
268
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
269
+
270
+ if return_attention_mask and "attention_mask" not in processed_features:
271
+ processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)
272
+
273
+ if needs_to_be_padded:
274
+ difference = max_length - len(required_input)
275
+ if self.padding_side == "right":
276
+ if return_attention_mask:
277
+ processed_features["attention_mask"] = np.pad(
278
+ processed_features["attention_mask"], (0, difference)
279
+ )
280
+ padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
281
+ processed_features[self.model_input_names[0]] = np.pad(
282
+ required_input, padding_shape, "constant", constant_values=self.padding_value
283
+ )
284
+ elif self.padding_side == "left":
285
+ if return_attention_mask:
286
+ processed_features["attention_mask"] = np.pad(
287
+ processed_features["attention_mask"], (difference, 0)
288
+ )
289
+ padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
290
+ processed_features[self.model_input_names[0]] = np.pad(
291
+ required_input, padding_shape, "constant", constant_values=self.padding_value
292
+ )
293
+ else:
294
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
295
+
296
+ return processed_features
297
+
298
+ def _truncate(
299
+ self,
300
+ processed_features: Union[Dict[str, np.ndarray], BatchFeature],
301
+ max_length: Optional[int] = None,
302
+ pad_to_multiple_of: Optional[int] = None,
303
+ truncation: Optional[bool] = None,
304
+ ):
305
+ """
306
+ Truncate inputs to predefined length or max length in the batch
307
+
308
+ Args:
309
+ processed_features(`Union[Dict[str, np.ndarray], BatchFeature]`):
310
+ Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch
311
+ of inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)
312
+ max_length (`int`, *optional*):
313
+ maximum length of the returned list and optionally padding length (see below)
314
+ pad_to_multiple_of (`int`, *optional*) :
315
+ If set, will pad the sequence to a multiple of the provided value. This is especially useful to
316
+ enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
317
+ which benefit from having sequence lengths be a multiple of 128.
318
+ truncation (`bool`, *optional*):
319
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
320
+ """
321
+ if not truncation:
322
+ return processed_features
323
+ elif truncation and max_length is None:
324
+ raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
325
+
326
+ required_input = processed_features[self.model_input_names[0]]
327
+
328
+ # find `max_length` that fits `pad_to_multiple_of`
329
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
330
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
331
+
332
+ needs_to_be_truncated = len(required_input) > max_length
333
+
334
+ if needs_to_be_truncated:
335
+ processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
336
+ if "attention_mask" in processed_features:
337
+ processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
338
+
339
+ return processed_features
340
+
341
+ def _get_padding_strategies(self, padding=False, max_length=None):
342
+ """
343
+ Find the correct padding strategy
344
+ """
345
+
346
+ # Get padding strategy
347
+ if padding is not False:
348
+ if padding is True:
349
+ padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
350
+ elif not isinstance(padding, PaddingStrategy):
351
+ padding_strategy = PaddingStrategy(padding)
352
+ elif isinstance(padding, PaddingStrategy):
353
+ padding_strategy = padding
354
+ else:
355
+ padding_strategy = PaddingStrategy.DO_NOT_PAD
356
+
357
+ # Set max length if needed
358
+ if max_length is None:
359
+ if padding_strategy == PaddingStrategy.MAX_LENGTH:
360
+ raise ValueError(
361
+ f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
362
+ )
363
+
364
+ # Test if we have a padding value
365
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
366
+ raise ValueError(
367
+ "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
368
+ " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
369
+ )
370
+
371
+ return padding_strategy
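A minimal sketch of the `pad` API above, using `Wav2Vec2FeatureExtractor` as a concrete subclass; the constructor values and the two toy "waveforms" are illustrative:

import numpy as np

from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1, sampling_rate=16000, padding_value=0.0, return_attention_mask=True
)
# Two 1-D inputs of different lengths; `pad` aligns them to the longest one.
batch = {"input_values": [np.ones(8, dtype=np.float32), np.ones(5, dtype=np.float32)]}
padded = feature_extractor.pad(batch, padding=True, return_tensors="np")
print(padded["input_values"].shape)  # (2, 8)
print(padded["attention_mask"])      # zeros mark the padded positions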
env-llmeval/lib/python3.10/site-packages/transformers/file_utils.py ADDED
@@ -0,0 +1,133 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ File utilities: utilities related to download and cache models
16
+
17
+ This module should not be updated anymore and is only kept for backward compatibility.
18
+ """
19
+
20
+ from huggingface_hub import get_full_repo_name # for backward compatibility
21
+ from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY # for backward compatibility
22
+
23
+ from . import __version__
24
+
25
+ # Backward compatibility imports, to make sure all those objects can be found in file_utils
26
+ from .utils import (
27
+ CLOUDFRONT_DISTRIB_PREFIX,
28
+ CONFIG_NAME,
29
+ DUMMY_INPUTS,
30
+ DUMMY_MASK,
31
+ ENV_VARS_TRUE_AND_AUTO_VALUES,
32
+ ENV_VARS_TRUE_VALUES,
33
+ FEATURE_EXTRACTOR_NAME,
34
+ FLAX_WEIGHTS_NAME,
35
+ HF_MODULES_CACHE,
36
+ HUGGINGFACE_CO_PREFIX,
37
+ HUGGINGFACE_CO_RESOLVE_ENDPOINT,
38
+ MODEL_CARD_NAME,
39
+ MULTIPLE_CHOICE_DUMMY_INPUTS,
40
+ PYTORCH_PRETRAINED_BERT_CACHE,
41
+ PYTORCH_TRANSFORMERS_CACHE,
42
+ S3_BUCKET_PREFIX,
43
+ SENTENCEPIECE_UNDERLINE,
44
+ SPIECE_UNDERLINE,
45
+ TF2_WEIGHTS_NAME,
46
+ TF_WEIGHTS_NAME,
47
+ TORCH_FX_REQUIRED_VERSION,
48
+ TRANSFORMERS_CACHE,
49
+ TRANSFORMERS_DYNAMIC_MODULE_NAME,
50
+ USE_JAX,
51
+ USE_TF,
52
+ USE_TORCH,
53
+ WEIGHTS_INDEX_NAME,
54
+ WEIGHTS_NAME,
55
+ ContextManagers,
56
+ DummyObject,
57
+ EntryNotFoundError,
58
+ ExplicitEnum,
59
+ ModelOutput,
60
+ PaddingStrategy,
61
+ PushToHubMixin,
62
+ RepositoryNotFoundError,
63
+ RevisionNotFoundError,
64
+ TensorType,
65
+ _LazyModule,
66
+ add_code_sample_docstrings,
67
+ add_end_docstrings,
68
+ add_start_docstrings,
69
+ add_start_docstrings_to_model_forward,
70
+ cached_property,
71
+ copy_func,
72
+ default_cache_path,
73
+ define_sagemaker_information,
74
+ get_cached_models,
75
+ get_file_from_repo,
76
+ get_torch_version,
77
+ has_file,
78
+ http_user_agent,
79
+ is_apex_available,
80
+ is_bs4_available,
81
+ is_coloredlogs_available,
82
+ is_datasets_available,
83
+ is_detectron2_available,
84
+ is_faiss_available,
85
+ is_flax_available,
86
+ is_ftfy_available,
87
+ is_g2p_en_available,
88
+ is_in_notebook,
89
+ is_ipex_available,
90
+ is_librosa_available,
91
+ is_offline_mode,
92
+ is_onnx_available,
93
+ is_pandas_available,
94
+ is_phonemizer_available,
95
+ is_protobuf_available,
96
+ is_psutil_available,
97
+ is_py3nvml_available,
98
+ is_pyctcdecode_available,
99
+ is_pytesseract_available,
100
+ is_pytorch_quantization_available,
101
+ is_rjieba_available,
102
+ is_sagemaker_dp_enabled,
103
+ is_sagemaker_mp_enabled,
104
+ is_scipy_available,
105
+ is_sentencepiece_available,
106
+ is_seqio_available,
107
+ is_sklearn_available,
108
+ is_soundfile_availble,
109
+ is_spacy_available,
110
+ is_speech_available,
111
+ is_tensor,
112
+ is_tensorflow_probability_available,
113
+ is_tf2onnx_available,
114
+ is_tf_available,
115
+ is_timm_available,
116
+ is_tokenizers_available,
117
+ is_torch_available,
118
+ is_torch_bf16_available,
119
+ is_torch_cuda_available,
120
+ is_torch_fx_available,
121
+ is_torch_fx_proxy,
122
+ is_torch_mps_available,
123
+ is_torch_tf32_available,
124
+ is_torch_xla_available,
125
+ is_torchaudio_available,
126
+ is_training_run_on_sagemaker,
127
+ is_vision_available,
128
+ replace_return_docstrings,
129
+ requires_backends,
130
+ to_numpy,
131
+ to_py_obj,
132
+ torch_only_method,
133
+ )
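A quick sanity check of the backward-compatibility shim above: both import paths are expected to resolve to the same objects, with `transformers.utils` being the preferred path for new code:

from transformers.file_utils import is_torch_available as legacy_is_torch_available
from transformers.utils import is_torch_available

# The legacy module simply re-exports names from transformers.utils.
assert legacy_is_torch_available is is_torch_available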
env-llmeval/lib/python3.10/site-packages/transformers/generation_flax_utils.py ADDED
@@ -0,0 +1,28 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team.
3
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import warnings
18
+
19
+ from .generation import FlaxGenerationMixin
20
+
21
+
22
+ class FlaxGenerationMixin(FlaxGenerationMixin):
23
+ # warning at import time
24
+ warnings.warn(
25
+ "Importing `FlaxGenerationMixin` from `src/transformers/generation_flax_utils.py` is deprecated and will "
26
+ "be removed in Transformers v4.40. Import as `from transformers import FlaxGenerationMixin` instead.",
27
+ FutureWarning,
28
+ )
env-llmeval/lib/python3.10/site-packages/transformers/hf_argparser.py ADDED
@@ -0,0 +1,419 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import dataclasses
16
+ import json
17
+ import sys
18
+ import types
19
+ from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
20
+ from copy import copy
21
+ from enum import Enum
22
+ from inspect import isclass
23
+ from pathlib import Path
24
+ from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
25
+
26
+ import yaml
27
+
28
+
29
+ DataClass = NewType("DataClass", Any)
30
+ DataClassType = NewType("DataClassType", Any)
31
+
32
+
33
+ # From https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
34
+ def string_to_bool(v):
35
+ if isinstance(v, bool):
36
+ return v
37
+ if v.lower() in ("yes", "true", "t", "y", "1"):
38
+ return True
39
+ elif v.lower() in ("no", "false", "f", "n", "0"):
40
+ return False
41
+ else:
42
+ raise ArgumentTypeError(
43
+ f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
44
+ )
45
+
46
+
47
+ def make_choice_type_function(choices: list) -> Callable[[str], Any]:
48
+ """
49
+ Creates a mapping function from each choices string representation to the actual value. Used to support multiple
50
+ value types for a single argument.
51
+
52
+ Args:
53
+ choices (list): List of choices.
54
+
55
+ Returns:
56
+ Callable[[str], Any]: Mapping function from string representation to actual value for each choice.
57
+ """
58
+ str_to_choice = {str(choice): choice for choice in choices}
59
+ return lambda arg: str_to_choice.get(arg, arg)
60
+
61
+
62
+ def HfArg(
63
+ *,
64
+ aliases: Union[str, List[str]] = None,
65
+ help: str = None,
66
+ default: Any = dataclasses.MISSING,
67
+ default_factory: Callable[[], Any] = dataclasses.MISSING,
68
+ metadata: dict = None,
69
+ **kwargs,
70
+ ) -> dataclasses.Field:
71
+ """Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`.
72
+
73
+ Example comparing the use of `HfArg` and `dataclasses.field`:
74
+ ```
75
+ @dataclass
76
+ class Args:
77
+ regular_arg: str = dataclasses.field(default="Huggingface", metadata={"aliases": ["--example", "-e"], "help": "This syntax could be better!"})
78
+ hf_arg: str = HfArg(default="Huggingface", aliases=["--example", "-e"], help="What a nice syntax!")
79
+ ```
80
+
81
+ Args:
82
+ aliases (Union[str, List[str]], optional):
83
+ Single string or list of strings of aliases to pass on to argparse, e.g. `aliases=["--example", "-e"]`.
84
+ Defaults to None.
85
+ help (str, optional): Help string to pass on to argparse that can be displayed with --help. Defaults to None.
86
+ default (Any, optional):
87
+ Default value for the argument. If neither default nor default_factory is specified, the argument is required.
88
+ Defaults to dataclasses.MISSING.
89
+ default_factory (Callable[[], Any], optional):
90
+ The default_factory is a 0-argument function called to initialize a field's value. It is useful to provide
91
+ default values for mutable types, e.g. lists: `default_factory=list`. Mutually exclusive with `default=`.
92
+ Defaults to dataclasses.MISSING.
93
+ metadata (dict, optional): Further metadata to pass on to `dataclasses.field`. Defaults to None.
94
+
95
+ Returns:
96
+ Field: A `dataclasses.Field` with the desired properties.
97
+ """
98
+ if metadata is None:
99
+ # Important, don't use as default param in function signature because dict is mutable and shared across function calls
100
+ metadata = {}
101
+ if aliases is not None:
102
+ metadata["aliases"] = aliases
103
+ if help is not None:
104
+ metadata["help"] = help
105
+
106
+ return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
107
+
108
+
109
+ class HfArgumentParser(ArgumentParser):
110
+ """
111
+ This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
112
+
113
+ The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed)
114
+ arguments to the parser after initialization and you'll get the output back after parsing as an additional
115
+ namespace. Optional: To create sub argument groups use the `_argument_group_name` attribute in the dataclass.
116
+ """
117
+
118
+ dataclass_types: Iterable[DataClassType]
119
+
120
+ def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
121
+ """
122
+ Args:
123
+ dataclass_types:
124
+ Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.
125
+ kwargs (`Dict[str, Any]`, *optional*):
126
+ Passed to `argparse.ArgumentParser()` in the regular way.
127
+ """
128
+ # To make the default appear when using --help
129
+ if "formatter_class" not in kwargs:
130
+ kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
131
+ super().__init__(**kwargs)
132
+ if dataclasses.is_dataclass(dataclass_types):
133
+ dataclass_types = [dataclass_types]
134
+ self.dataclass_types = list(dataclass_types)
135
+ for dtype in self.dataclass_types:
136
+ self._add_dataclass_arguments(dtype)
137
+
138
+ @staticmethod
139
+ def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
140
+ field_name = f"--{field.name}"
141
+ kwargs = field.metadata.copy()
142
+ # field.metadata is not used at all by Data Classes,
143
+ # it is provided as a third-party extension mechanism.
144
+ if isinstance(field.type, str):
145
+ raise RuntimeError(
146
+ "Unresolved type detected, which should have been done with the help of "
147
+ "`typing.get_type_hints` method by default"
148
+ )
149
+
150
+ aliases = kwargs.pop("aliases", [])
151
+ if isinstance(aliases, str):
152
+ aliases = [aliases]
153
+
154
+ origin_type = getattr(field.type, "__origin__", field.type)
155
+ if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
156
+ if str not in field.type.__args__ and (
157
+ len(field.type.__args__) != 2 or type(None) not in field.type.__args__
158
+ ):
159
+ raise ValueError(
160
+ "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
161
+ " the argument parser only supports one type per argument."
162
+ f" Problem encountered in field '{field.name}'."
163
+ )
164
+ if type(None) not in field.type.__args__:
165
+ # filter `str` in Union
166
+ field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
167
+ origin_type = getattr(field.type, "__origin__", field.type)
168
+ elif bool not in field.type.__args__:
169
+ # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
170
+ field.type = (
171
+ field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
172
+ )
173
+ origin_type = getattr(field.type, "__origin__", field.type)
174
+
175
+ # A variable to store kwargs for a boolean field, if needed
176
+ # so that we can init a `no_*` complement argument (see below)
177
+ bool_kwargs = {}
178
+ if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
179
+ if origin_type is Literal:
180
+ kwargs["choices"] = field.type.__args__
181
+ else:
182
+ kwargs["choices"] = [x.value for x in field.type]
183
+
184
+ kwargs["type"] = make_choice_type_function(kwargs["choices"])
185
+
186
+ if field.default is not dataclasses.MISSING:
187
+ kwargs["default"] = field.default
188
+ else:
189
+ kwargs["required"] = True
190
+ elif field.type is bool or field.type == Optional[bool]:
191
+ # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
192
+ # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
193
+ bool_kwargs = copy(kwargs)
194
+
195
+ # Hack because type=bool in argparse does not behave as we want.
196
+ kwargs["type"] = string_to_bool
197
+ if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
198
+ # Default to False when the field is of type bool and no default was provided.
199
+ default = False if field.default is dataclasses.MISSING else field.default
200
+ # This is the value that will get picked if we don't include --field_name in any way
201
+ kwargs["default"] = default
202
+ # This tells argparse we accept 0 or 1 value after --field_name
203
+ kwargs["nargs"] = "?"
204
+ # This is the value that will get picked if we do --field_name (without value)
205
+ kwargs["const"] = True
206
+ elif isclass(origin_type) and issubclass(origin_type, list):
207
+ kwargs["type"] = field.type.__args__[0]
208
+ kwargs["nargs"] = "+"
209
+ if field.default_factory is not dataclasses.MISSING:
210
+ kwargs["default"] = field.default_factory()
211
+ elif field.default is dataclasses.MISSING:
212
+ kwargs["required"] = True
213
+ else:
214
+ kwargs["type"] = field.type
215
+ if field.default is not dataclasses.MISSING:
216
+ kwargs["default"] = field.default
217
+ elif field.default_factory is not dataclasses.MISSING:
218
+ kwargs["default"] = field.default_factory()
219
+ else:
220
+ kwargs["required"] = True
221
+ parser.add_argument(field_name, *aliases, **kwargs)
222
+
223
+ # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
224
+ # Order is important for arguments with the same destination!
225
+ # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
226
+ # here and we do not need those changes/additional keys.
227
+ if field.default is True and (field.type is bool or field.type == Optional[bool]):
228
+ bool_kwargs["default"] = False
229
+ parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
230
+
231
+ def _add_dataclass_arguments(self, dtype: DataClassType):
232
+ if hasattr(dtype, "_argument_group_name"):
233
+ parser = self.add_argument_group(dtype._argument_group_name)
234
+ else:
235
+ parser = self
236
+
237
+ try:
238
+ type_hints: Dict[str, type] = get_type_hints(dtype)
239
+ except NameError:
240
+ raise RuntimeError(
241
+ f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
242
+ "removing line of `from __future__ import annotations` which opts in Postponed "
243
+ "Evaluation of Annotations (PEP 563)"
244
+ )
245
+ except TypeError as ex:
246
+ # Remove this block when we drop Python 3.9 support
247
+ if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
248
+ python_version = ".".join(map(str, sys.version_info[:3]))
249
+ raise RuntimeError(
250
+ f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
251
+ "line of `from __future__ import annotations` which opts in union types as "
252
+ "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
253
+ "support Python versions that lower than 3.10, you need to use "
254
+ "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
255
+ "`X | None`."
256
+ ) from ex
257
+ raise
258
+
259
+ for field in dataclasses.fields(dtype):
260
+ if not field.init:
261
+ continue
262
+ field.type = type_hints[field.name]
263
+ self._parse_dataclass_field(parser, field)
264
+
265
+ def parse_args_into_dataclasses(
266
+ self,
267
+ args=None,
268
+ return_remaining_strings=False,
269
+ look_for_args_file=True,
270
+ args_filename=None,
271
+ args_file_flag=None,
272
+ ) -> Tuple[DataClass, ...]:
273
+ """
274
+ Parse command-line args into instances of the specified dataclass types.
275
+
276
+ This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:
277
+ docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args
278
+
279
+ Args:
280
+ args:
281
+ List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)
282
+ return_remaining_strings:
283
+ If true, also return a list of remaining argument strings.
284
+ look_for_args_file:
285
+ If true, will look for a ".args" file with the same base name as the entry point script for this
286
+ process, and will append its potential content to the command line args.
287
+ args_filename:
288
+ If not None, will use this file instead of the ".args" file specified in the previous argument.
289
+ args_file_flag:
290
+ If not None, will look for a file in the command-line args specified with this flag. The flag can be
291
+ specified multiple times and precedence is determined by the order (last one wins).
292
+
293
+ Returns:
294
+ Tuple consisting of:
295
+
296
+ - the dataclass instances in the same order as they were passed to the initializer.
297
+ - if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser
298
+ after initialization.
299
+ - The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)
300
+ """
301
+
302
+ if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
303
+ args_files = []
304
+
305
+ if args_filename:
306
+ args_files.append(Path(args_filename))
307
+ elif look_for_args_file and len(sys.argv):
308
+ args_files.append(Path(sys.argv[0]).with_suffix(".args"))
309
+
310
+ # args files specified via command line flag should overwrite default args files so we add them last
311
+ if args_file_flag:
312
+ # Create special parser just to extract the args_file_flag values
313
+ args_file_parser = ArgumentParser()
314
+ args_file_parser.add_argument(args_file_flag, type=str, action="append")
315
+
316
+ # Use only remaining args for further parsing (remove the args_file_flag)
317
+ cfg, args = args_file_parser.parse_known_args(args=args)
318
+ cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
319
+
320
+ if cmd_args_file_paths:
321
+ args_files.extend([Path(p) for p in cmd_args_file_paths])
322
+
323
+ file_args = []
324
+ for args_file in args_files:
325
+ if args_file.exists():
326
+ file_args += args_file.read_text().split()
327
+
328
+ # in case of duplicate arguments the last one has precedence
329
+ # args specified via the command line should overwrite args from files, so we add them last
330
+ args = file_args + args if args is not None else file_args + sys.argv[1:]
331
+ namespace, remaining_args = self.parse_known_args(args=args)
332
+ outputs = []
333
+ for dtype in self.dataclass_types:
334
+ keys = {f.name for f in dataclasses.fields(dtype) if f.init}
335
+ inputs = {k: v for k, v in vars(namespace).items() if k in keys}
336
+ for k in keys:
337
+ delattr(namespace, k)
338
+ obj = dtype(**inputs)
339
+ outputs.append(obj)
340
+ if len(namespace.__dict__) > 0:
341
+ # additional namespace.
342
+ outputs.append(namespace)
343
+ if return_remaining_strings:
344
+ return (*outputs, remaining_args)
345
+ else:
346
+ if remaining_args:
347
+ raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
348
+
349
+ return (*outputs,)
350
+
351
+ def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
352
+ """
353
+ Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass
354
+ types.
355
+
356
+ Args:
357
+ args (`dict`):
358
+ dict containing config values
359
+ allow_extra_keys (`bool`, *optional*, defaults to `False`):
360
+ Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed.
361
+
362
+ Returns:
363
+ Tuple consisting of:
364
+
365
+ - the dataclass instances in the same order as they were passed to the initializer.
366
+ """
367
+ unused_keys = set(args.keys())
368
+ outputs = []
369
+ for dtype in self.dataclass_types:
370
+ keys = {f.name for f in dataclasses.fields(dtype) if f.init}
371
+ inputs = {k: v for k, v in args.items() if k in keys}
372
+ unused_keys.difference_update(inputs.keys())
373
+ obj = dtype(**inputs)
374
+ outputs.append(obj)
375
+ if not allow_extra_keys and unused_keys:
376
+ raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
377
+ return tuple(outputs)
378
+
379
+ def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
380
+ """
381
+ Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the
382
+ dataclass types.
383
+
384
+ Args:
385
+ json_file (`str` or `os.PathLike`):
386
+ File name of the json file to parse
387
+ allow_extra_keys (`bool`, *optional*, defaults to `False`):
388
+ If `False`, will raise an exception if the json file contains keys that are not
389
+ parsed.
390
+
391
+ Returns:
392
+ Tuple consisting of:
393
+
394
+ - the dataclass instances in the same order as they were passed to the initializer.
395
+ """
396
+ with open(Path(json_file), encoding="utf-8") as open_json_file:
397
+ data = json.loads(open_json_file.read())
398
+ outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
399
+ return tuple(outputs)
400
+
401
+ def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
402
+ """
403
+ Alternative helper method that does not use `argparse` at all, instead loading a yaml file and populating the
404
+ dataclass types.
405
+
406
+ Args:
407
+ yaml_file (`str` or `os.PathLike`):
408
+ File name of the yaml file to parse
409
+ allow_extra_keys (`bool`, *optional*, defaults to `False`):
410
+ If `False`, will raise an exception if the yaml file contains keys that are not
411
+ parsed.
412
+
413
+ Returns:
414
+ Tuple consisting of:
415
+
416
+ - the dataclass instances in the same order as they were passed to the initializer.
417
+ """
418
+ outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
419
+ return tuple(outputs)
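For orientation, here is a minimal usage sketch of the parsing helpers added above. It is not part of the diff, and `TrainingConfig` with its fields is a hypothetical example dataclass.

```python
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainingConfig:
    # Hypothetical example dataclass, not part of the library.
    learning_rate: float = field(default=5e-5)
    num_epochs: int = field(default=3)


parser = HfArgumentParser(TrainingConfig)

# parse_dict bypasses argparse entirely and fills the dataclass from a dict;
# unknown keys raise a ValueError unless allow_extra_keys=True.
(config,) = parser.parse_dict({"learning_rate": 1e-4, "num_epochs": 5})

# parse_json_file / parse_yaml_file work the same way but read the dict from disk first:
# (config,) = parser.parse_json_file("config.json")
```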
env-llmeval/lib/python3.10/site-packages/transformers/hyperparameter_search.py ADDED
@@ -0,0 +1,141 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from .integrations import (
17
+ is_optuna_available,
18
+ is_ray_tune_available,
19
+ is_sigopt_available,
20
+ is_wandb_available,
21
+ run_hp_search_optuna,
22
+ run_hp_search_ray,
23
+ run_hp_search_sigopt,
24
+ run_hp_search_wandb,
25
+ )
26
+ from .trainer_utils import (
27
+ HPSearchBackend,
28
+ default_hp_space_optuna,
29
+ default_hp_space_ray,
30
+ default_hp_space_sigopt,
31
+ default_hp_space_wandb,
32
+ )
33
+ from .utils import logging
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+
39
+ class HyperParamSearchBackendBase:
40
+ name: str
41
+ pip_package: str = None
42
+
43
+ @staticmethod
44
+ def is_available():
45
+ raise NotImplementedError
46
+
47
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
48
+ raise NotImplementedError
49
+
50
+ def default_hp_space(self, trial):
51
+ raise NotImplementedError
52
+
53
+ def ensure_available(self):
54
+ if not self.is_available():
55
+ raise RuntimeError(
56
+ f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
57
+ )
58
+
59
+ @classmethod
60
+ def pip_install(cls):
61
+ return f"`pip install {cls.pip_package or cls.name}`"
62
+
63
+
64
+ class OptunaBackend(HyperParamSearchBackendBase):
65
+ name = "optuna"
66
+
67
+ @staticmethod
68
+ def is_available():
69
+ return is_optuna_available()
70
+
71
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
72
+ return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)
73
+
74
+ def default_hp_space(self, trial):
75
+ return default_hp_space_optuna(trial)
76
+
77
+
78
+ class RayTuneBackend(HyperParamSearchBackendBase):
79
+ name = "ray"
80
+ pip_package = "'ray[tune]'"
81
+
82
+ @staticmethod
83
+ def is_available():
84
+ return is_ray_tune_available()
85
+
86
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
87
+ return run_hp_search_ray(trainer, n_trials, direction, **kwargs)
88
+
89
+ def default_hp_space(self, trial):
90
+ return default_hp_space_ray(trial)
91
+
92
+
93
+ class SigOptBackend(HyperParamSearchBackendBase):
94
+ name = "sigopt"
95
+
96
+ @staticmethod
97
+ def is_available():
98
+ return is_sigopt_available()
99
+
100
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
101
+ return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)
102
+
103
+ def default_hp_space(self, trial):
104
+ return default_hp_space_sigopt(trial)
105
+
106
+
107
+ class WandbBackend(HyperParamSearchBackendBase):
108
+ name = "wandb"
109
+
110
+ @staticmethod
111
+ def is_available():
112
+ return is_wandb_available()
113
+
114
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
115
+ return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)
116
+
117
+ def default_hp_space(self, trial):
118
+ return default_hp_space_wandb(trial)
119
+
120
+
121
+ ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
122
+ HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
123
+ }
124
+
125
+
126
+ def default_hp_search_backend() -> str:
127
+ available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
128
+ if len(available_backends) > 0:
129
+ name = available_backends[0].name
130
+ if len(available_backends) > 1:
131
+ logger.info(
132
+ f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
133
+ )
134
+ return name
135
+ raise RuntimeError(
136
+ "No hyperparameter search backend available.\n"
137
+ + "\n".join(
138
+ f" - To install {backend.name} run {backend.pip_install()}"
139
+ for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
140
+ )
141
+ )
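For context, here is a minimal sketch (not part of the diff) of how the backend registry above fits together. In practice `Trainer.hyperparameter_search` drives these classes; the commented-out `trainer` below is assumed to be an existing `Trainer` instance.

```python
from transformers.hyperparameter_search import (
    ALL_HYPERPARAMETER_SEARCH_BACKENDS,
    default_hp_search_backend,
)
from transformers.trainer_utils import HPSearchBackend

# Returns the name of the first installed backend (optuna, ray, sigopt or wandb),
# or raises a RuntimeError listing the pip command for each backend.
name = default_hp_search_backend()

# The registry is keyed by the HPSearchBackend enum, so convert the name first.
backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
backend.ensure_available()  # re-checks the import and raises with a pip hint if missing

# backend.run(trainer, n_trials=10, direction="minimize")  # `trainer`: an existing Trainer instance
```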
env-llmeval/lib/python3.10/site-packages/transformers/image_processing_utils.py ADDED
@@ -0,0 +1,793 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import copy
17
+ import json
18
+ import os
19
+ import warnings
20
+ from io import BytesIO
21
+ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import requests
25
+
26
+ from .dynamic_module_utils import custom_object_save
27
+ from .feature_extraction_utils import BatchFeature as BaseBatchFeature
28
+ from .image_transforms import center_crop, normalize, rescale
29
+ from .image_utils import ChannelDimension
30
+ from .utils import (
31
+ IMAGE_PROCESSOR_NAME,
32
+ PushToHubMixin,
33
+ add_model_info_to_auto_map,
34
+ cached_file,
35
+ copy_func,
36
+ download_url,
37
+ is_offline_mode,
38
+ is_remote_url,
39
+ is_vision_available,
40
+ logging,
41
+ )
42
+
43
+
44
+ if is_vision_available():
45
+ from PIL import Image
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ # TODO: Move BatchFeature to be imported by both feature_extraction_utils and image_processing_utils
51
+ # We override the class string here, but logic is the same.
52
+ class BatchFeature(BaseBatchFeature):
53
+ r"""
54
+ Holds the output of the image processor specific `__call__` methods.
55
+
56
+ This class is derived from a python dictionary and can be used as a dictionary.
57
+
58
+ Args:
59
+ data (`dict`):
60
+ Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.).
61
+ tensor_type (`Union[None, str, TensorType]`, *optional*):
62
+ You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
63
+ initialization.
64
+ """
65
+
66
+
67
+ # TODO: (Amy) - factor out the common parts of this and the feature extractor
68
+ class ImageProcessingMixin(PushToHubMixin):
69
+ """
70
+ This is an image processor mixin used to provide saving/loading functionality for sequential and image feature
71
+ extractors.
72
+ """
73
+
74
+ _auto_class = None
75
+
76
+ def __init__(self, **kwargs):
77
+ """Set elements of `kwargs` as attributes."""
78
+ # This key was saved while we still used `XXXFeatureExtractor` for image processing. Now that we use
79
+ # `XXXImageProcessor`, this attribute and its value are misleading.
80
+ kwargs.pop("feature_extractor_type", None)
81
+ # Pop "processor_class" as it should be saved as private attribute
82
+ self._processor_class = kwargs.pop("processor_class", None)
83
+ # Additional attributes without default values
84
+ for key, value in kwargs.items():
85
+ try:
86
+ setattr(self, key, value)
87
+ except AttributeError as err:
88
+ logger.error(f"Can't set {key} with value {value} for {self}")
89
+ raise err
90
+
91
+ def _set_processor_class(self, processor_class: str):
92
+ """Sets processor class as an attribute."""
93
+ self._processor_class = processor_class
94
+
95
+ @classmethod
96
+ def from_pretrained(
97
+ cls,
98
+ pretrained_model_name_or_path: Union[str, os.PathLike],
99
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
100
+ force_download: bool = False,
101
+ local_files_only: bool = False,
102
+ token: Optional[Union[str, bool]] = None,
103
+ revision: str = "main",
104
+ **kwargs,
105
+ ):
106
+ r"""
107
+ Instantiate a type of [`~image_processing_utils.ImageProcessingMixin`] from an image processor.
108
+
109
+ Args:
110
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
111
+ This can be either:
112
+
113
+ - a string, the *model id* of a pretrained image_processor hosted inside a model repo on
114
+ huggingface.co.
115
+ - a path to a *directory* containing an image processor file saved using the
116
+ [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g.,
117
+ `./my_model_directory/`.
118
+ - a path or url to a saved image processor JSON *file*, e.g.,
119
+ `./my_model_directory/preprocessor_config.json`.
120
+ cache_dir (`str` or `os.PathLike`, *optional*):
121
+ Path to a directory in which a downloaded pretrained model image processor should be cached if the
122
+ standard cache should not be used.
123
+ force_download (`bool`, *optional*, defaults to `False`):
124
+ Whether or not to force the (re-)download of the image processor files and override the cached versions if
125
+ they exist.
126
+ resume_download (`bool`, *optional*, defaults to `False`):
127
+ Whether or not to delete an incompletely received file. Attempts to resume the download if such a file
128
+ exists.
129
+ proxies (`Dict[str, str]`, *optional*):
130
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
131
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
132
+ token (`str` or `bool`, *optional*):
133
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
134
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
135
+ revision (`str`, *optional*, defaults to `"main"`):
136
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
137
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
138
+ identifier allowed by git.
139
+
140
+
141
+ <Tip>
142
+
143
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
144
+
145
+ </Tip>
146
+
147
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
148
+ If `False`, then this function returns just the final image processor object. If `True`, then this
149
+ function returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
150
+ consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of
151
+ `kwargs` which has not been used to update `image_processor` and is otherwise ignored.
152
+ subfolder (`str`, *optional*, defaults to `""`):
153
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
154
+ specify the folder name here.
155
+ kwargs (`Dict[str, Any]`, *optional*):
156
+ The values in kwargs of any keys which are image processor attributes will be used to override the
157
+ loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is
158
+ controlled by the `return_unused_kwargs` keyword parameter.
159
+
160
+ Returns:
161
+ An image processor of type [`~image_processing_utils.ImageProcessingMixin`].
162
+
163
+ Examples:
164
+
165
+ ```python
166
+ # We can't instantiate directly the base class *ImageProcessingMixin* so let's show the examples on a
167
+ # derived class: *CLIPImageProcessor*
168
+ image_processor = CLIPImageProcessor.from_pretrained(
169
+ "openai/clip-vit-base-patch32"
170
+ ) # Download image_processing_config from huggingface.co and cache.
171
+ image_processor = CLIPImageProcessor.from_pretrained(
172
+ "./test/saved_model/"
173
+ ) # E.g. image processor (or model) was saved using *save_pretrained('./test/saved_model/')*
174
+ image_processor = CLIPImageProcessor.from_pretrained("./test/saved_model/preprocessor_config.json")
175
+ image_processor = CLIPImageProcessor.from_pretrained(
176
+ "openai/clip-vit-base-patch32", do_normalize=False, foo=False
177
+ )
178
+ assert image_processor.do_normalize is False
179
+ image_processor, unused_kwargs = CLIPImageProcessor.from_pretrained(
180
+ "openai/clip-vit-base-patch32", do_normalize=False, foo=False, return_unused_kwargs=True
181
+ )
182
+ assert image_processor.do_normalize is False
183
+ assert unused_kwargs == {"foo": False}
184
+ ```"""
185
+ kwargs["cache_dir"] = cache_dir
186
+ kwargs["force_download"] = force_download
187
+ kwargs["local_files_only"] = local_files_only
188
+ kwargs["revision"] = revision
189
+
190
+ use_auth_token = kwargs.pop("use_auth_token", None)
191
+ if use_auth_token is not None:
192
+ warnings.warn(
193
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
194
+ FutureWarning,
195
+ )
196
+ if token is not None:
197
+ raise ValueError(
198
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
199
+ )
200
+ token = use_auth_token
201
+
202
+ if token is not None:
203
+ kwargs["token"] = token
204
+
205
+ image_processor_dict, kwargs = cls.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
206
+
207
+ return cls.from_dict(image_processor_dict, **kwargs)
208
+
209
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
210
+ """
211
+ Save an image processor object to the directory `save_directory`, so that it can be re-loaded using the
212
+ [`~image_processing_utils.ImageProcessingMixin.from_pretrained`] class method.
213
+
214
+ Args:
215
+ save_directory (`str` or `os.PathLike`):
216
+ Directory where the image processor JSON file will be saved (will be created if it does not exist).
217
+ push_to_hub (`bool`, *optional*, defaults to `False`):
218
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
219
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
220
+ namespace).
221
+ kwargs (`Dict[str, Any]`, *optional*):
222
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
223
+ """
224
+ use_auth_token = kwargs.pop("use_auth_token", None)
225
+
226
+ if use_auth_token is not None:
227
+ warnings.warn(
228
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
229
+ FutureWarning,
230
+ )
231
+ if kwargs.get("token", None) is not None:
232
+ raise ValueError(
233
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
234
+ )
235
+ kwargs["token"] = use_auth_token
236
+
237
+ if os.path.isfile(save_directory):
238
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
239
+
240
+ os.makedirs(save_directory, exist_ok=True)
241
+
242
+ if push_to_hub:
243
+ commit_message = kwargs.pop("commit_message", None)
244
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
245
+ repo_id = self._create_repo(repo_id, **kwargs)
246
+ files_timestamps = self._get_files_timestamps(save_directory)
247
+
248
+ # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
249
+ # loaded from the Hub.
250
+ if self._auto_class is not None:
251
+ custom_object_save(self, save_directory, config=self)
252
+
253
+ # If we save using the predefined names, we can load using `from_pretrained`
254
+ output_image_processor_file = os.path.join(save_directory, IMAGE_PROCESSOR_NAME)
255
+
256
+ self.to_json_file(output_image_processor_file)
257
+ logger.info(f"Image processor saved in {output_image_processor_file}")
258
+
259
+ if push_to_hub:
260
+ self._upload_modified_files(
261
+ save_directory,
262
+ repo_id,
263
+ files_timestamps,
264
+ commit_message=commit_message,
265
+ token=kwargs.get("token"),
266
+ )
267
+
268
+ return [output_image_processor_file]
269
+
270
+ @classmethod
271
+ def get_image_processor_dict(
272
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
273
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
274
+ """
275
+ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating an
276
+ image processor of type [`~image_processing_utils.ImageProcessingMixin`] using `from_dict`.
277
+
278
+ Parameters:
279
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
280
+ The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
281
+ subfolder (`str`, *optional*, defaults to `""`):
282
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
283
+ specify the folder name here.
284
+
285
+ Returns:
286
+ `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the image processor object.
287
+ """
288
+ cache_dir = kwargs.pop("cache_dir", None)
289
+ force_download = kwargs.pop("force_download", False)
290
+ resume_download = kwargs.pop("resume_download", False)
291
+ proxies = kwargs.pop("proxies", None)
292
+ token = kwargs.pop("token", None)
293
+ use_auth_token = kwargs.pop("use_auth_token", None)
294
+ local_files_only = kwargs.pop("local_files_only", False)
295
+ revision = kwargs.pop("revision", None)
296
+ subfolder = kwargs.pop("subfolder", "")
297
+
298
+ from_pipeline = kwargs.pop("_from_pipeline", None)
299
+ from_auto_class = kwargs.pop("_from_auto", False)
300
+
301
+ if use_auth_token is not None:
302
+ warnings.warn(
303
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
304
+ FutureWarning,
305
+ )
306
+ if token is not None:
307
+ raise ValueError(
308
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
309
+ )
310
+ token = use_auth_token
311
+
312
+ user_agent = {"file_type": "image processor", "from_auto_class": from_auto_class}
313
+ if from_pipeline is not None:
314
+ user_agent["using_pipeline"] = from_pipeline
315
+
316
+ if is_offline_mode() and not local_files_only:
317
+ logger.info("Offline mode: forcing local_files_only=True")
318
+ local_files_only = True
319
+
320
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
321
+ is_local = os.path.isdir(pretrained_model_name_or_path)
322
+ if os.path.isdir(pretrained_model_name_or_path):
323
+ image_processor_file = os.path.join(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME)
324
+ if os.path.isfile(pretrained_model_name_or_path):
325
+ resolved_image_processor_file = pretrained_model_name_or_path
326
+ is_local = True
327
+ elif is_remote_url(pretrained_model_name_or_path):
328
+ image_processor_file = pretrained_model_name_or_path
329
+ resolved_image_processor_file = download_url(pretrained_model_name_or_path)
330
+ else:
331
+ image_processor_file = IMAGE_PROCESSOR_NAME
332
+ try:
333
+ # Load from local folder or from cache or download from model Hub and cache
334
+ resolved_image_processor_file = cached_file(
335
+ pretrained_model_name_or_path,
336
+ image_processor_file,
337
+ cache_dir=cache_dir,
338
+ force_download=force_download,
339
+ proxies=proxies,
340
+ resume_download=resume_download,
341
+ local_files_only=local_files_only,
342
+ token=token,
343
+ user_agent=user_agent,
344
+ revision=revision,
345
+ subfolder=subfolder,
346
+ )
347
+ except EnvironmentError:
348
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
349
+ # the original exception.
350
+ raise
351
+ except Exception:
352
+ # For any other exception, we throw a generic error.
353
+ raise EnvironmentError(
354
+ f"Can't load image processor for '{pretrained_model_name_or_path}'. If you were trying to load"
355
+ " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
356
+ f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
357
+ f" directory containing a {IMAGE_PROCESSOR_NAME} file"
358
+ )
359
+
360
+ try:
361
+ # Load image_processor dict
362
+ with open(resolved_image_processor_file, "r", encoding="utf-8") as reader:
363
+ text = reader.read()
364
+ image_processor_dict = json.loads(text)
365
+
366
+ except json.JSONDecodeError:
367
+ raise EnvironmentError(
368
+ f"It looks like the config file at '{resolved_image_processor_file}' is not a valid JSON file."
369
+ )
370
+
371
+ if is_local:
372
+ logger.info(f"loading configuration file {resolved_image_processor_file}")
373
+ else:
374
+ logger.info(
375
+ f"loading configuration file {image_processor_file} from cache at {resolved_image_processor_file}"
376
+ )
377
+
378
+ if "auto_map" in image_processor_dict and not is_local:
379
+ image_processor_dict["auto_map"] = add_model_info_to_auto_map(
380
+ image_processor_dict["auto_map"], pretrained_model_name_or_path
381
+ )
382
+
383
+ return image_processor_dict, kwargs
384
+
385
+ @classmethod
386
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
387
+ """
388
+ Instantiates a type of [`~image_processing_utils.ImageProcessingMixin`] from a Python dictionary of parameters.
389
+
390
+ Args:
391
+ image_processor_dict (`Dict[str, Any]`):
392
+ Dictionary that will be used to instantiate the image processor object. Such a dictionary can be
393
+ retrieved from a pretrained checkpoint by leveraging the
394
+ [`~image_processing_utils.ImageProcessingMixin.to_dict`] method.
395
+ kwargs (`Dict[str, Any]`):
396
+ Additional parameters from which to initialize the image processor object.
397
+
398
+ Returns:
399
+ [`~image_processing_utils.ImageProcessingMixin`]: The image processor object instantiated from those
400
+ parameters.
401
+ """
402
+ image_processor_dict = image_processor_dict.copy()
403
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
404
+
405
+ # The `size` parameter is a dict and was previously an int or tuple in feature extractors.
406
+ # We set `size` here directly to the `image_processor_dict` so that it is converted to the appropriate
407
+ # dict within the image processor and isn't overwritten if `size` is passed in as a kwarg.
408
+ if "size" in kwargs and "size" in image_processor_dict:
409
+ image_processor_dict["size"] = kwargs.pop("size")
410
+ if "crop_size" in kwargs and "crop_size" in image_processor_dict:
411
+ image_processor_dict["crop_size"] = kwargs.pop("crop_size")
412
+
413
+ image_processor = cls(**image_processor_dict)
414
+
415
+ # Update image_processor with kwargs if needed
416
+ to_remove = []
417
+ for key, value in kwargs.items():
418
+ if hasattr(image_processor, key):
419
+ setattr(image_processor, key, value)
420
+ to_remove.append(key)
421
+ for key in to_remove:
422
+ kwargs.pop(key, None)
423
+
424
+ logger.info(f"Image processor {image_processor}")
425
+ if return_unused_kwargs:
426
+ return image_processor, kwargs
427
+ else:
428
+ return image_processor
429
+
430
+ def to_dict(self) -> Dict[str, Any]:
431
+ """
432
+ Serializes this instance to a Python dictionary.
433
+
434
+ Returns:
435
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this image processor instance.
436
+ """
437
+ output = copy.deepcopy(self.__dict__)
438
+ output["image_processor_type"] = self.__class__.__name__
439
+
440
+ return output
441
+
442
+ @classmethod
443
+ def from_json_file(cls, json_file: Union[str, os.PathLike]):
444
+ """
445
+ Instantiates an image processor of type [`~image_processing_utils.ImageProcessingMixin`] from the path to a JSON
446
+ file of parameters.
447
+
448
+ Args:
449
+ json_file (`str` or `os.PathLike`):
450
+ Path to the JSON file containing the parameters.
451
+
452
+ Returns:
453
+ An image processor of type [`~image_processing_utils.ImageProcessingMixin`]: The image_processor object
454
+ instantiated from that JSON file.
455
+ """
456
+ with open(json_file, "r", encoding="utf-8") as reader:
457
+ text = reader.read()
458
+ image_processor_dict = json.loads(text)
459
+ return cls(**image_processor_dict)
460
+
461
+ def to_json_string(self) -> str:
462
+ """
463
+ Serializes this instance to a JSON string.
464
+
465
+ Returns:
466
+ `str`: String containing all the attributes that make up this image_processor instance in JSON format.
467
+ """
468
+ dictionary = self.to_dict()
469
+
470
+ for key, value in dictionary.items():
471
+ if isinstance(value, np.ndarray):
472
+ dictionary[key] = value.tolist()
473
+
474
+ # make sure private name "_processor_class" is correctly
475
+ # saved as "processor_class"
476
+ _processor_class = dictionary.pop("_processor_class", None)
477
+ if _processor_class is not None:
478
+ dictionary["processor_class"] = _processor_class
479
+
480
+ return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"
481
+
482
+ def to_json_file(self, json_file_path: Union[str, os.PathLike]):
483
+ """
484
+ Save this instance to a JSON file.
485
+
486
+ Args:
487
+ json_file_path (`str` or `os.PathLike`):
488
+ Path to the JSON file in which this image_processor instance's parameters will be saved.
489
+ """
490
+ with open(json_file_path, "w", encoding="utf-8") as writer:
491
+ writer.write(self.to_json_string())
492
+
493
+ def __repr__(self):
494
+ return f"{self.__class__.__name__} {self.to_json_string()}"
495
+
496
+ @classmethod
497
+ def register_for_auto_class(cls, auto_class="AutoImageProcessor"):
498
+ """
499
+ Register this class with a given auto class. This should only be used for custom image processors as the ones
500
+ in the library are already mapped with `AutoImageProcessor`.
501
+
502
+ <Tip warning={true}>
503
+
504
+ This API is experimental and may have some slight breaking changes in the next releases.
505
+
506
+ </Tip>
507
+
508
+ Args:
509
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoImageProcessor"`):
510
+ The auto class to register this new image processor with.
511
+ """
512
+ if not isinstance(auto_class, str):
513
+ auto_class = auto_class.__name__
514
+
515
+ import transformers.models.auto as auto_module
516
+
517
+ if not hasattr(auto_module, auto_class):
518
+ raise ValueError(f"{auto_class} is not a valid auto class.")
519
+
520
+ cls._auto_class = auto_class
521
+
522
+ def fetch_images(self, image_url_or_urls: Union[str, List[str]]):
523
+ """
524
+ Convert a single or a list of urls into the corresponding `PIL.Image` objects.
525
+
526
+ If a single url is passed, the return value will be a single object. If a list is passed, a list of objects is
527
+ returned.
528
+ """
529
+ headers = {
530
+ "User-Agent": (
531
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0"
532
+ " Safari/537.36"
533
+ )
534
+ }
535
+ if isinstance(image_url_or_urls, list):
536
+ return [self.fetch_images(x) for x in image_url_or_urls]
537
+ elif isinstance(image_url_or_urls, str):
538
+ response = requests.get(image_url_or_urls, stream=True, headers=headers)
539
+ response.raise_for_status()
540
+ return Image.open(BytesIO(response.content))
541
+ else:
542
+ raise ValueError(f"Only a single URL or a list of URLs is supported, but got type={type(image_url_or_urls)}")
543
+
544
+
545
+ class BaseImageProcessor(ImageProcessingMixin):
546
+ def __init__(self, **kwargs):
547
+ super().__init__(**kwargs)
548
+
549
+ def __call__(self, images, **kwargs) -> BatchFeature:
550
+ """Preprocess an image or a batch of images."""
551
+ return self.preprocess(images, **kwargs)
552
+
553
+ def preprocess(self, images, **kwargs) -> BatchFeature:
554
+ raise NotImplementedError("Each image processor must implement its own preprocess method")
555
+
556
+ def rescale(
557
+ self,
558
+ image: np.ndarray,
559
+ scale: float,
560
+ data_format: Optional[Union[str, ChannelDimension]] = None,
561
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
562
+ **kwargs,
563
+ ) -> np.ndarray:
564
+ """
565
+ Rescale an image by a scale factor. image = image * scale.
566
+
567
+ Args:
568
+ image (`np.ndarray`):
569
+ Image to rescale.
570
+ scale (`float`):
571
+ The scaling factor to rescale pixel values by.
572
+ data_format (`str` or `ChannelDimension`, *optional*):
573
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
574
+ image is used. Can be one of:
575
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
576
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
577
+ input_data_format (`ChannelDimension` or `str`, *optional*):
578
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
579
+ from the input image. Can be one of:
580
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
581
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
582
+
583
+ Returns:
584
+ `np.ndarray`: The rescaled image.
585
+ """
586
+ return rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs)
587
+
588
+ def normalize(
589
+ self,
590
+ image: np.ndarray,
591
+ mean: Union[float, Iterable[float]],
592
+ std: Union[float, Iterable[float]],
593
+ data_format: Optional[Union[str, ChannelDimension]] = None,
594
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
595
+ **kwargs,
596
+ ) -> np.ndarray:
597
+ """
598
+ Normalize an image. image = (image - image_mean) / image_std.
599
+
600
+ Args:
601
+ image (`np.ndarray`):
602
+ Image to normalize.
603
+ mean (`float` or `Iterable[float]`):
604
+ Image mean to use for normalization.
605
+ std (`float` or `Iterable[float]`):
606
+ Image standard deviation to use for normalization.
607
+ data_format (`str` or `ChannelDimension`, *optional*):
608
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
609
+ image is used. Can be one of:
610
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
611
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
612
+ input_data_format (`ChannelDimension` or `str`, *optional*):
613
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
614
+ from the input image. Can be one of:
615
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
616
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
617
+
618
+ Returns:
619
+ `np.ndarray`: The normalized image.
620
+ """
621
+ return normalize(
622
+ image, mean=mean, std=std, data_format=data_format, input_data_format=input_data_format, **kwargs
623
+ )
624
+
625
+ def center_crop(
626
+ self,
627
+ image: np.ndarray,
628
+ size: Dict[str, int],
629
+ data_format: Optional[Union[str, ChannelDimension]] = None,
630
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
631
+ **kwargs,
632
+ ) -> np.ndarray:
633
+ """
634
+ Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `size` along
635
+ any edge, the image is padded with 0's and then center cropped.
636
+
637
+ Args:
638
+ image (`np.ndarray`):
639
+ Image to center crop.
640
+ size (`Dict[str, int]`):
641
+ Size of the output image.
642
+ data_format (`str` or `ChannelDimension`, *optional*):
643
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
644
+ image is used. Can be one of:
645
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
646
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
647
+ input_data_format (`ChannelDimension` or `str`, *optional*):
648
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
649
+ from the input image. Can be one of:
650
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
651
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
652
+ """
653
+ size = get_size_dict(size)
654
+ if "height" not in size or "width" not in size:
655
+ raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
656
+ return center_crop(
657
+ image,
658
+ size=(size["height"], size["width"]),
659
+ data_format=data_format,
660
+ input_data_format=input_data_format,
661
+ **kwargs,
662
+ )
663
+
664
+
665
+ VALID_SIZE_DICT_KEYS = ({"height", "width"}, {"shortest_edge"}, {"shortest_edge", "longest_edge"}, {"longest_edge"})
666
+
667
+
668
+ def is_valid_size_dict(size_dict):
669
+ if not isinstance(size_dict, dict):
670
+ return False
671
+
672
+ size_dict_keys = set(size_dict.keys())
673
+ for allowed_keys in VALID_SIZE_DICT_KEYS:
674
+ if size_dict_keys == allowed_keys:
675
+ return True
676
+ return False
677
+
678
+
679
+ def convert_to_size_dict(
680
+ size, max_size: Optional[int] = None, default_to_square: bool = True, height_width_order: bool = True
681
+ ):
682
+ # By default, if size is an int we assume it represents a tuple of (size, size).
683
+ if isinstance(size, int) and default_to_square:
684
+ if max_size is not None:
685
+ raise ValueError("Cannot specify both size as an int, with default_to_square=True and max_size")
686
+ return {"height": size, "width": size}
687
+ # In other configs, if size is an int and default_to_square is False, size represents the length of
688
+ # the shortest edge after resizing.
689
+ elif isinstance(size, int) and not default_to_square:
690
+ size_dict = {"shortest_edge": size}
691
+ if max_size is not None:
692
+ size_dict["longest_edge"] = max_size
693
+ return size_dict
694
+ # Otherwise, if size is a tuple it's either (height, width) or (width, height)
695
+ elif isinstance(size, (tuple, list)) and height_width_order:
696
+ return {"height": size[0], "width": size[1]}
697
+ elif isinstance(size, (tuple, list)) and not height_width_order:
698
+ return {"height": size[1], "width": size[0]}
699
+ elif size is None and max_size is not None:
700
+ if default_to_square:
701
+ raise ValueError("Cannot specify both default_to_square=True and max_size")
702
+ return {"longest_edge": max_size}
703
+
704
+ raise ValueError(f"Could not convert size input to size dict: {size}")
705
+
706
+
707
+ def get_size_dict(
708
+ size: Union[int, Iterable[int], Dict[str, int]] = None,
709
+ max_size: Optional[int] = None,
710
+ height_width_order: bool = True,
711
+ default_to_square: bool = True,
712
+ param_name="size",
713
+ ) -> dict:
714
+ """
715
+ Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards
716
+ compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,
717
+ width) or (width, height) format.
718
+
719
+ - If `size` is a tuple, it is converted to `{"height": size[0], "width": size[1]}` or `{"height": size[1], "width":
720
+ size[0]}` if `height_width_order` is `False`.
721
+ - If `size` is an int, and `default_to_square` is `True`, it is converted to `{"height": size, "width": size}`.
722
+ - If `size` is an int and `default_to_square` is False, it is converted to `{"shortest_edge": size}`. If `max_size`
723
+ is set, it is added to the dict as `{"longest_edge": max_size}`.
724
+
725
+ Args:
726
+ size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):
727
+ The `size` parameter to be cast into a size dictionary.
728
+ max_size (`Optional[int]`, *optional*):
729
+ The `max_size` parameter to be cast into a size dictionary.
730
+ height_width_order (`bool`, *optional*, defaults to `True`):
731
+ If `size` is a tuple, whether it's in (height, width) or (width, height) order.
732
+ default_to_square (`bool`, *optional*, defaults to `True`):
733
+ If `size` is an int, whether to default to a square image or not.
734
+ """
735
+ if not isinstance(size, dict):
736
+ size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)
737
+ logger.info(
738
+ f"{param_name} should be a dictionary with one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}."
739
+ f" Converted to {size_dict}.",
740
+ )
741
+ else:
742
+ size_dict = size
743
+
744
+ if not is_valid_size_dict(size_dict):
745
+ raise ValueError(
746
+ f"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}"
747
+ )
748
+ return size_dict
749
+
750
+
751
+ def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple:
752
+ """
753
+ Selects the best resolution from a list of possible resolutions based on the original size.
754
+
755
+ This is done by calculating the effective and wasted resolution for each possible resolution.
756
+
757
+ The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.
758
+
759
+ Args:
760
+ original_size (tuple):
761
+ The original size of the image in the format (height, width).
762
+ possible_resolutions (list):
763
+ A list of possible resolutions in the format [(height1, width1), (height2, width2), ...].
764
+
765
+ Returns:
766
+ tuple: The best fit resolution in the format (height, width).
767
+ """
768
+ original_height, original_width = original_size
769
+ best_fit = None
770
+ max_effective_resolution = 0
771
+ min_wasted_resolution = float("inf")
772
+
773
+ for height, width in possible_resolutions:
774
+ scale = min(width / original_width, height / original_height)
775
+ downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
776
+ effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
777
+ wasted_resolution = (width * height) - effective_resolution
778
+
779
+ if effective_resolution > max_effective_resolution or (
780
+ effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution
781
+ ):
782
+ max_effective_resolution = effective_resolution
783
+ min_wasted_resolution = wasted_resolution
784
+ best_fit = (height, width)
785
+
786
+ return best_fit
787
+
788
+
789
+ ImageProcessingMixin.push_to_hub = copy_func(ImageProcessingMixin.push_to_hub)
790
+ if ImageProcessingMixin.push_to_hub.__doc__ is not None:
791
+ ImageProcessingMixin.push_to_hub.__doc__ = ImageProcessingMixin.push_to_hub.__doc__.format(
792
+ object="image processor", object_class="AutoImageProcessor", object_files="image processor file"
793
+ )
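To make the size utilities at the end of this file concrete, here is a small sketch (not part of the diff) using arbitrary example values:

```python
from transformers.image_processing_utils import get_size_dict, select_best_resolution

# Legacy int/tuple `size` values are normalized into the dict form used by image processors.
assert get_size_dict(224) == {"height": 224, "width": 224}
assert get_size_dict(224, default_to_square=False) == {"shortest_edge": 224}
assert get_size_dict((480, 640)) == {"height": 480, "width": 640}

# select_best_resolution keeps the candidate resolution that preserves the most
# effective pixels while wasting the least padding area.
best = select_best_resolution((600, 800), [(336, 672), (672, 336), (672, 672)])
print(best)  # (672, 672) for this input
```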
env-llmeval/lib/python3.10/site-packages/transformers/image_transforms.py ADDED
@@ -0,0 +1,801 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import warnings
17
+ from typing import Iterable, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+
21
+ from .image_utils import (
22
+ ChannelDimension,
23
+ ImageInput,
24
+ get_channel_dimension_axis,
25
+ get_image_size,
26
+ infer_channel_dimension_format,
27
+ )
28
+ from .utils import ExplicitEnum, TensorType, is_jax_tensor, is_tf_tensor, is_torch_tensor
29
+ from .utils.import_utils import (
30
+ is_flax_available,
31
+ is_tf_available,
32
+ is_torch_available,
33
+ is_vision_available,
34
+ requires_backends,
35
+ )
36
+
37
+
38
+ if is_vision_available():
39
+ import PIL
40
+
41
+ from .image_utils import PILImageResampling
42
+
43
+ if is_torch_available():
44
+ import torch
45
+
46
+ if is_tf_available():
47
+ import tensorflow as tf
48
+
49
+ if is_flax_available():
50
+ import jax.numpy as jnp
51
+
52
+
53
+ def to_channel_dimension_format(
54
+ image: np.ndarray,
55
+ channel_dim: Union[ChannelDimension, str],
56
+ input_channel_dim: Optional[Union[ChannelDimension, str]] = None,
57
+ ) -> np.ndarray:
58
+ """
59
+ Converts `image` to the channel dimension format specified by `channel_dim`.
60
+
61
+ Args:
62
+ image (`numpy.ndarray`):
63
+ The image to have its channel dimension set.
64
+ channel_dim (`ChannelDimension`):
65
+ The channel dimension format to use.
66
+ input_channel_dim (`ChannelDimension`, *optional*):
67
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
68
+
69
+ Returns:
70
+ `np.ndarray`: The image with the channel dimension set to `channel_dim`.
71
+ """
72
+ if not isinstance(image, np.ndarray):
73
+ raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
74
+
75
+ if input_channel_dim is None:
76
+ input_channel_dim = infer_channel_dimension_format(image)
77
+
78
+ target_channel_dim = ChannelDimension(channel_dim)
79
+ if input_channel_dim == target_channel_dim:
80
+ return image
81
+
82
+ if target_channel_dim == ChannelDimension.FIRST:
83
+ image = image.transpose((2, 0, 1))
84
+ elif target_channel_dim == ChannelDimension.LAST:
85
+ image = image.transpose((1, 2, 0))
86
+ else:
87
+ raise ValueError("Unsupported channel dimension format: {}".format(channel_dim))
88
+
89
+ return image
90
+
91
+
92
+ def rescale(
93
+ image: np.ndarray,
94
+ scale: float,
95
+ data_format: Optional[ChannelDimension] = None,
96
+ dtype: np.dtype = np.float32,
97
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
98
+ ) -> np.ndarray:
99
+ """
100
+ Rescales `image` by `scale`.
101
+
102
+ Args:
103
+ image (`np.ndarray`):
104
+ The image to rescale.
105
+ scale (`float`):
106
+ The scale to use for rescaling the image.
107
+ data_format (`ChannelDimension`, *optional*):
108
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
109
+ dtype (`np.dtype`, *optional*, defaults to `np.float32`):
110
+ The dtype of the output image. Defaults to `np.float32`. Used for backwards compatibility with feature
111
+ extractors.
112
+ input_data_format (`ChannelDimension`, *optional*):
113
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
114
+
115
+ Returns:
116
+ `np.ndarray`: The rescaled image.
117
+ """
118
+ if not isinstance(image, np.ndarray):
119
+ raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
120
+
121
+ rescaled_image = image * scale
122
+ if data_format is not None:
123
+ rescaled_image = to_channel_dimension_format(rescaled_image, data_format, input_data_format)
124
+
125
+ rescaled_image = rescaled_image.astype(dtype)
126
+
127
+ return rescaled_image
128
+
129
+
130
+ def _rescale_for_pil_conversion(image):
131
+ """
132
+ Detects whether or not the image needs to be rescaled before being converted to a PIL image.
133
+
134
+ The assumption is that if the image is of type `np.float` and all values are between 0 and 1, it needs to be
135
+ rescaled.
136
+ """
137
+ if image.dtype == np.uint8:
138
+ do_rescale = False
139
+ elif np.allclose(image, image.astype(int)):
140
+ if np.all(0 <= image) and np.all(image <= 255):
141
+ do_rescale = False
142
+ else:
143
+ raise ValueError(
144
+ "The image to be converted to a PIL image contains values outside the range [0, 255], "
145
+ f"got [{image.min()}, {image.max()}] which cannot be converted to uint8."
146
+ )
147
+ elif np.all(0 <= image) and np.all(image <= 1):
148
+ do_rescale = True
149
+ else:
150
+ raise ValueError(
151
+ "The image to be converted to a PIL image contains values outside the range [0, 1], "
152
+ f"got [{image.min()}, {image.max()}] which cannot be converted to uint8."
153
+ )
154
+ return do_rescale
155
+
156
+
157
+ def to_pil_image(
158
+ image: Union[np.ndarray, "PIL.Image.Image", "torch.Tensor", "tf.Tensor", "jnp.ndarray"],
159
+ do_rescale: Optional[bool] = None,
160
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
161
+ ) -> "PIL.Image.Image":
162
+ """
163
+ Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
164
+ needed.
165
+
166
+ Args:
167
+ image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`):
168
+ The image to convert to the `PIL.Image` format.
169
+ do_rescale (`bool`, *optional*):
170
+ Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default
171
+ to `True` if the image type is a floating type and casting to `int` would result in a loss of precision,
172
+ and `False` otherwise.
173
+ input_data_format (`ChannelDimension`, *optional*):
174
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
175
+
176
+ Returns:
177
+ `PIL.Image.Image`: The converted image.
178
+ """
179
+ requires_backends(to_pil_image, ["vision"])
180
+
181
+ if isinstance(image, PIL.Image.Image):
182
+ return image
183
+
184
+ # Convert all tensors to numpy arrays before converting to PIL image
185
+ if is_torch_tensor(image) or is_tf_tensor(image):
186
+ image = image.numpy()
187
+ elif is_jax_tensor(image):
188
+ image = np.array(image)
189
+ elif not isinstance(image, np.ndarray):
190
+ raise ValueError("Input image type not supported: {}".format(type(image)))
191
+
192
+ # If the channel has been moved to first dim, we put it back at the end.
193
+ image = to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)
194
+
195
+ # If there is a single channel, we squeeze it, as otherwise PIL can't handle it.
196
+ image = np.squeeze(image, axis=-1) if image.shape[-1] == 1 else image
197
+
198
+ # PIL.Image can only store uint8 values so we rescale the image to be between 0 and 255 if needed.
199
+ do_rescale = _rescale_for_pil_conversion(image) if do_rescale is None else do_rescale
200
+
201
+ if do_rescale:
202
+ image = rescale(image, 255)
203
+
204
+ image = image.astype(np.uint8)
205
+ return PIL.Image.fromarray(image)
206
+
207
+
208
+ # Logic adapted from torchvision resizing logic: https://github.com/pytorch/vision/blob/511924c1ced4ce0461197e5caa64ce5b9e558aab/torchvision/transforms/functional.py#L366
209
+ def get_resize_output_image_size(
210
+ input_image: np.ndarray,
211
+ size: Union[int, Tuple[int, int], List[int], Tuple[int]],
212
+ default_to_square: bool = True,
213
+ max_size: Optional[int] = None,
214
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
215
+ ) -> tuple:
216
+ """
217
+ Find the target (height, width) dimension of the output image after resizing given the input image and the desired
218
+ size.
219
+
220
+ Args:
221
+ input_image (`np.ndarray`):
222
+ The image to resize.
223
+ size (`int` or `Tuple[int, int]` or List[int] or Tuple[int]):
224
+ The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to
225
+ this.
226
+
227
+ If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
228
+ `size` is an int and `default_to_square` is `False`, then the smaller edge of the image will be matched to this
229
+ number, i.e., if height > width, then the image will be rescaled to (size * height / width, size).
230
+ default_to_square (`bool`, *optional*, defaults to `True`):
231
+ How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square
232
+ (`size`,`size`). If set to `False`, will replicate
233
+ [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
234
+ with support for resizing only the smallest edge and providing an optional `max_size`.
235
+ max_size (`int`, *optional*):
236
+ The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater
237
+ than `max_size` after being resized according to `size`, then the image is resized again so that the longer
238
+ edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter
239
+ than `size`. Only used if `default_to_square` is `False`.
240
+ input_data_format (`ChannelDimension`, *optional*):
241
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
242
+
243
+ Returns:
244
+ `tuple`: The target (height, width) dimension of the output image after resizing.
245
+ """
246
+ if isinstance(size, (tuple, list)):
247
+ if len(size) == 2:
248
+ return tuple(size)
249
+ elif len(size) == 1:
250
+ # Perform same logic as if size was an int
251
+ size = size[0]
252
+ else:
253
+ raise ValueError("size must have 1 or 2 elements if it is a list or tuple")
254
+
255
+ if default_to_square:
256
+ return (size, size)
257
+
258
+ height, width = get_image_size(input_image, input_data_format)
259
+ short, long = (width, height) if width <= height else (height, width)
260
+ requested_new_short = size
261
+
262
+ new_short, new_long = requested_new_short, int(requested_new_short * long / short)
263
+
264
+ if max_size is not None:
265
+ if max_size <= requested_new_short:
266
+ raise ValueError(
267
+ f"max_size = {max_size} must be strictly greater than the requested "
268
+ f"size for the smaller edge size = {size}"
269
+ )
270
+ if new_long > max_size:
271
+ new_short, new_long = int(max_size * new_short / new_long), max_size
272
+
273
+ return (new_long, new_short) if width <= height else (new_short, new_long)
274
+
275
+
276
+ def resize(
277
+ image: np.ndarray,
278
+ size: Tuple[int, int],
279
+ resample: "PILImageResampling" = None,
280
+ reducing_gap: Optional[int] = None,
281
+ data_format: Optional[ChannelDimension] = None,
282
+ return_numpy: bool = True,
283
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
284
+ ) -> np.ndarray:
285
+ """
286
+ Resizes `image` to `(height, width)` specified by `size` using the PIL library.
287
+
288
+ Args:
289
+ image (`np.ndarray`):
290
+ The image to resize.
291
+ size (`Tuple[int, int]`):
292
+ The size to use for resizing the image.
293
+ resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
294
+ The filter to use for resampling.
295
+ reducing_gap (`int`, *optional*):
296
+ Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result is to
297
+ fair resampling. See the corresponding Pillow documentation for more details.
298
+ data_format (`ChannelDimension`, *optional*):
299
+ The channel dimension format of the output image. If unset, will use the inferred format from the input.
300
+ return_numpy (`bool`, *optional*, defaults to `True`):
301
+ Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is
302
+ returned.
303
+ input_data_format (`ChannelDimension`, *optional*):
304
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
305
+
306
+ Returns:
307
+ `np.ndarray`: The resized image.
308
+ """
309
+ requires_backends(resize, ["vision"])
310
+
311
+ resample = resample if resample is not None else PILImageResampling.BILINEAR
312
+
313
+ if not len(size) == 2:
314
+ raise ValueError("size must have 2 elements")
315
+
316
+ # For all transformations, we want to keep the same data format as the input image unless otherwise specified.
317
+ # The resized image from PIL will always have channels last, so find the input format first.
318
+ if input_data_format is None:
319
+ input_data_format = infer_channel_dimension_format(image)
320
+ data_format = input_data_format if data_format is None else data_format
321
+
322
+ # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use
323
+ # the pillow library to resize the image and then convert back to numpy
324
+ do_rescale = False
325
+ if not isinstance(image, PIL.Image.Image):
326
+ do_rescale = _rescale_for_pil_conversion(image)
327
+ image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)
328
+ height, width = size
329
+ # PIL images are in the format (width, height)
330
+ resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)
331
+
332
+ if return_numpy:
333
+ resized_image = np.array(resized_image)
334
+ # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image
335
+ # so we need to add it back if necessary.
336
+ resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image
337
+ # The image is always in channels last format after converting from a PIL image
338
+ resized_image = to_channel_dimension_format(
339
+ resized_image, data_format, input_channel_dim=ChannelDimension.LAST
340
+ )
341
+ # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to
342
+ # rescale it back to the original range.
343
+ resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image
344
+ return resized_image
345
+
346
+
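A minimal usage sketch for `resize` (assuming this file is importable as `transformers.image_transforms` and Pillow is installed):

import numpy as np
from transformers.image_transforms import resize  # assumed public module path

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # HWC uint8 image
resized = resize(image, size=(224, 224))   # bilinear by default, returns a numpy array
print(resized.shape)                        # (224, 224, 3), channels-last like the input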
347
+ def normalize(
348
+ image: np.ndarray,
349
+ mean: Union[float, Iterable[float]],
350
+ std: Union[float, Iterable[float]],
351
+ data_format: Optional[ChannelDimension] = None,
352
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
353
+ ) -> np.ndarray:
354
+ """
355
+ Normalizes `image` using the mean and standard deviation specified by `mean` and `std`.
356
+
357
+ image = (image - mean) / std
358
+
359
+ Args:
360
+ image (`np.ndarray`):
361
+ The image to normalize.
362
+ mean (`float` or `Iterable[float]`):
363
+ The mean to use for normalization.
364
+ std (`float` or `Iterable[float]`):
365
+ The standard deviation to use for normalization.
366
+ data_format (`ChannelDimension`, *optional*):
367
+ The channel dimension format of the output image. If unset, will use the inferred format from the input.
368
+ input_data_format (`ChannelDimension`, *optional*):
369
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
370
+ """
371
+ if not isinstance(image, np.ndarray):
372
+ raise ValueError("image must be a numpy array")
373
+
374
+ if input_data_format is None:
375
+ input_data_format = infer_channel_dimension_format(image)
376
+ channel_axis = get_channel_dimension_axis(image, input_data_format=input_data_format)
377
+ num_channels = image.shape[channel_axis]
378
+
379
+ # We cast to float32 to avoid errors that can occur when subtracting uint8 values.
380
+ # We preserve the original dtype if it is a float type to prevent upcasting float16.
381
+ if not np.issubdtype(image.dtype, np.floating):
382
+ image = image.astype(np.float32)
383
+
384
+ if isinstance(mean, Iterable):
385
+ if len(mean) != num_channels:
386
+ raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(mean)}")
387
+ else:
388
+ mean = [mean] * num_channels
389
+ mean = np.array(mean, dtype=image.dtype)
390
+
391
+ if isinstance(std, Iterable):
392
+ if len(std) != num_channels:
393
+ raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(std)}")
394
+ else:
395
+ std = [std] * num_channels
396
+ std = np.array(std, dtype=image.dtype)
397
+
398
+ if input_data_format == ChannelDimension.LAST:
399
+ image = (image - mean) / std
400
+ else:
401
+ image = ((image.T - mean) / std).T
402
+
403
+ image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
404
+ return image
405
+
406
+
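A minimal usage sketch for `normalize` with the common ImageNet statistics (module path assumed as above):

import numpy as np
from transformers.image_transforms import normalize  # assumed public module path

image = np.random.rand(3, 224, 224).astype(np.float32)   # channels-first float image
normalized = normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print(normalized.shape, normalized.dtype)                  # (3, 224, 224) float32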
407
+ def center_crop(
408
+ image: np.ndarray,
409
+ size: Tuple[int, int],
410
+ data_format: Optional[Union[str, ChannelDimension]] = None,
411
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
412
+ return_numpy: Optional[bool] = None,
413
+ ) -> np.ndarray:
414
+ """
415
+ Crops the `image` to the specified `size` using a center crop. Note that if the image is too small to be cropped to
416
+ the size given, it will be padded (so the returned result will always be of size `size`).
417
+
418
+ Args:
419
+ image (`np.ndarray`):
420
+ The image to crop.
421
+ size (`Tuple[int, int]`):
422
+ The target size for the cropped image.
423
+ data_format (`str` or `ChannelDimension`, *optional*):
424
+ The channel dimension format for the output image. Can be one of:
425
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
426
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
427
+ If unset, will use the inferred format of the input image.
428
+ input_data_format (`str` or `ChannelDimension`, *optional*):
429
+ The channel dimension format for the input image. Can be one of:
430
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
431
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
432
+ If unset, will use the inferred format of the input image.
433
+ return_numpy (`bool`, *optional*):
434
+ Whether or not to return the cropped image as a numpy array. Used for backwards compatibility with the
435
+ previous ImageFeatureExtractionMixin method.
436
+ - Unset: will return the same type as the input image.
437
+ - `True`: will return a numpy array.
438
+ - `False`: will return a `PIL.Image.Image` object.
439
+ Returns:
440
+ `np.ndarray`: The cropped image.
441
+ """
442
+ requires_backends(center_crop, ["vision"])
443
+
444
+ if return_numpy is not None:
445
+ warnings.warn("return_numpy is deprecated and will be removed in v.4.33", FutureWarning)
446
+
447
+ return_numpy = True if return_numpy is None else return_numpy
448
+
449
+ if not isinstance(image, np.ndarray):
450
+ raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
451
+
452
+ if not isinstance(size, Iterable) or len(size) != 2:
453
+ raise ValueError("size must have 2 elements representing the height and width of the output image")
454
+
455
+ if input_data_format is None:
456
+ input_data_format = infer_channel_dimension_format(image)
457
+ output_data_format = data_format if data_format is not None else input_data_format
458
+
459
+ # We perform the crop in (C, H, W) format and then convert to the output format
460
+ image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format)
461
+
462
+ orig_height, orig_width = get_image_size(image, ChannelDimension.FIRST)
463
+ crop_height, crop_width = size
464
+ crop_height, crop_width = int(crop_height), int(crop_width)
465
+
466
+ # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
467
+ top = (orig_height - crop_height) // 2
468
+ bottom = top + crop_height
469
+ # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.
470
+ left = (orig_width - crop_width) // 2
471
+ right = left + crop_width
472
+
473
+ # Check if cropped area is within image boundaries
474
+ if top >= 0 and bottom <= orig_height and left >= 0 and right <= orig_width:
475
+ image = image[..., top:bottom, left:right]
476
+ image = to_channel_dimension_format(image, output_data_format, ChannelDimension.FIRST)
477
+ return image
478
+
479
+ # Otherwise, we may need to pad if the image is too small. Oh joy...
480
+ new_height = max(crop_height, orig_height)
481
+ new_width = max(crop_width, orig_width)
482
+ new_shape = image.shape[:-2] + (new_height, new_width)
483
+ new_image = np.zeros_like(image, shape=new_shape)
484
+
485
+ # If the image is too small, pad it with zeros
486
+ top_pad = (new_height - orig_height) // 2
487
+ bottom_pad = top_pad + orig_height
488
+ left_pad = (new_width - orig_width) // 2
489
+ right_pad = left_pad + orig_width
490
+ new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image
491
+
492
+ top += top_pad
493
+ bottom += top_pad
494
+ left += left_pad
495
+ right += left_pad
496
+
497
+ new_image = new_image[..., max(0, top) : min(new_height, bottom), max(0, left) : min(new_width, right)]
498
+ new_image = to_channel_dimension_format(new_image, output_data_format, ChannelDimension.FIRST)
499
+
500
+ if not return_numpy:
501
+ new_image = to_pil_image(new_image)
502
+
503
+ return new_image
504
+
505
+
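A minimal usage sketch for `center_crop` (module path assumed as above):

import numpy as np
from transformers.image_transforms import center_crop  # assumed public module path

image = np.random.randint(0, 256, size=(3, 300, 400), dtype=np.uint8)  # channels-first
cropped = center_crop(image, size=(224, 224))
print(cropped.shape)   # (3, 224, 224); smaller inputs would be zero-padded to the target size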
506
+ def _center_to_corners_format_torch(bboxes_center: "torch.Tensor") -> "torch.Tensor":
507
+ center_x, center_y, width, height = bboxes_center.unbind(-1)
508
+ bbox_corners = torch.stack(
509
+ # top left x, top left y, bottom right x, bottom right y
510
+ [(center_x - 0.5 * width), (center_y - 0.5 * height), (center_x + 0.5 * width), (center_y + 0.5 * height)],
511
+ dim=-1,
512
+ )
513
+ return bbox_corners
514
+
515
+
516
+ def _center_to_corners_format_numpy(bboxes_center: np.ndarray) -> np.ndarray:
517
+ center_x, center_y, width, height = bboxes_center.T
518
+ bboxes_corners = np.stack(
519
+ # top left x, top left y, bottom right x, bottom right y
520
+ [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height],
521
+ axis=-1,
522
+ )
523
+ return bboxes_corners
524
+
525
+
526
+ def _center_to_corners_format_tf(bboxes_center: "tf.Tensor") -> "tf.Tensor":
527
+ center_x, center_y, width, height = tf.unstack(bboxes_center, axis=-1)
528
+ bboxes_corners = tf.stack(
529
+ # top left x, top left y, bottom right x, bottom right y
530
+ [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height],
531
+ axis=-1,
532
+ )
533
+ return bboxes_corners
534
+
535
+
536
+ # 2 functions below inspired by https://github.com/facebookresearch/detr/blob/master/util/box_ops.py
537
+ def center_to_corners_format(bboxes_center: TensorType) -> TensorType:
538
+ """
539
+ Converts bounding boxes from center format to corners format.
540
+
541
+ center format: contains the coordinate for the center of the box and its width, height dimensions
542
+ (center_x, center_y, width, height)
543
+ corners format: contains the coordinates for the top-left and bottom-right corners of the box
544
+ (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
545
+ """
546
+ # Function is used during model forward pass, so we use the input framework if possible, without
547
+ # converting to numpy
548
+ if is_torch_tensor(bboxes_center):
549
+ return _center_to_corners_format_torch(bboxes_center)
550
+ elif isinstance(bboxes_center, np.ndarray):
551
+ return _center_to_corners_format_numpy(bboxes_center)
552
+ elif is_tf_tensor(bboxes_center):
553
+ return _center_to_corners_format_tf(bboxes_center)
554
+
555
+ raise ValueError(f"Unsupported input type {type(bboxes_center)}")
556
+
557
+
558
+ def _corners_to_center_format_torch(bboxes_corners: "torch.Tensor") -> "torch.Tensor":
559
+ top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.unbind(-1)
560
+ b = [
561
+ (top_left_x + bottom_right_x) / 2, # center x
562
+ (top_left_y + bottom_right_y) / 2, # center y
563
+ (bottom_right_x - top_left_x), # width
564
+ (bottom_right_y - top_left_y), # height
565
+ ]
566
+ return torch.stack(b, dim=-1)
567
+
568
+
569
+ def _corners_to_center_format_numpy(bboxes_corners: np.ndarray) -> np.ndarray:
570
+ top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.T
571
+ bboxes_center = np.stack(
572
+ [
573
+ (top_left_x + bottom_right_x) / 2, # center x
574
+ (top_left_y + bottom_right_y) / 2, # center y
575
+ (bottom_right_x - top_left_x), # width
576
+ (bottom_right_y - top_left_y), # height
577
+ ],
578
+ axis=-1,
579
+ )
580
+ return bboxes_center
581
+
582
+
583
+ def _corners_to_center_format_tf(bboxes_corners: "tf.Tensor") -> "tf.Tensor":
584
+ top_left_x, top_left_y, bottom_right_x, bottom_right_y = tf.unstack(bboxes_corners, axis=-1)
585
+ bboxes_center = tf.stack(
586
+ [
587
+ (top_left_x + bottom_right_x) / 2, # center x
588
+ (top_left_y + bottom_right_y) / 2, # center y
589
+ (bottom_right_x - top_left_x), # width
590
+ (bottom_right_y - top_left_y), # height
591
+ ],
592
+ axis=-1,
593
+ )
594
+ return bboxes_center
595
+
596
+
597
+ def corners_to_center_format(bboxes_corners: TensorType) -> TensorType:
598
+ """
599
+ Converts bounding boxes from corners format to center format.
600
+
601
+ corners format: contains the coordinates for the top-left and bottom-right corners of the box
602
+ (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
603
+ center format: contains the coordinate for the center of the box and its width, height dimensions
604
+ (center_x, center_y, width, height)
605
+ """
606
+ # Inverse function accepts different input types so implemented here too
607
+ if is_torch_tensor(bboxes_corners):
608
+ return _corners_to_center_format_torch(bboxes_corners)
609
+ elif isinstance(bboxes_corners, np.ndarray):
610
+ return _corners_to_center_format_numpy(bboxes_corners)
611
+ elif is_tf_tensor(bboxes_corners):
612
+ return _corners_to_center_format_tf(bboxes_corners)
613
+
614
+ raise ValueError(f"Unsupported input type {type(bboxes_corners)}")
615
+
616
+
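A quick round-trip sketch for the two box-format converters above, using the numpy path (module path assumed as above):

import numpy as np
from transformers.image_transforms import center_to_corners_format, corners_to_center_format

boxes_center = np.array([[0.5, 0.5, 0.2, 0.4]])         # (center_x, center_y, width, height)
boxes_corners = center_to_corners_format(boxes_center)   # [[0.4, 0.3, 0.6, 0.7]]
roundtrip = corners_to_center_format(boxes_corners)
print(np.allclose(roundtrip, boxes_center))               # True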
617
+ # 2 functions below copied from https://github.com/cocodataset/panopticapi/blob/master/panopticapi/utils.py
618
+ # Copyright (c) 2018, Alexander Kirillov
619
+ # All rights reserved.
620
+ def rgb_to_id(color):
621
+ """
622
+ Converts RGB color to unique ID.
623
+ """
624
+ if isinstance(color, np.ndarray) and len(color.shape) == 3:
625
+ if color.dtype == np.uint8:
626
+ color = color.astype(np.int32)
627
+ return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
628
+ return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
629
+
630
+
631
+ def id_to_rgb(id_map):
632
+ """
633
+ Converts unique ID to RGB color.
634
+ """
635
+ if isinstance(id_map, np.ndarray):
636
+ id_map_copy = id_map.copy()
637
+ rgb_shape = tuple(list(id_map.shape) + [3])
638
+ rgb_map = np.zeros(rgb_shape, dtype=np.uint8)
639
+ for i in range(3):
640
+ rgb_map[..., i] = id_map_copy % 256
641
+ id_map_copy //= 256
642
+ return rgb_map
643
+ color = []
644
+ for _ in range(3):
645
+ color.append(id_map % 256)
646
+ id_map //= 256
647
+ return color
648
+
649
+
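A round-trip sketch for the panoptic ID <-> RGB encoding above, which is a little-endian base-256 packing (module path assumed as above):

from transformers.image_transforms import id_to_rgb, rgb_to_id  # assumed public module path

segment_id = 300
rgb = id_to_rgb(segment_id)     # [44, 1, 0] because 300 = 44 + 1 * 256
assert rgb_to_id(rgb) == segment_id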
650
+ class PaddingMode(ExplicitEnum):
651
+ """
652
+ Enum class for the different padding modes to use when padding images.
653
+ """
654
+
655
+ CONSTANT = "constant"
656
+ REFLECT = "reflect"
657
+ REPLICATE = "replicate"
658
+ SYMMETRIC = "symmetric"
659
+
660
+
661
+ def pad(
662
+ image: np.ndarray,
663
+ padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],
664
+ mode: PaddingMode = PaddingMode.CONSTANT,
665
+ constant_values: Union[float, Iterable[float]] = 0.0,
666
+ data_format: Optional[Union[str, ChannelDimension]] = None,
667
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
668
+ ) -> np.ndarray:
669
+ """
670
+ Pads the `image` with the specified (height, width) `padding` and `mode`.
671
+
672
+ Args:
673
+ image (`np.ndarray`):
674
+ The image to pad.
675
+ padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):
676
+ Padding to apply to the edges of the height, width axes. Can be one of three formats:
677
+ - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
678
+ - `((before, after),)` yields same before and after pad for height and width.
679
+ - `(pad,)` or int is a shortcut for before = after = pad width for all axes.
680
+ mode (`PaddingMode`):
681
+ The padding mode to use. Can be one of:
682
+ - `"constant"`: pads with a constant value.
683
+ - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
684
+ vector along each axis.
685
+ - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
686
+ - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
687
+ constant_values (`float` or `Iterable[float]`, *optional*):
688
+ The value to use for the padding if `mode` is `"constant"`.
689
+ data_format (`str` or `ChannelDimension`, *optional*):
690
+ The channel dimension format for the output image. Can be one of:
691
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
692
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
693
+ If unset, will use same as the input image.
694
+ input_data_format (`str` or `ChannelDimension`, *optional*):
695
+ The channel dimension format for the input image. Can be one of:
696
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
697
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
698
+ If unset, will use the inferred format of the input image.
699
+
700
+ Returns:
701
+ `np.ndarray`: The padded image.
702
+
703
+ """
704
+ if input_data_format is None:
705
+ input_data_format = infer_channel_dimension_format(image)
706
+
707
+ def _expand_for_data_format(values):
708
+ """
709
+ Convert values to be in the format expected by np.pad based on the data format.
710
+ """
711
+ if isinstance(values, (int, float)):
712
+ values = ((values, values), (values, values))
713
+ elif isinstance(values, tuple) and len(values) == 1:
714
+ values = ((values[0], values[0]), (values[0], values[0]))
715
+ elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):
716
+ values = (values, values)
717
+ elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):
718
+ values = values
719
+ else:
720
+ raise ValueError(f"Unsupported format: {values}")
721
+
722
+ # add 0 for channel dimension
723
+ values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))
724
+
725
+ # Add additional padding if there's a batch dimension
726
+ values = (0, *values) if image.ndim == 4 else values
727
+ return values
728
+
729
+ padding = _expand_for_data_format(padding)
730
+
731
+ if mode == PaddingMode.CONSTANT:
732
+ constant_values = _expand_for_data_format(constant_values)
733
+ image = np.pad(image, padding, mode="constant", constant_values=constant_values)
734
+ elif mode == PaddingMode.REFLECT:
735
+ image = np.pad(image, padding, mode="reflect")
736
+ elif mode == PaddingMode.REPLICATE:
737
+ image = np.pad(image, padding, mode="edge")
738
+ elif mode == PaddingMode.SYMMETRIC:
739
+ image = np.pad(image, padding, mode="symmetric")
740
+ else:
741
+ raise ValueError(f"Invalid padding mode: {mode}")
742
+
743
+ image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
744
+ return image
745
+
746
+
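A minimal usage sketch for `pad` with constant (zero) padding (module path assumed as above):

import numpy as np
from transformers.image_transforms import PaddingMode, pad  # assumed public module path

image = np.ones((3, 4, 4), dtype=np.float32)     # channels-first
# Pad 2 pixels on each side of the height and width axes with zeros.
padded = pad(image, padding=2, mode=PaddingMode.CONSTANT, constant_values=0.0)
print(padded.shape)   # (3, 8, 8)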
747
+ # TODO (Amy): Accept 1/3/4 channel numpy array as input and return np.array as default
748
+ def convert_to_rgb(image: ImageInput) -> ImageInput:
749
+ """
750
+ Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
751
+ as is.
752
+
753
+ Args:
754
+ image (Image):
755
+ The image to convert.
756
+ """
757
+ requires_backends(convert_to_rgb, ["vision"])
758
+
759
+ if not isinstance(image, PIL.Image.Image):
760
+ return image
761
+
762
+ image = image.convert("RGB")
763
+ return image
764
+
765
+
766
+ def flip_channel_order(
767
+ image: np.ndarray,
768
+ data_format: Optional[ChannelDimension] = None,
769
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
770
+ ) -> np.ndarray:
771
+ """
772
+ Flips the channel order of the image.
773
+
774
+ If the image is in RGB format, it will be converted to BGR and vice versa.
775
+
776
+ Args:
777
+ image (`np.ndarray`):
778
+ The image to flip.
779
+ data_format (`ChannelDimension`, *optional*):
780
+ The channel dimension format for the output image. Can be one of:
781
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
782
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
783
+ If unset, will use same as the input image.
784
+ input_data_format (`ChannelDimension`, *optional*):
785
+ The channel dimension format for the input image. Can be one of:
786
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
787
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
788
+ If unset, will use the inferred format of the input image.
789
+ """
790
+ input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format
791
+
792
+ if input_data_format == ChannelDimension.LAST:
793
+ image = image[..., ::-1]
794
+ elif input_data_format == ChannelDimension.FIRST:
795
+ image = image[::-1, ...]
796
+ else:
797
+ raise ValueError(f"Unsupported channel dimension: {input_data_format}")
798
+
799
+ if data_format is not None:
800
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
801
+ return image
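A minimal usage sketch for `flip_channel_order` (module path assumed as above):

import numpy as np
from transformers.image_transforms import flip_channel_order  # assumed public module path

rgb = np.zeros((2, 2, 3), dtype=np.uint8)   # channels-last
rgb[..., 0] = 255                           # pure red
bgr = flip_channel_order(rgb)               # RGB -> BGR
print(bgr[0, 0])                            # [  0   0 255]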
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__init__.py ADDED
@@ -0,0 +1,158 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ..utils import _LazyModule
17
+
18
+
19
+ _import_structure = {
20
+ "aqlm": ["replace_with_aqlm_linear"],
21
+ "awq": [
22
+ "fuse_awq_modules",
23
+ "post_init_awq_exllama_modules",
24
+ "replace_with_awq_linear",
25
+ ],
26
+ "bitsandbytes": [
27
+ "get_keys_to_not_convert",
28
+ "replace_8bit_linear",
29
+ "replace_with_bnb_linear",
30
+ "set_module_8bit_tensor_to_device",
31
+ "set_module_quantized_tensor_to_device",
32
+ ],
33
+ "deepspeed": [
34
+ "HfDeepSpeedConfig",
35
+ "HfTrainerDeepSpeedConfig",
36
+ "deepspeed_config",
37
+ "deepspeed_init",
38
+ "deepspeed_load_checkpoint",
39
+ "deepspeed_optim_sched",
40
+ "is_deepspeed_available",
41
+ "is_deepspeed_zero3_enabled",
42
+ "set_hf_deepspeed_config",
43
+ "unset_hf_deepspeed_config",
44
+ ],
45
+ "integration_utils": [
46
+ "INTEGRATION_TO_CALLBACK",
47
+ "AzureMLCallback",
48
+ "ClearMLCallback",
49
+ "CodeCarbonCallback",
50
+ "CometCallback",
51
+ "DagsHubCallback",
52
+ "DVCLiveCallback",
53
+ "FlyteCallback",
54
+ "MLflowCallback",
55
+ "NeptuneCallback",
56
+ "NeptuneMissingConfiguration",
57
+ "TensorBoardCallback",
58
+ "WandbCallback",
59
+ "get_available_reporting_integrations",
60
+ "get_reporting_integration_callbacks",
61
+ "hp_params",
62
+ "is_azureml_available",
63
+ "is_clearml_available",
64
+ "is_codecarbon_available",
65
+ "is_comet_available",
66
+ "is_dagshub_available",
67
+ "is_dvclive_available",
68
+ "is_flyte_deck_standard_available",
69
+ "is_flytekit_available",
70
+ "is_mlflow_available",
71
+ "is_neptune_available",
72
+ "is_optuna_available",
73
+ "is_ray_available",
74
+ "is_ray_tune_available",
75
+ "is_sigopt_available",
76
+ "is_tensorboard_available",
77
+ "is_wandb_available",
78
+ "rewrite_logs",
79
+ "run_hp_search_optuna",
80
+ "run_hp_search_ray",
81
+ "run_hp_search_sigopt",
82
+ "run_hp_search_wandb",
83
+ ],
84
+ "peft": ["PeftAdapterMixin"],
85
+ "quanto": ["replace_with_quanto_layers"],
86
+ }
87
+
88
+ if TYPE_CHECKING:
89
+ from .aqlm import replace_with_aqlm_linear
90
+ from .awq import (
91
+ fuse_awq_modules,
92
+ post_init_awq_exllama_modules,
93
+ replace_with_awq_linear,
94
+ )
95
+ from .bitsandbytes import (
96
+ get_keys_to_not_convert,
97
+ replace_8bit_linear,
98
+ replace_with_bnb_linear,
99
+ set_module_8bit_tensor_to_device,
100
+ set_module_quantized_tensor_to_device,
101
+ )
102
+ from .deepspeed import (
103
+ HfDeepSpeedConfig,
104
+ HfTrainerDeepSpeedConfig,
105
+ deepspeed_config,
106
+ deepspeed_init,
107
+ deepspeed_load_checkpoint,
108
+ deepspeed_optim_sched,
109
+ is_deepspeed_available,
110
+ is_deepspeed_zero3_enabled,
111
+ set_hf_deepspeed_config,
112
+ unset_hf_deepspeed_config,
113
+ )
114
+ from .integration_utils import (
115
+ INTEGRATION_TO_CALLBACK,
116
+ AzureMLCallback,
117
+ ClearMLCallback,
118
+ CodeCarbonCallback,
119
+ CometCallback,
120
+ DagsHubCallback,
121
+ DVCLiveCallback,
122
+ FlyteCallback,
123
+ MLflowCallback,
124
+ NeptuneCallback,
125
+ NeptuneMissingConfiguration,
126
+ TensorBoardCallback,
127
+ WandbCallback,
128
+ get_available_reporting_integrations,
129
+ get_reporting_integration_callbacks,
130
+ hp_params,
131
+ is_azureml_available,
132
+ is_clearml_available,
133
+ is_codecarbon_available,
134
+ is_comet_available,
135
+ is_dagshub_available,
136
+ is_dvclive_available,
137
+ is_flyte_deck_standard_available,
138
+ is_flytekit_available,
139
+ is_mlflow_available,
140
+ is_neptune_available,
141
+ is_optuna_available,
142
+ is_ray_available,
143
+ is_ray_tune_available,
144
+ is_sigopt_available,
145
+ is_tensorboard_available,
146
+ is_wandb_available,
147
+ rewrite_logs,
148
+ run_hp_search_optuna,
149
+ run_hp_search_ray,
150
+ run_hp_search_sigopt,
151
+ run_hp_search_wandb,
152
+ )
153
+ from .peft import PeftAdapterMixin
154
+ from .quanto import replace_with_quanto_layers
155
+ else:
156
+ import sys
157
+
158
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
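Because of the `_LazyModule` indirection above, importing a single helper does not eagerly import the optional third-party backends; a minimal sketch (assuming only `transformers` itself is installed):

from transformers.integrations import is_wandb_available

print(is_wandb_available())   # False unless the optional wandb package is installed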
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.52 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/aqlm.cpython-310.pyc ADDED
Binary file (2.76 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/awq.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/bitsandbytes.cpython-310.pyc ADDED
Binary file (9.95 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/deepspeed.cpython-310.pyc ADDED
Binary file (12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/integration_utils.cpython-310.pyc ADDED
Binary file (63.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/peft.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/quanto.cpython-310.pyc ADDED
Binary file (2.83 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/integrations/__pycache__/tpu.cpython-310.pyc ADDED
Binary file (865 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/integrations/aqlm.py ADDED
@@ -0,0 +1,99 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ "AQLM (Additive Quantization of Language Model) integration file"
15
+
16
+
17
+ from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available
18
+
19
+
20
+ if is_torch_available():
21
+ import torch.nn as nn
22
+
23
+
24
+ def replace_with_aqlm_linear(
25
+ model,
26
+ quantization_config=None,
27
+ linear_weights_not_to_quantize=None,
28
+ current_key_name=None,
29
+ has_been_replaced=False,
30
+ ):
31
+ """
32
+ Public method that recursively replaces the Linear layers of the given model with AQLM quantized layers.
33
+ `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the
34
+ conversion has been successful or not.
35
+
36
+ Args:
37
+ model (`torch.nn.Module`):
38
+ The model to convert, can be any `torch.nn.Module` instance.
39
+ quantization_config (`AqlmConfig`):
40
+ The quantization config object that contains the quantization parameters.
41
+ linear_weights_not_to_quantize (`list[str]`, *optional*):
42
+ A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
43
+ converted.
44
+ current_key_name (`list`, *optional*):
45
+ A list that contains the current key name. This is used for recursion and should not be passed by the user.
46
+ has_been_replaced (`bool`, *optional*):
47
+ A boolean that indicates if the conversion has been successful or not. This is used for recursion and
48
+ should not be passed by the user.
49
+ """
50
+ if not is_aqlm_available():
51
+ raise ValueError("AQLM is not available. Please install it with `pip install aqlm[cpu,gpu]`")
52
+
53
+ if not is_accelerate_available():
54
+ raise ValueError("AQLM requires Accelerate to be installed: `pip install accelerate`")
55
+
56
+ if linear_weights_not_to_quantize is None:
57
+ linear_weights_not_to_quantize = []
58
+
59
+ from accelerate import init_empty_weights
60
+ from aqlm import QuantizedLinear
61
+
62
+ for name, module in model.named_children():
63
+ if current_key_name is None:
64
+ current_key_name = []
65
+ current_key_name.append(name)
66
+
67
+ if isinstance(module, nn.Linear):
68
+ # Check if the current key is not in the `linear_weights_not_to_quantize`
69
+ if ".".join(current_key_name) + ".weight" not in linear_weights_not_to_quantize:
70
+ with init_empty_weights():
71
+ in_features = module.in_features
72
+ out_features = module.out_features
73
+
74
+ model._modules[name] = QuantizedLinear(
75
+ in_features,
76
+ out_features,
77
+ bias=module.bias is not None,
78
+ in_group_size=quantization_config.in_group_size,
79
+ out_group_size=quantization_config.out_group_size,
80
+ num_codebooks=quantization_config.num_codebooks,
81
+ nbits_per_codebook=quantization_config.nbits_per_codebook,
82
+ )
83
+ has_been_replaced = True
84
+
85
+ # Store the module class in case we need to transpose the weight later
86
+ model._modules[name].source_cls = type(module)
87
+ # Force requires grad to False to avoid unexpected errors
88
+ model._modules[name].requires_grad_(False)
89
+ if len(list(module.children())) > 0:
90
+ _, has_been_replaced = replace_with_aqlm_linear(
91
+ module,
92
+ quantization_config=quantization_config,
93
+ linear_weights_not_to_quantize=linear_weights_not_to_quantize,
94
+ current_key_name=current_key_name,
95
+ has_been_replaced=has_been_replaced,
96
+ )
97
+ # Remove the last key for recursion
98
+ current_key_name.pop(-1)
99
+ return model, has_been_replaced
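A minimal sketch of how this helper could be invoked directly (in practice it is typically driven by the quantizer machinery during model loading; `aqlm` and `accelerate` must be installed, and the config values and checkpoint id below are illustrative):

from transformers import AqlmConfig, AutoModelForCausalLM
from transformers.integrations import replace_with_aqlm_linear

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")   # any model with nn.Linear layers
quant_config = AqlmConfig(in_group_size=8, out_group_size=1, num_codebooks=1, nbits_per_codebook=16)
model, replaced = replace_with_aqlm_linear(
    model,
    quantization_config=quant_config,
    linear_weights_not_to_quantize=["lm_head.weight"],
)
print(replaced)   # True once at least one nn.Linear has been swapped for an AQLM QuantizedLinear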
env-llmeval/lib/python3.10/site-packages/transformers/integrations/awq.py ADDED
@@ -0,0 +1,421 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ "AWQ (Activation-aware Weight Quantization) integration file"
15
+ from ..activations import ACT2FN
16
+ from ..modeling_utils import PreTrainedModel
17
+ from ..utils import is_auto_awq_available, is_torch_available
18
+ from ..utils.quantization_config import (
19
+ AwqBackendPackingMethod,
20
+ AwqConfig,
21
+ AWQLinearVersion,
22
+ ExllamaVersion,
23
+ )
24
+
25
+
26
+ if is_torch_available():
27
+ import torch
28
+ import torch.nn as nn
29
+
30
+
31
+ AWQ_FUSED_MAPPINGS = {
32
+ "mistral": {
33
+ "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
34
+ "mlp": ["gate_proj", "up_proj", "down_proj"],
35
+ "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
36
+ "use_alibi": False,
37
+ },
38
+ "mixtral": {
39
+ "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
40
+ "mlp": ["w1", "w3", "w2"],
41
+ "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
42
+ "use_alibi": False,
43
+ "rope_theta": 1000000.0,
44
+ },
45
+ "llama": {
46
+ "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
47
+ "mlp": ["gate_proj", "up_proj", "down_proj"],
48
+ "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
49
+ "use_alibi": False,
50
+ },
51
+ "llava": {
52
+ "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
53
+ "mlp": ["gate_proj", "up_proj", "down_proj"],
54
+ "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
55
+ "use_alibi": False,
56
+ },
57
+ }
58
+
59
+
60
+ def replace_with_awq_linear(
61
+ model,
62
+ modules_to_not_convert=None,
63
+ quantization_config=None,
64
+ current_key_name=None,
65
+ has_been_replaced=False,
66
+ ) -> bool:
67
+ """
68
+ Public method that recursively replaces the Linear layers of the given model with AWQ quantized layers.
69
+ `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the
70
+ conversion has been successful or not.
71
+
72
+ During the module replacement, we also infer the backend to use through the `quantization_config` object.
73
+
74
+ Args:
75
+ model (`torch.nn.Module`):
76
+ The model to convert, can be any `torch.nn.Module` instance.
77
+ quantization_config (`AwqConfig`):
78
+ The quantization config object that contains the quantization parameters.
79
+ modules_to_not_convert (`list`, *optional*):
80
+ A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be
81
+ converted.
82
+ current_key_name (`list`, *optional*):
83
+ A list that contains the current key name. This is used for recursion and should not be passed by the user.
84
+ has_been_replaced (`bool`, *optional*):
85
+ A boolean that indicates if the conversion has been successful or not. This is used for recursion and
86
+ should not be passed by the user.
87
+ """
88
+ if modules_to_not_convert is None:
89
+ modules_to_not_convert = []
90
+
91
+ backend = quantization_config.backend
92
+
93
+ if not is_auto_awq_available():
94
+ raise ValueError(
95
+ "AWQ (either `autoawq` or `llmawq`) is not available. Please install it with `pip install autoawq` or check out the installation guide in https://github.com/mit-han-lab/llm-awq"
96
+ )
97
+
98
+ if backend == AwqBackendPackingMethod.AUTOAWQ:
99
+ if quantization_config.version == AWQLinearVersion.GEMM:
100
+ from awq.modules.linear.gemm import WQLinear_GEMM
101
+
102
+ target_cls = WQLinear_GEMM
103
+ elif quantization_config.version == AWQLinearVersion.GEMV:
104
+ from awq.modules.linear.gemv import WQLinear_GEMV
105
+
106
+ target_cls = WQLinear_GEMV
107
+ elif quantization_config.version == AWQLinearVersion.EXLLAMA:
108
+ if quantization_config.exllama_config["version"] == ExllamaVersion.ONE:
109
+ from awq.modules.linear.exllama import WQLinear_Exllama
110
+
111
+ target_cls = WQLinear_Exllama
112
+ elif quantization_config.exllama_config["version"] == ExllamaVersion.TWO:
113
+ from awq.modules.linear.exllamav2 import WQLinear_ExllamaV2
114
+
115
+ target_cls = WQLinear_ExllamaV2
116
+ else:
117
+ raise ValueError(f"Unrecognized Exllama version: {quantization_config.exllama_config['version']}")
118
+ else:
119
+ raise ValueError(f"Unrecognized AWQ version: {quantization_config.version}")
120
+ else:
121
+ from awq.quantize.qmodule import WQLinear
122
+
123
+ target_cls = WQLinear
124
+
125
+ for name, module in model.named_children():
126
+ if current_key_name is None:
127
+ current_key_name = []
128
+ current_key_name.append(name)
129
+
130
+ if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
131
+ # Check if the current key is not in the `modules_to_not_convert`
132
+ if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
133
+ in_features = module.in_features
134
+ out_features = module.out_features
135
+
136
+ model._modules[name] = target_cls(
137
+ w_bit=quantization_config.bits,
138
+ group_size=quantization_config.group_size,
139
+ in_features=in_features,
140
+ out_features=out_features,
141
+ bias=module.bias is not None,
142
+ dev=module.weight.device,
143
+ )
144
+ has_been_replaced = True
145
+
146
+ # Force requires grad to False to avoid unexpected errors
147
+ model._modules[name].requires_grad_(False)
148
+ if len(list(module.children())) > 0:
149
+ _, has_been_replaced = replace_with_awq_linear(
150
+ module,
151
+ modules_to_not_convert=modules_to_not_convert,
152
+ current_key_name=current_key_name,
153
+ quantization_config=quantization_config,
154
+ has_been_replaced=has_been_replaced,
155
+ )
156
+ # Remove the last key for recursion
157
+ current_key_name.pop(-1)
158
+ return model, has_been_replaced
159
+
160
+
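A minimal sketch of a direct call (in practice the replacement is triggered by `from_pretrained` on an AWQ checkpoint; `autoawq` must be installed, and the config values and checkpoint id below are illustrative):

from transformers import AutoModelForCausalLM, AwqConfig
from transformers.integrations import replace_with_awq_linear

awq_config = AwqConfig(bits=4, group_size=128, version="gemm")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
model, replaced = replace_with_awq_linear(
    model,
    quantization_config=awq_config,
    modules_to_not_convert=["lm_head"],
)
print(replaced)   # True once at least one nn.Linear has been swapped for a WQLinear layer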
161
+ def get_modules_to_fuse(model, quantization_config):
162
+ """
163
+ Returns the fusing mapping given the quantization config and the model
164
+
165
+ Args:
166
+ model (`~PreTrainedModel`):
167
+ The model to fuse - note this model should have been converted into AWQ format beforehand.
168
+ quantization_config (`~transformers.quantization_config.AwqConfig`):
169
+ The quantization configuration to use.
170
+ """
171
+ if not isinstance(model, PreTrainedModel):
172
+ raise ValueError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}")
173
+
174
+ # Always default to `quantization_config.modules_to_fuse`
175
+ if quantization_config.modules_to_fuse is not None:
176
+ current_fused_mapping = quantization_config.modules_to_fuse
177
+ current_fused_mapping["max_seq_len"] = quantization_config.fuse_max_seq_len
178
+ elif model.config.model_type in AWQ_FUSED_MAPPINGS:
179
+ current_fused_mapping = AWQ_FUSED_MAPPINGS[model.config.model_type]
180
+
181
+ # Properly deal with the case where we have a multi-modal model as well (e.g. Llava)
182
+ if not hasattr(model.config, "text_config"):
183
+ config = model.config
184
+ else:
185
+ config = model.config.text_config
186
+
187
+ # Handle hidden_size, num_attention_heads, num_key_value_heads on our own.
188
+ hidden_size = config.hidden_size
189
+ num_attention_heads = config.num_attention_heads
190
+ num_key_value_heads = getattr(config, "num_key_value_heads", num_attention_heads)
191
+
192
+ # Fill `current_fused_mapping` with the expected values
193
+ current_fused_mapping["hidden_size"] = hidden_size
194
+ current_fused_mapping["num_attention_heads"] = num_attention_heads
195
+ current_fused_mapping["num_key_value_heads"] = num_key_value_heads
196
+ current_fused_mapping["max_seq_len"] = quantization_config.fuse_max_seq_len
197
+ else:
198
+ raise ValueError(
199
+ "Fusing mapping not found either on the quantization config or the supported `AWQ_FUSED_MAPPINGS`. Please pass a `fused_mapping` argument"
200
+ " in the `quantization_config` or raise an issue on transformers https://github.com/huggingface/transformers to add its support."
201
+ )
202
+ return current_fused_mapping
203
+
204
+
205
+ def fuse_awq_modules(model, quantization_config):
206
+ """
207
+ Optionally fuse some modules in the model to speedup inference.
208
+
209
+ Args:
210
+ model (`~PreTrainedModel`):
211
+ The model to fuse - note this model should have been converted into AWQ format beforehand.
212
+ quantization_config (`Union[AwqConfig, dict]`):
213
+ The quantization configuration to use.
214
+ """
215
+ # We need to convert it from dict in order to get an AwqConfig object
216
+ # otherwise the fields `backend` etc. will not be available
217
+ # https://github.com/huggingface/transformers/pull/27411#discussion_r1414044495
218
+ if isinstance(quantization_config, dict):
219
+ quantization_config = AwqConfig.from_dict(quantization_config)
220
+ backend = quantization_config.backend
221
+
222
+ modules_to_fuse = get_modules_to_fuse(model, quantization_config)
223
+ modules_to_not_convert = getattr(quantization_config, "modules_to_not_convert", None)
224
+
225
+ if backend == AwqBackendPackingMethod.AUTOAWQ:
226
+ from awq.modules.fused.attn import QuantAttentionFused
227
+ from awq.modules.fused.mlp import QuantFusedMLP
228
+ from awq.modules.fused.norm import FasterTransformerRMSNorm
229
+ else:
230
+ raise ValueError("Fusing is only supported for the AutoAWQ backend")
231
+
232
+ for name, module in model.named_modules():
233
+ if modules_to_not_convert is not None:
234
+ if any(module_name_to_not_convert in name for module_name_to_not_convert in modules_to_not_convert):
235
+ continue
236
+
237
+ # Replace layer norms
238
+ _fuse_awq_layernorm(modules_to_fuse["layernorm"], module, FasterTransformerRMSNorm)
239
+
240
+ # Replace MLP layers
241
+ _fuse_awq_mlp(model, name, modules_to_fuse["mlp"], module, QuantFusedMLP)
242
+
243
+ # Replace attention layers
244
+ _fuse_awq_attention_layers(model, module, modules_to_fuse, name, QuantAttentionFused)
245
+ return model
246
+
247
+
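Fusing is normally requested through the quantization config rather than by calling `fuse_awq_modules` directly; a minimal sketch (assuming `autoawq` is installed and using an illustrative AWQ-format checkpoint id):

from transformers import AutoModelForCausalLM, AwqConfig

quant_config = AwqConfig(bits=4, do_fuse=True, fuse_max_seq_len=512)
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Mistral-7B-v0.1-AWQ",      # any AWQ-format checkpoint works here
    quantization_config=quant_config,
)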
248
+ def _fuse_awq_layernorm(fuse_module_names, module, target_cls):
249
+ """
250
+ Fuse the LayerNorm layers into a target class using autoawq
251
+
252
+ Args:
253
+ fuse_module_names (`List[str]`):
254
+ The list of module names to fuse
255
+ module (`nn.Module`):
256
+ The pytorch parent module that has layernorm modules to fuse
257
+ target_cls (`~autoawq.FasterTransformerRMSNorm`):
258
+ The `FasterTransformerRMSNorm` class, as only that class is supported
259
+ for now.
260
+ """
261
+ for module_name in fuse_module_names:
262
+ if hasattr(module, module_name):
263
+ old_module = getattr(module, module_name)
264
+ module._modules[module_name] = target_cls(
265
+ old_module.weight,
266
+ old_module.variance_epsilon,
267
+ ).to(old_module.weight.device)
268
+ del old_module
269
+
270
+
271
+ def _fuse_awq_mlp(model, current_module_name, fuse_module_names, module, target_cls):
272
+ """
273
+ Fuse the MLP layers into a target class using autoawq
274
+
275
+ Args:
276
+ model (`~PreTrainedModel`):
277
+ The input pretrained model
278
+ current_module_name (`str`):
279
+ The current submodule name
280
+ fuse_module_names (`List[str]`):
281
+ The list of module names to fuse. For the MLP layers it has to be an array
282
+ of length 3 that consists of the 3 MLP layers in the order (gate (dense layer post-attention) / up / down layers)
283
+ module (`nn.Module`):
284
+ The pytorch parent module that has layernorm modules to fuse
285
+ target_cls (`~autoawq.QuantFusedMLP`):
286
+ The `QuantFusedMLP` class, as only that class is supported
287
+ for now.
288
+ """
289
+ if len(fuse_module_names) == 0:
290
+ return
291
+
292
+ if hasattr(module, fuse_module_names[0]):
293
+ gate_proj = getattr(module, fuse_module_names[0])
294
+ up_proj = getattr(module, fuse_module_names[1])
295
+ down_proj = getattr(module, fuse_module_names[2])
296
+
297
+ previous_device = gate_proj.qweight.device
298
+
299
+ # Deal also with the case model has `text_config` attribute
300
+ hidden_act = (
301
+ model.config.hidden_act
302
+ if not hasattr(model.config, "text_config")
303
+ else model.config.text_config.hidden_act
304
+ )
305
+ activation_fn = ACT2FN[hidden_act]
306
+ new_module = target_cls(gate_proj, down_proj, up_proj, activation_fn)
307
+
308
+ parent_name, child_name = current_module_name.rsplit(".", 1)
309
+ parent = model.get_submodule(parent_name)
310
+ setattr(parent, child_name, new_module.to(previous_device))
311
+
312
+ del gate_proj, up_proj, down_proj
313
+
314
+
315
+ def _fuse_awq_attention_layers(model, module, modules_to_fuse, current_module_name, target_cls):
316
+ """
317
+ Fuse the Attention layers into a target class using autoawq
318
+
319
+ Args:
320
+ model (`~PreTrainedModel`):
321
+ The input pretrained model
322
+ module (`nn.Module`):
323
+ The pytorch parent module that has layernorm modules to fuse
324
+ modules_to_fuse (`Dict[str, Any]`):
325
+ The module fusing mapping. The dictionary has to contain a field `attention` with attention module names
326
+ in the correct order: q, k, v, o layer
327
+ current_module_name (`str`):
328
+ The current submodule name
329
+ target_cls (`~autoawq.QuantAttentionFused`):
330
+ The `QuantAttentionFused` class, as only that class is supported
331
+ for now.
332
+ """
333
+ from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV
334
+
335
+ if len(modules_to_fuse["attention"]) == 0:
336
+ return
337
+
338
+ if hasattr(module, modules_to_fuse["attention"][0]):
339
+ # First, we pack the QKV layers together
340
+ q_proj = getattr(module, modules_to_fuse["attention"][0])
341
+
342
+ if isinstance(q_proj, WQLinear_GEMV):
343
+ linear_target_cls = WQLinear_GEMV
344
+ cat_dim = 0
345
+ elif isinstance(q_proj, WQLinear_GEMM):
346
+ linear_target_cls = WQLinear_GEMM
347
+ cat_dim = 1
348
+ else:
349
+ raise ValueError(f"Unsupported q_proj type: {type(q_proj)}")
350
+
351
+ previous_device = q_proj.qweight.device
352
+
353
+ k_proj = getattr(module, modules_to_fuse["attention"][1])
354
+ v_proj = getattr(module, modules_to_fuse["attention"][2])
355
+ o_proj = getattr(module, modules_to_fuse["attention"][3])
356
+
357
+ bias = torch.cat([q_proj.bias, k_proj.bias, v_proj.bias], dim=0) if q_proj.bias is not None else None
358
+
359
+ qkv_layer = linear_target_cls(
360
+ q_proj.w_bit,
361
+ q_proj.group_size,
362
+ q_proj.in_features,
363
+ q_proj.out_features + k_proj.out_features + v_proj.out_features,
364
+ q_proj.bias is not None,
365
+ next(iter(module.state_dict().values())).device,
366
+ )
367
+
368
+ qkv_layer.qweight = torch.cat([q_proj.qweight, k_proj.qweight, v_proj.qweight], dim=cat_dim)
369
+ qkv_layer.qzeros = torch.cat([q_proj.qzeros, k_proj.qzeros, v_proj.qzeros], dim=cat_dim)
370
+ qkv_layer.scales = torch.cat([q_proj.scales, k_proj.scales, v_proj.scales], dim=cat_dim)
371
+
372
+ if isinstance(qkv_layer, WQLinear_GEMV):
373
+ qkv_layer.split_k_iters = q_proj.split_k_iters
374
+
375
+ qkv_layer.bias = bias
376
+
377
+ fused_attention_layer = target_cls(
378
+ modules_to_fuse["hidden_size"],
379
+ modules_to_fuse["num_attention_heads"],
380
+ modules_to_fuse["num_key_value_heads"],
381
+ qkv_layer,
382
+ o_proj,
383
+ previous_device,
384
+ modules_to_fuse["max_seq_len"],
385
+ use_alibi=modules_to_fuse["use_alibi"],
386
+ # The default value in autoawq is set to 10000.0
387
+ rope_theta=modules_to_fuse.get("rope_theta", 10000.0),
388
+ )
389
+
390
+ fused_attention_layer.is_hf_transformers = True
391
+
392
+ parent_name, child_name = current_module_name.rsplit(".", 1)
393
+ parent = model.get_submodule(parent_name)
394
+ setattr(parent, child_name, fused_attention_layer.to(previous_device))
395
+
396
+ del q_proj, k_proj, v_proj, o_proj
397
+
398
+
399
+ def post_init_awq_exllama_modules(model, exllama_config):
400
+ """
401
+ Runs post init for Exllama layers which performs:
402
+ - Weights unpacking, reordering and repacking
403
+ - Devices scratch space allocation
404
+ """
405
+
406
+ if exllama_config["version"] == ExllamaVersion.ONE:
407
+ from awq.modules.linear.exllama import exllama_post_init
408
+
409
+ model = exllama_post_init(model)
410
+ elif exllama_config["version"] == ExllamaVersion.TWO:
411
+ from awq.modules.linear.exllamav2 import exllamav2_post_init
412
+
413
+ model = exllamav2_post_init(
414
+ model,
415
+ max_input_len=exllama_config["max_input_len"],
416
+ max_batch_size=exllama_config["max_batch_size"],
417
+ )
418
+ else:
419
+ raise ValueError(f"Unrecognized Exllama version: {exllama_config['version']}")
420
+
421
+ return model
env-llmeval/lib/python3.10/site-packages/transformers/integrations/bitsandbytes.py ADDED
@@ -0,0 +1,321 @@
1
+ import importlib.metadata
2
+ import warnings
3
+ from copy import deepcopy
4
+ from inspect import signature
5
+
6
+ from packaging import version
7
+
8
+ from ..utils import is_accelerate_available, is_bitsandbytes_available, logging
9
+
10
+
11
+ if is_bitsandbytes_available():
12
+ import bitsandbytes as bnb
13
+ import torch
14
+ import torch.nn as nn
15
+
16
+ from ..pytorch_utils import Conv1D
17
+
18
+ if is_accelerate_available():
19
+ from accelerate import init_empty_weights
20
+ from accelerate.utils import find_tied_parameters
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, quantized_stats=None):
26
+ """
27
+ A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
28
+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). The
29
+ function is adapted from `set_module_tensor_to_device` function from accelerate that is adapted to support the
30
+ class `Int8Params` from `bitsandbytes`.
31
+
32
+ Args:
33
+ module (`torch.nn.Module`):
34
+ The module in which the tensor we want to move lives.
35
+ tensor_name (`str`):
36
+ The full name of the parameter/buffer.
37
+ device (`int`, `str` or `torch.device`):
38
+ The device on which to set the tensor.
39
+ value (`torch.Tensor`, *optional*):
40
+ The value of the tensor (useful when going from the meta device to any other device).
41
+ quantized_stats (`dict[str, Any]`, *optional*):
42
+ Dict with items for either 4-bit or 8-bit serialization
43
+ """
44
+ # Recurse if needed
45
+ if "." in tensor_name:
46
+ splits = tensor_name.split(".")
47
+ for split in splits[:-1]:
48
+ new_module = getattr(module, split)
49
+ if new_module is None:
50
+ raise ValueError(f"{module} has no attribute {split}.")
51
+ module = new_module
52
+ tensor_name = splits[-1]
53
+
54
+ if tensor_name not in module._parameters and tensor_name not in module._buffers:
55
+ raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
56
+ is_buffer = tensor_name in module._buffers
57
+ old_value = getattr(module, tensor_name)
58
+
59
+ if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
60
+ raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put it on {device}.")
61
+
62
+ prequantized_loading = quantized_stats is not None
63
+ if is_buffer or not is_bitsandbytes_available():
64
+ is_8bit = False
65
+ is_4bit = False
66
+ else:
67
+ is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
68
+ is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
69
+
70
+ if is_8bit or is_4bit:
71
+ param = module._parameters[tensor_name]
72
+ if param.device.type != "cuda":
73
+ if value is None:
74
+ new_value = old_value.to(device)
75
+ elif isinstance(value, torch.Tensor):
76
+ new_value = value.to("cpu")
77
+ else:
78
+ new_value = torch.tensor(value, device="cpu")
79
+
80
+ # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
81
+ # Since weights are saved in the correct "orientation", we skip transposing when loading.
82
+ if issubclass(module.source_cls, Conv1D) and not prequantized_loading:
83
+ new_value = new_value.T
84
+
85
+ kwargs = old_value.__dict__
86
+
87
+ if prequantized_loading != (new_value.dtype in (torch.int8, torch.uint8)):
88
+ raise ValueError(
89
+ f"Value dtype `{new_value.dtype}` is not compatible with parameter quantization status."
90
+ )
91
+
92
+ if is_8bit:
93
+ is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
94
+ "0.37.2"
95
+ )
96
+ if new_value.dtype in (torch.int8, torch.uint8) and not is_8bit_serializable:
97
+ raise ValueError(
98
+ "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
99
+ "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
100
+ )
101
+ new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
102
+ if prequantized_loading:
103
+ setattr(new_value, "SCB", quantized_stats["SCB"].to(device))
104
+ elif is_4bit:
105
+ if prequantized_loading:
106
+ is_4bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
107
+ "0.41.3"
108
+ )
109
+ if new_value.dtype in (torch.int8, torch.uint8) and not is_4bit_serializable:
110
+ raise ValueError(
111
+ "Detected 4-bit weights but the version of bitsandbytes is not compatible with 4-bit serialization. "
112
+ "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
113
+ )
114
+ new_value = bnb.nn.Params4bit.from_prequantized(
115
+ data=new_value,
116
+ quantized_stats=quantized_stats,
117
+ requires_grad=False,
118
+ device=device,
119
+ **kwargs,
120
+ )
121
+ else:
122
+ new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
123
+ module._parameters[tensor_name] = new_value
124
+
125
+ else:
126
+ if value is None:
127
+ new_value = old_value.to(device)
128
+ elif isinstance(value, torch.Tensor):
129
+ new_value = value.to(device)
130
+ else:
131
+ new_value = torch.tensor(value, device=device)
132
+
133
+ if is_buffer:
134
+ module._buffers[tensor_name] = new_value
135
+ else:
136
+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
137
+ module._parameters[tensor_name] = new_value
138
+
139
+
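Below is a minimal, hedged sketch of exercising this setter on a plain (non-quantized) module; the import path is an assumption based on where this diff places the bitsandbytes integration, and the module/parameter names are placeholders.

import torch
from torch import nn

# Assumed import location for the helper defined above.
from transformers.integrations.bitsandbytes import set_module_quantized_tensor_to_device

tiny = nn.Sequential(nn.Linear(4, 4))
new_weight = torch.randn(4, 4)
# Overwrite the nested parameter "0.weight" in place; the dotted name exercises the
# recursion above. With a CUDA device and a bnb Int8Params/Params4bit parameter,
# the same call would also quantize the incoming value.
set_module_quantized_tensor_to_device(tiny, "0.weight", device="cpu", value=new_weight)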
140
+ def _replace_with_bnb_linear(
141
+ model,
142
+ modules_to_not_convert=None,
143
+ current_key_name=None,
144
+ quantization_config=None,
145
+ has_been_replaced=False,
146
+ ):
147
+ """
148
+ Private method that wraps the recursion for module replacement.
149
+
150
+ Returns the converted model and a boolean indicating whether the conversion was successful.
151
+ """
152
+ for name, module in model.named_children():
153
+ if current_key_name is None:
154
+ current_key_name = []
155
+ current_key_name.append(name)
156
+
157
+ if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
158
+ # Check if the current key is not in the `modules_to_not_convert`
159
+ if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
160
+ with init_empty_weights():
161
+ if isinstance(module, Conv1D):
162
+ in_features, out_features = module.weight.shape
163
+ else:
164
+ in_features = module.in_features
165
+ out_features = module.out_features
166
+
167
+ if quantization_config.quantization_method() == "llm_int8":
168
+ model._modules[name] = bnb.nn.Linear8bitLt(
169
+ in_features,
170
+ out_features,
171
+ module.bias is not None,
172
+ has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
173
+ threshold=quantization_config.llm_int8_threshold,
174
+ )
175
+ has_been_replaced = True
176
+ else:
177
+ if (
178
+ quantization_config.llm_int8_skip_modules is not None
179
+ and name in quantization_config.llm_int8_skip_modules
180
+ ):
181
+ pass
182
+ else:
183
+ extra_kwargs = (
184
+ {"quant_storage": quantization_config.bnb_4bit_quant_storage}
185
+ if "quant_storage" in list(signature(bnb.nn.Linear4bit).parameters)
186
+ else {}
187
+ )
188
+ model._modules[name] = bnb.nn.Linear4bit(
189
+ in_features,
190
+ out_features,
191
+ module.bias is not None,
192
+ quantization_config.bnb_4bit_compute_dtype,
193
+ compress_statistics=quantization_config.bnb_4bit_use_double_quant,
194
+ quant_type=quantization_config.bnb_4bit_quant_type,
195
+ **extra_kwargs,
196
+ )
197
+ has_been_replaced = True
198
+ # Store the module class in case we need to transpose the weight later
199
+ model._modules[name].source_cls = type(module)
200
+ # Force requires grad to False to avoid unexpected errors
201
+ model._modules[name].requires_grad_(False)
202
+ if len(list(module.children())) > 0:
203
+ _, has_been_replaced = _replace_with_bnb_linear(
204
+ module,
205
+ modules_to_not_convert,
206
+ current_key_name,
207
+ quantization_config,
208
+ has_been_replaced=has_been_replaced,
209
+ )
210
+ # Remove the last key for recursion
211
+ current_key_name.pop(-1)
212
+ return model, has_been_replaced
213
+
214
+
215
+ def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
216
+ """
217
+ A helper function to replace all `torch.nn.Linear` modules with `bnb.nn.Linear8bitLt` modules from the `bitsandbytes`
218
+ library. This will enable running your models using mixed int8 precision as described by the paper `LLM.int8():
219
+ 8-bit Matrix Multiplication for Transformers at Scale`. Make sure a `bitsandbytes` build compiled against the CUDA
220
+ version of your hardware is installed before running this function: `pip install bitsandbytes`.
222
+
223
+ The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head` that should
224
+ be kept as a `torch.nn.Linear` module. The replacement is done under `init_empty_weights` context manager so no
225
+ CPU/GPU memory is required to run this function. Int8 mixed-precision matrix decomposition works by separating a
226
+ matrix multiplication into two streams: (1) a systematic feature outlier stream matrix multiplied in fp16
227
+ (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no
228
+ predictive degradation is possible for very large models (>=176B parameters).
229
+
230
+ Parameters:
231
+ model (`torch.nn.Module`):
232
+ Input model or `torch.nn.Module` as the function is run recursively.
233
+ modules_to_not_convert (`List[`str`]`, *optional*, defaults to `["lm_head"]`):
234
+ Names of the modules to not convert to `Linear8bitLt`. In practice we keep the `lm_head` in full precision
235
+ for numerical stability reasons.
236
+ current_key_name (`List[`str`]`, *optional*):
237
+ An array to track the current key of the recursion. This is used to check whether the current key (part of
238
+ it) is not in the list of modules to not convert (for instance, modules that are offloaded to `cpu` or
239
+ `disk`).
240
+ """
241
+ modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
242
+ model, has_been_replaced = _replace_with_bnb_linear(
243
+ model, modules_to_not_convert, current_key_name, quantization_config
244
+ )
245
+
246
+ if not has_been_replaced:
247
+ logger.warning(
248
+ "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
249
+ " Please double check your model architecture, or submit an issue on github if you think this is"
250
+ " a bug."
251
+ )
252
+
253
+ return model
254
+
255
+
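A hedged usage sketch follows; in practice `from_pretrained(..., quantization_config=...)` drives this replacement, so calling the helper directly is only for illustration. The model name, config values, and import path are assumptions, and `bitsandbytes` must be installed.

from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from transformers.integrations.bitsandbytes import replace_with_bnb_linear  # assumed path

quant_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
model = replace_with_bnb_linear(model, quantization_config=quant_config)
# Linear/Conv1D sub-modules (except lm_head) are now bnb.nn.Linear8bitLt shells created
# under init_empty_weights, i.e. their weights still need to be (re)loaded and quantized.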
256
+ # For backward compatibility
257
+ def replace_8bit_linear(*args, **kwargs):
258
+ warnings.warn(
259
+ "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
260
+ FutureWarning,
261
+ )
262
+ return replace_with_bnb_linear(*args, **kwargs)
263
+
264
+
265
+ # For backward compatibility
266
+ def set_module_8bit_tensor_to_device(*args, **kwargs):
267
+ warnings.warn(
268
+ "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
269
+ FutureWarning,
270
+ )
271
+ return set_module_quantized_tensor_to_device(*args, **kwargs)
272
+
273
+
274
+ def get_keys_to_not_convert(model):
275
+ r"""
276
+ A utility function to get the keys of the modules to keep in full precision, if any. For example, for CausalLM modules
277
+ we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want
278
+ to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in
279
+ int8.
280
+
281
+ Parameters:
282
+ model (`torch.nn.Module`):
283
+ Input model
284
+ """
285
+ # Create a copy of the model and tie the weights, then
286
+ # check if it contains tied weights
287
+ tied_model = deepcopy(model) # this has 0 cost since it is done inside the `init_empty_weights` context manager
288
+ tied_model.tie_weights()
289
+
290
+ tied_params = find_tied_parameters(tied_model)
291
+ # For compatibility with Accelerate < 0.18
292
+ if isinstance(tied_params, dict):
293
+ tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
294
+ else:
295
+ tied_keys = sum(tied_params, [])
296
+ has_tied_params = len(tied_keys) > 0
297
+
298
+ # If there are no tied weights, we want to keep the lm_head (output embedding) in full precision
299
+ if not has_tied_params:
300
+ output_emb = model.get_output_embeddings()
301
+ if output_emb is not None:
302
+ list_last_module = [name for name, module in model.named_modules() if id(module) == id(output_emb)]
303
+ return list_last_module
304
+
305
+ # otherwise (tied weights and/or no output embedding), keep the last module together with the tied weights in full precision
306
+ list_modules = list(model.named_parameters())
307
+ list_last_module = [list_modules[-1][0]]
308
+ # add last module together with tied weights
309
+ intersection = set(list_last_module) - set(tied_keys)
310
+ list_untouched = list(set(tied_keys)) + list(intersection)
311
+
312
+ # remove ".weight" from the keys
313
+ names_to_remove = [".weight", ".bias"]
314
+ filtered_module_names = []
315
+ for name in list_untouched:
316
+ for name_to_remove in names_to_remove:
317
+ if name_to_remove in name:
318
+ name = name.replace(name_to_remove, "")
319
+ filtered_module_names.append(name)
320
+
321
+ return filtered_module_names
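An illustrative sketch of inspecting this helper's output for a small causal LM with tied embeddings; the model name and import path are assumptions, and the exact names returned depend on the architecture.

from transformers import AutoModelForCausalLM
from transformers.integrations.bitsandbytes import get_keys_to_not_convert  # assumed path

model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
# Expected to print module names covering the tied embeddings / lm_head, which can then
# be passed as `modules_to_not_convert` to replace_with_bnb_linear.
print(get_keys_to_not_convert(model))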
env-llmeval/lib/python3.10/site-packages/transformers/integrations/deepspeed.py ADDED
@@ -0,0 +1,438 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Integration with Deepspeed
16
+ """
17
+ import copy
18
+ import importlib.metadata as importlib_metadata
19
+ import importlib.util
20
+ import weakref
21
+ from functools import partialmethod
22
+
23
+ from ..dependency_versions_check import dep_version_check
24
+ from ..utils import is_accelerate_available, is_torch_available, logging
25
+
26
+
27
+ if is_torch_available():
28
+ import torch
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ def is_deepspeed_available():
35
+ package_exists = importlib.util.find_spec("deepspeed") is not None
36
+
37
+ # Check we're not importing a "deepspeed" directory somewhere but the actual library by trying to grab the version
38
+ # AND checking it has an author field in the metadata that is HuggingFace.
39
+ if package_exists:
40
+ try:
41
+ _ = importlib_metadata.metadata("deepspeed")
42
+ return True
43
+ except importlib_metadata.PackageNotFoundError:
44
+ return False
45
+
46
+
47
+ if is_accelerate_available() and is_deepspeed_available():
48
+ from accelerate.utils.deepspeed import HfDeepSpeedConfig as DeepSpeedConfig
49
+ else:
50
+ # Inherits from a dummy `object` if accelerate is not available, so that Python can still import this file.
51
+ # Deepspeed glue code will never inherit this dummy object as it checks if accelerate is available.
52
+ from builtins import object as DeepSpeedConfig
53
+
54
+
55
+ class HfDeepSpeedConfig(DeepSpeedConfig):
56
+ """
57
+ This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
58
+
59
+ A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
60
+ things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore
61
+ it's important that this object remains alive while the program is still running.
62
+
63
+ [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration
64
+ with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic
65
+ the DeepSpeed configuration is not modified in any way.
66
+
67
+ Args:
68
+ config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
69
+
70
+ """
71
+
72
+ def __init__(self, config_file_or_dict):
73
+ # set global weakref object
74
+ set_hf_deepspeed_config(self)
75
+ dep_version_check("accelerate")
76
+ dep_version_check("deepspeed")
77
+ super().__init__(config_file_or_dict)
78
+
79
+
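A minimal sketch, assuming `deepspeed` and `accelerate` are installed and the script runs under a distributed launcher, of the non-Trainer usage the docstring above alludes to: the config object must be created before `from_pretrained` and kept alive so the weakref-based helpers further down can see it. The config dict and model name are placeholders.

from transformers import AutoModel
from transformers.integrations.deepspeed import HfDeepSpeedConfig

ds_config = {"zero_optimization": {"stage": 3}, "train_micro_batch_size_per_gpu": 1}
dschf = HfDeepSpeedConfig(ds_config)  # must stay referenced for as long as the model is used
model = AutoModel.from_pretrained("openai-community/gpt2")  # loaded with ZeRO-3 partitioning enabled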
80
+ class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig):
81
+ """
82
+ The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has the
83
+ same lifespan as the latter.
84
+ """
85
+
86
+ def __init__(self, config_file_or_dict):
87
+ super().__init__(config_file_or_dict)
88
+ self._dtype = None
89
+ self.mismatches = []
90
+
91
+ def dtype(self):
92
+ if self._dtype is None:
93
+ raise ValueError("trainer_config_process() wasn't called yet to tell dtype")
94
+ return self._dtype
95
+
96
+ def is_auto(self, ds_key_long):
97
+ val = self.get_value(ds_key_long)
98
+ if val is None:
99
+ return False
100
+ else:
101
+ return val == "auto"
102
+
103
+ def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True):
104
+ """
105
+ A utility method that massages the config file and can optionally verify that the values match.
106
+
107
+ 1. Replace "auto" values with `TrainingArguments` value.
108
+
109
+ 2. If it wasn't "auto" and `must_match` is true, then check that DS config matches Trainer
110
+ config values and, if mismatched, add the entry to `self.mismatches` - a ValueError will be raised during
111
+ `trainer_config_finalize` for one or more mismatches.
112
+
113
+ """
114
+ config, ds_key = self.find_config_node(ds_key_long)
115
+ if config is None:
116
+ return
117
+
118
+ if config.get(ds_key) == "auto":
119
+ config[ds_key] = hf_val
120
+ return
121
+
122
+ if not must_match:
123
+ return
124
+
125
+ ds_val = config.get(ds_key)
126
+ if ds_val is not None and ds_val != hf_val:
127
+ self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}")
128
+
129
+ fill_only = partialmethod(fill_match, must_match=False)
130
+
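A toy illustration, assuming `accelerate` and `deepspeed` are installed, of what `fill_match` does with an `"auto"` placeholder; a concrete value that disagreed with the Trainer value would instead be recorded in `self.mismatches`.

from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig

cfg = HfTrainerDeepSpeedConfig({"optimizer": {"params": {"lr": "auto"}}})
cfg.fill_match("optimizer.params.lr", 5e-5, "learning_rate")
assert cfg.config["optimizer"]["params"]["lr"] == 5e-5  # "auto" replaced by the HF value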
131
+ def trainer_config_process(self, args, auto_find_batch_size=False):
132
+ """
133
+ Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object
134
+ creation.
135
+ """
136
+ # DeepSpeed does:
137
+ # train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps
138
+ train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps
139
+ self.fill_match(
140
+ "train_micro_batch_size_per_gpu",
141
+ args.per_device_train_batch_size,
142
+ "per_device_train_batch_size",
143
+ not auto_find_batch_size,
144
+ )
145
+ self.fill_match(
146
+ "gradient_accumulation_steps",
147
+ args.gradient_accumulation_steps,
148
+ "gradient_accumulation_steps",
149
+ )
150
+ self.fill_match(
151
+ "train_batch_size",
152
+ train_batch_size,
153
+ "train_batch_size (calculated)",
154
+ not auto_find_batch_size,
155
+ )
156
+ self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm")
157
+
158
+ self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate")
159
+ self.fill_match(
160
+ "optimizer.params.betas",
161
+ [args.adam_beta1, args.adam_beta2],
162
+ "adam_beta1+adam_beta2",
163
+ )
164
+ self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon")
165
+ self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay")
166
+
167
+ self.fill_only("scheduler.params.warmup_min_lr", 0) # not a trainer arg
168
+ self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate")
169
+ # total_num_steps - will get set in trainer_config_finalize
170
+
171
+ # fp16
172
+ if args.fp16 or args.fp16_full_eval:
173
+ fp16_backend = "apex" if args.fp16_backend == "apex" else "amp"
174
+ else:
175
+ fp16_backend = None
176
+
177
+ if args.save_on_each_node:
178
+ # deepspeed uses shared storage by default. Let's override this setting if save_on_each_node == True
179
+ self.config["checkpoint"] = self.config.get("checkpoint", {})
180
+ self.config["checkpoint"]["use_node_local_storage"] = args.save_on_each_node
181
+
182
+ # amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set
183
+ # any here unless the user did the work
184
+ self.fill_match(
185
+ "fp16.enabled",
186
+ ((args.fp16 or args.fp16_full_eval) and fp16_backend == "amp"),
187
+ "fp16|fp16_full_eval+fp16_backend(amp)",
188
+ )
189
+
190
+ # apex: delegates amp work to apex (which needs to be available), but it cannot be used with any
191
+ # ZeRO features
192
+ self.fill_match("amp.enabled", fp16_backend == "apex", "fp16+fp16_backend(apex)")
193
+ self.fill_match("amp.opt_level", args.fp16_opt_level, "fp16_opt_level")
194
+
195
+ self.fill_match("bf16.enabled", (args.bf16 or args.bf16_full_eval), "bf16|bf16_full_eval")
196
+
197
+ # deepspeed's default mode is fp16 unless there is a config that says differently
198
+ if self.is_true("bf16.enabled"):
199
+ self._dtype = torch.bfloat16
200
+ elif self.is_false("fp16.enabled"):
201
+ self._dtype = torch.float32
202
+ else:
203
+ self._dtype = torch.float16
204
+
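A worked instance, with made-up values, of the batch-size identity noted in the comment at the top of `trainer_config_process`:

world_size, per_device_train_batch_size, gradient_accumulation_steps = 8, 4, 2
train_batch_size = world_size * per_device_train_batch_size * gradient_accumulation_steps
assert train_batch_size == 64  # the value DeepSpeed's "train_batch_size" must match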
205
+ def trainer_config_finalize(self, args, model, num_training_steps):
206
+ """
207
+ This stage is run after we have the model and know num_training_steps.
208
+
209
+ Now we can complete the configuration process.
210
+ """
211
+ # zero
212
+
213
+ # deal with config keys that use `auto` value and rely on model's hidden_size
214
+ hidden_size_based_keys = [
215
+ "zero_optimization.reduce_bucket_size",
216
+ "zero_optimization.stage3_prefetch_bucket_size",
217
+ "zero_optimization.stage3_param_persistence_threshold",
218
+ ]
219
+ hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)]
220
+
221
+ if len(hidden_size_auto_keys) > 0:
222
+ if hasattr(model.config, "hidden_size"):
223
+ hidden_size = model.config.hidden_size
224
+ elif hasattr(model.config, "hidden_sizes"):
225
+ # if there are many hidden sizes pick the largest one
226
+ hidden_size = max(model.config.hidden_sizes)
227
+ else:
228
+ raise ValueError(
229
+ "The model's config file has neither `hidden_size` nor `hidden_sizes` entry, "
230
+ "therefore it's not possible to automatically fill out the following `auto` entries "
231
+ f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing "
232
+ "`auto` values for these keys with an integer value of your choice."
233
+ )
234
+
235
+ self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size)
236
+ if self.is_zero3():
237
+ # automatically assign the optimal config values based on model config
238
+ self.fill_only(
239
+ "zero_optimization.stage3_prefetch_bucket_size",
240
+ 0.9 * hidden_size * hidden_size,
241
+ )
242
+ self.fill_only(
243
+ "zero_optimization.stage3_param_persistence_threshold",
244
+ 10 * hidden_size,
245
+ )
246
+
247
+ # scheduler
248
+ self.fill_match(
249
+ "scheduler.params.total_num_steps",
250
+ num_training_steps,
251
+ "num_training_steps (calculated)",
252
+ )
253
+ self.fill_match(
254
+ "scheduler.params.warmup_num_steps",
255
+ args.get_warmup_steps(num_training_steps),
256
+ "warmup_steps",
257
+ )
258
+
259
+ if len(self.mismatches) > 0:
260
+ mismatches = "\n".join(self.mismatches)
261
+ raise ValueError(
262
+ "Please correct the following DeepSpeed config values that mismatch TrainingArguments"
263
+ f" values:\n{mismatches}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
264
+ )
265
+
266
+
267
+ # keep the config object global to be able to access it anywhere during TrainingArguments life-cycle
268
+ _hf_deepspeed_config_weak_ref = None
269
+
270
+
271
+ def set_hf_deepspeed_config(hf_deepspeed_config_obj):
272
+ # this is a special weakref global object to allow us to get to Deepspeed config from APIs
273
+ # that don't have an easy way to get to the Deepspeed config outside of the Trainer domain.
274
+ global _hf_deepspeed_config_weak_ref
275
+ # will go away automatically when HfDeepSpeedConfig is destroyed (when TrainingArguments is destroyed)
276
+ _hf_deepspeed_config_weak_ref = weakref.ref(hf_deepspeed_config_obj)
277
+
278
+
279
+ def unset_hf_deepspeed_config():
280
+ # useful for unit tests to ensure the global state doesn't leak - call from `tearDown` method
281
+ global _hf_deepspeed_config_weak_ref
282
+ _hf_deepspeed_config_weak_ref = None
283
+
284
+
285
+ def is_deepspeed_zero3_enabled():
286
+ if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
287
+ return _hf_deepspeed_config_weak_ref().is_zero3()
288
+ else:
289
+ return False
290
+
291
+
292
+ def deepspeed_config():
293
+ if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
294
+ return _hf_deepspeed_config_weak_ref().config
295
+ else:
296
+ return None
297
+
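A hedged sketch of querying the weakref-backed global state from code that has no access to the Trainer; both helpers degrade gracefully (False / None) when no `HfDeepSpeedConfig` is alive.

from transformers.integrations.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled

if is_deepspeed_zero3_enabled():
    ds_cfg = deepspeed_config()  # the raw DeepSpeed config dict held by the weakref
    print(ds_cfg.get("zero_optimization", {}))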
298
+
299
+ def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps, model_parameters):
300
+ """
301
+ A convenience wrapper that deals with optimizer and lr scheduler configuration.
302
+ """
303
+ from accelerate.utils import DummyOptim, DummyScheduler
304
+
305
+ config = hf_deepspeed_config.config
306
+
307
+ # Mixing and matching DS schedulers and optimizers is supported unless Offload is enabled in which case it's:
308
+ # 1. DS scheduler + DS optimizer: Yes
309
+ # 2. HF scheduler + HF optimizer: Mostly*
310
+ # 3. DS scheduler + HF optimizer: Mostly*
311
+ # 4. HF scheduler + DS optimizer: Yes
312
+ #
313
+ # Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB)
314
+
315
+ optimizer = None
316
+ if "optimizer" in config:
317
+ if args.adafactor:
318
+ raise ValueError(
319
+ "--adafactor was passed, but also found `optimizer` configured in the DeepSpeed config. "
320
+ "Only one optimizer can be configured."
321
+ )
322
+ optimizer = DummyOptim(params=model_parameters)
323
+ else:
324
+ if hf_deepspeed_config.is_offload():
325
+ logger.info(
326
+ "Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the"
327
+ " custom optimizer has both CPU and GPU implementation (except LAMB)"
328
+ )
329
+
330
+ # ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch.
331
+ # But trainer uses AdamW by default.
332
+ optimizer = trainer.create_optimizer()
333
+ # To use other optimizers requires voiding warranty with: `zero_allow_untested_optimizer`
334
+ config["zero_allow_untested_optimizer"] = True
335
+
336
+ lr_scheduler = None
337
+ if "scheduler" in config:
338
+ lr_scheduler = DummyScheduler(optimizer)
339
+ else:
340
+ if isinstance(optimizer, DummyOptim):
341
+
342
+ def _lr_scheduler_callable(optimizer):
343
+ # create a shallow copy first, so later modifications do not affect original trainer
344
+ trainer_copy = copy.copy(trainer)
345
+ # at the time _lr_scheduler_callable is called, trainer.lr_scheduler has been set
346
+ # update it to None so that we can re-create a new scheduler
347
+ trainer_copy.lr_scheduler = None
348
+ lr_scheduler = trainer_copy.create_scheduler(
349
+ num_training_steps=num_training_steps, optimizer=optimizer
350
+ )
351
+ return lr_scheduler
352
+
353
+ lr_scheduler = DummyScheduler(optimizer, lr_scheduler_callable=_lr_scheduler_callable)
354
+ else:
355
+ lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
356
+
357
+ return optimizer, lr_scheduler
358
+
359
+
360
+ def deepspeed_init(trainer, num_training_steps, inference=False):
361
+ """
362
+ Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args.
363
+
364
+ If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made.
365
+
366
+ Args:
367
+ trainer: Trainer object
368
+ num_training_steps: number of training steps (per single GPU)
369
+ inference: launch in inference mode (no optimizer and no lr scheduler)
373
+
374
+ Returns: optimizer, lr_scheduler
375
+
376
+ We may use `deepspeed_init` more than once during the life of a Trainer; when we do, it's a temporary hack based on:
377
+ https://github.com/microsoft/DeepSpeed/issues/1394#issuecomment-937405374 until Deepspeed fixes a bug where it
378
+ can't resume from a checkpoint after it did some stepping https://github.com/microsoft/DeepSpeed/issues/1612
379
+
380
+ """
381
+ from deepspeed.utils import logger as ds_logger
382
+
383
+ model = trainer.model
384
+ args = trainer.args
385
+
386
+ hf_deepspeed_config = trainer.accelerator.state.deepspeed_plugin.hf_ds_config
387
+
388
+ # resume config update - some bits like `model` and `num_training_steps` only become available during train
389
+ hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps)
390
+
391
+ # set the Deepspeed log level consistent with the Trainer
392
+ ds_logger.setLevel(args.get_process_log_level())
393
+
394
+ if inference:
395
+ # only Z3 makes sense for the inference
396
+ if not hf_deepspeed_config.is_zero3():
397
+ raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config")
398
+
399
+ # in case the training config is re-used for inference
400
+ hf_deepspeed_config.del_config_sub_tree("optimizer")
401
+ hf_deepspeed_config.del_config_sub_tree("lr_scheduler")
402
+ optimizer, lr_scheduler = None, None
403
+ model_parameters = None
404
+ else:
405
+ trainer.optimizer = None # important for when deepspeed_init is used as re-init
406
+ model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
407
+ optimizer, lr_scheduler = deepspeed_optim_sched(
408
+ trainer, hf_deepspeed_config, args, num_training_steps, model_parameters
409
+ )
410
+
411
+ # keep for quick debug:
412
+ # from pprint import pprint; pprint(config)
413
+
414
+ return optimizer, lr_scheduler
415
+
416
+
417
+ def deepspeed_load_checkpoint(deepspeed_engine, checkpoint_path, load_module_strict=True):
418
+ # it's possible that the user is trying to resume from model_path, which doesn't necessarily
419
+ # contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's
420
+ # a resume from a checkpoint and not just a local pretrained weight. So we check here if the
421
+ # path contains what looks like a deepspeed checkpoint
422
+ import glob
423
+
424
+ deepspeed_checkpoint_dirs = sorted(glob.glob(f"{checkpoint_path}/global_step*"))
425
+
426
+ if len(deepspeed_checkpoint_dirs) > 0:
427
+ logger.info(f"Attempting to resume from {checkpoint_path}")
428
+ # this magically updates self.optimizer and self.lr_scheduler
429
+ load_path, _ = deepspeed_engine.load_checkpoint(
430
+ checkpoint_path,
431
+ load_module_strict=load_module_strict,
432
+ load_optimizer_states=True,
433
+ load_lr_scheduler_states=True,
434
+ )
435
+ if load_path is None:
436
+ raise ValueError(f"[deepspeed] failed to resume from checkpoint {checkpoint_path}")
437
+ else:
438
+ raise ValueError(f"Can't find a valid checkpoint at {checkpoint_path}")