applied-ai-018 committed on
Commit 9d619f0 · verified · 1 Parent(s): 7ec69d0

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the remaining files.
Files changed (50)
  1. ckpts/universal/global_step40/zero/20.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/21.input_layernorm.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/21.input_layernorm.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step40/zero/9.attention.query_key_value.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step40/zero/9.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  7. venv/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/scipy/__pycache__/version.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/transformers/__init__.py +0 -0
  12. venv/lib/python3.10/site-packages/transformers/activations.py +239 -0
  13. venv/lib/python3.10/site-packages/transformers/activations_tf.py +147 -0
  14. venv/lib/python3.10/site-packages/transformers/audio_utils.py +825 -0
  15. venv/lib/python3.10/site-packages/transformers/cache_utils.py +435 -0
  16. venv/lib/python3.10/site-packages/transformers/configuration_utils.py +1133 -0
  17. venv/lib/python3.10/site-packages/transformers/convert_graph_to_onnx.py +551 -0
  18. venv/lib/python3.10/site-packages/transformers/convert_pytorch_checkpoint_to_tf2.py +448 -0
  19. venv/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py +1534 -0
  20. venv/lib/python3.10/site-packages/transformers/convert_slow_tokenizers_checkpoints_to_fast.py +126 -0
  21. venv/lib/python3.10/site-packages/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py +88 -0
  22. venv/lib/python3.10/site-packages/transformers/debug_utils.py +346 -0
  23. venv/lib/python3.10/site-packages/transformers/deepspeed.py +40 -0
  24. venv/lib/python3.10/site-packages/transformers/dependency_versions_check.py +63 -0
  25. venv/lib/python3.10/site-packages/transformers/dependency_versions_table.py +92 -0
  26. venv/lib/python3.10/site-packages/transformers/dynamic_module_utils.py +633 -0
  27. venv/lib/python3.10/site-packages/transformers/feature_extraction_sequence_utils.py +371 -0
  28. venv/lib/python3.10/site-packages/transformers/feature_extraction_utils.py +684 -0
  29. venv/lib/python3.10/site-packages/transformers/file_utils.py +133 -0
  30. venv/lib/python3.10/site-packages/transformers/hf_argparser.py +424 -0
  31. venv/lib/python3.10/site-packages/transformers/hyperparameter_search.py +141 -0
  32. venv/lib/python3.10/site-packages/transformers/image_processing_utils.py +793 -0
  33. venv/lib/python3.10/site-packages/transformers/image_transforms.py +803 -0
  34. venv/lib/python3.10/site-packages/transformers/image_utils.py +769 -0
  35. venv/lib/python3.10/site-packages/transformers/keras_callbacks.py +413 -0
  36. venv/lib/python3.10/site-packages/transformers/kernels/deformable_detr/ms_deform_attn.h +61 -0
  37. venv/lib/python3.10/site-packages/transformers/kernels/deta/cpu/ms_deform_attn_cpu.cpp +40 -0
  38. venv/lib/python3.10/site-packages/transformers/kernels/deta/cpu/ms_deform_attn_cpu.h +32 -0
  39. venv/lib/python3.10/site-packages/transformers/kernels/deta/cuda/ms_deform_attn_cuda.cu +156 -0
  40. venv/lib/python3.10/site-packages/transformers/kernels/deta/cuda/ms_deform_attn_cuda.cuh +1467 -0
  41. venv/lib/python3.10/site-packages/transformers/kernels/deta/cuda/ms_deform_attn_cuda.h +29 -0
  42. venv/lib/python3.10/site-packages/transformers/kernels/deta/cuda/ms_deform_im2col_cuda.cuh +1327 -0
  43. venv/lib/python3.10/site-packages/transformers/kernels/deta/ms_deform_attn.h +61 -0
  44. venv/lib/python3.10/site-packages/transformers/kernels/deta/vision.cpp +16 -0
  45. venv/lib/python3.10/site-packages/transformers/kernels/yoso/common.h +10 -0
  46. venv/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h +9 -0
  47. venv/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h +79 -0
  48. venv/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu +588 -0
  49. venv/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h +71 -0
  50. venv/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu +825 -0
ckpts/universal/global_step40/zero/20.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1d0c9cb96f2f4f78bc1a2de4f2f085f8358d28e882af40a91f63d1d5bb9bfe6
+size 33555612
ckpts/universal/global_step40/zero/21.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23a3b6986a1c570a68dbb8be524f72d8d347a423f62f9a34bed6fc5e47fba2b2
+size 9372
ckpts/universal/global_step40/zero/21.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0325875a882e07bb92bc0358d8f0d73e1528334f94a425df2f6de67d9dc8ea8
+size 9387
ckpts/universal/global_step40/zero/24.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80c435baca506f968914c996fa87eb126f6751b9ced996886742e9fbb46cc8c6
+size 33555533
ckpts/universal/global_step40/zero/9.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9dceb22235cb446718fdaba61928daffcb6a12d58d08dc88c8485a313892380
+size 50332828
ckpts/universal/global_step40/zero/9.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa70b78cb63a46e3cae1591c64fd0a8d624fb56b4e8148968cd78b9b1f609609
+size 50332843
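The six checkpoint entries above are Git LFS pointer files, not the tensors themselves; only the `oid` and `size` live in the repository. A minimal sketch of how one of these shards might be inspected, assuming the repository has been cloned, `git lfs pull` has fetched the real payloads, and the `.pt` files are ordinary `torch.save` outputs (the payload format is an assumption, not stated in the diff):

```python
# Hypothetical inspection of one fetched optimizer-state shard.
# The path comes from the diff above; torch.load assumes a standard torch.save payload.
import torch

path = "ckpts/universal/global_step40/zero/21.input_layernorm.weight/exp_avg.pt"
obj = torch.load(path, map_location="cpu")
print(type(obj), getattr(obj, "shape", None), getattr(obj, "dtype", None))
```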
venv/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.66 kB)
venv/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc ADDED
Binary file (808 Bytes)
venv/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (6.39 kB)
venv/lib/python3.10/site-packages/scipy/__pycache__/version.cpython-310.pyc ADDED
Binary file (320 Bytes)
venv/lib/python3.10/site-packages/transformers/__init__.py ADDED
The diff for this file is too large to render.
venv/lib/python3.10/site-packages/transformers/activations.py ADDED
@@ -0,0 +1,239 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from collections import OrderedDict
+
+import torch
+from packaging import version
+from torch import Tensor, nn
+
+from .utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class PytorchGELUTanh(nn.Module):
+    """
+    A fast C implementation of the tanh approximation of the GeLU activation function. See
+    https://arxiv.org/abs/1606.08415.
+
+    This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical
+    match due to rounding errors.
+    """
+
+    def __init__(self):
+        super().__init__()
+        if version.parse(torch.__version__) < version.parse("1.12.0"):
+            raise ImportError(
+                f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use "
+                "PytorchGELUTanh. Please upgrade torch."
+            )
+
+    def forward(self, input: Tensor) -> Tensor:
+        return nn.functional.gelu(input, approximate="tanh")
+
+
+class NewGELUActivation(nn.Module):
+    """
+    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
+    the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
+    """
+
+    def forward(self, input: Tensor) -> Tensor:
+        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
+
+
+class GELUActivation(nn.Module):
+    """
+    Original Implementation of the GELU activation function in Google BERT repo when initially created. For
+    information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
+    torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
+    Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
+    """
+
+    def __init__(self, use_gelu_python: bool = False):
+        super().__init__()
+        if use_gelu_python:
+            self.act = self._gelu_python
+        else:
+            self.act = nn.functional.gelu
+
+    def _gelu_python(self, input: Tensor) -> Tensor:
+        return input * 0.5 * (1.0 + torch.erf(input / math.sqrt(2.0)))
+
+    def forward(self, input: Tensor) -> Tensor:
+        return self.act(input)
+
+
+class FastGELUActivation(nn.Module):
+    """
+    Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
+    """
+
+    def forward(self, input: Tensor) -> Tensor:
+        return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input)))
+
+
+class QuickGELUActivation(nn.Module):
+    """
+    Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
+    """
+
+    def forward(self, input: Tensor) -> Tensor:
+        return input * torch.sigmoid(1.702 * input)
+
+
+class ClippedGELUActivation(nn.Module):
+    """
+    Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purpose, as
+    it allows mapping negatives values in the GeLU spectrum. For more information on this trick, please refer to
+    https://arxiv.org/abs/2004.09602.
+
+    Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
+    initially created.
+
+    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
+    torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://arxiv.org/abs/1606.08415
+    """
+
+    def __init__(self, min: float, max: float):
+        if min > max:
+            raise ValueError(f"min should be < max (got min: {min}, max: {max})")
+
+        super().__init__()
+        self.min = min
+        self.max = max
+
+    def forward(self, x: Tensor) -> Tensor:
+        return torch.clip(gelu(x), self.min, self.max)
+
+
+class AccurateGELUActivation(nn.Module):
+    """
+    Applies GELU approximation that is faster than default and more accurate than QuickGELU. See:
+    https://github.com/hendrycks/GELUs
+
+    Implemented along with MEGA (Moving Average Equipped Gated Attention)
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.precomputed_constant = math.sqrt(2 / math.pi)
+
+    def forward(self, input: Tensor) -> Tensor:
+        return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3))))
+
+
+class MishActivation(nn.Module):
+    """
+    See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
+    visit the official repository for the paper: https://github.com/digantamisra98/Mish
+    """
+
+    def __init__(self):
+        super().__init__()
+        if version.parse(torch.__version__) < version.parse("1.9.0"):
+            self.act = self._mish_python
+        else:
+            self.act = nn.functional.mish
+
+    def _mish_python(self, input: Tensor) -> Tensor:
+        return input * torch.tanh(nn.functional.softplus(input))
+
+    def forward(self, input: Tensor) -> Tensor:
+        return self.act(input)
+
+
+class LinearActivation(nn.Module):
+    """
+    Applies the linear activation function, i.e. forwarding input directly to output.
+    """
+
+    def forward(self, input: Tensor) -> Tensor:
+        return input
+
+
+class LaplaceActivation(nn.Module):
+    """
+    Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See
+    https://arxiv.org/abs/2209.10655
+
+    Inspired by squared relu, but with bounded range and gradient for better stability
+    """
+
+    def forward(self, input, mu=0.707107, sigma=0.282095):
+        input = (input - mu).div(sigma * math.sqrt(2.0))
+        return 0.5 * (1.0 + torch.erf(input))
+
+
+class ReLUSquaredActivation(nn.Module):
+    """
+    Applies the relu^2 activation introduced in https://arxiv.org/abs/2109.08668v2
+    """
+
+    def forward(self, input):
+        relu_applied = nn.functional.relu(input)
+        squared = torch.square(relu_applied)
+        return squared
+
+
+class ClassInstantier(OrderedDict):
+    def __getitem__(self, key):
+        content = super().__getitem__(key)
+        cls, kwargs = content if isinstance(content, tuple) else (content, {})
+        return cls(**kwargs)
+
+
+ACT2CLS = {
+    "gelu": GELUActivation,
+    "gelu_10": (ClippedGELUActivation, {"min": -10, "max": 10}),
+    "gelu_fast": FastGELUActivation,
+    "gelu_new": NewGELUActivation,
+    "gelu_python": (GELUActivation, {"use_gelu_python": True}),
+    "gelu_pytorch_tanh": PytorchGELUTanh,
+    "gelu_accurate": AccurateGELUActivation,
+    "laplace": LaplaceActivation,
+    "leaky_relu": nn.LeakyReLU,
+    "linear": LinearActivation,
+    "mish": MishActivation,
+    "quick_gelu": QuickGELUActivation,
+    "relu": nn.ReLU,
+    "relu2": ReLUSquaredActivation,
+    "relu6": nn.ReLU6,
+    "sigmoid": nn.Sigmoid,
+    "silu": nn.SiLU,
+    "swish": nn.SiLU,
+    "tanh": nn.Tanh,
+}
+ACT2FN = ClassInstantier(ACT2CLS)
+
+
+def get_activation(activation_string):
+    if activation_string in ACT2FN:
+        return ACT2FN[activation_string]
+    else:
+        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
+
+
+# For backwards compatibility with: from activations import gelu_python
+gelu_python = get_activation("gelu_python")
+gelu_new = get_activation("gelu_new")
+gelu = get_activation("gelu")
+gelu_fast = get_activation("gelu_fast")
+quick_gelu = get_activation("quick_gelu")
+silu = get_activation("silu")
+mish = get_activation("mish")
+linear_act = get_activation("linear")
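A minimal usage sketch of the `get_activation` registry defined in the file above, assuming `torch` and this `transformers` build are importable; it shows how a config string such as `"gelu_new"` resolves to a module instance:

```python
import torch
from transformers.activations import ACT2FN, get_activation

x = torch.randn(4)

exact = get_activation("gelu")       # erf-based GELU (nn.functional.gelu)
approx = get_activation("gelu_new")  # tanh approximation
print(torch.max(torch.abs(exact(x) - approx(x))))  # small but non-zero difference

# ACT2FN instantiates a fresh module on each lookup, so entries with kwargs work too:
clipped = ACT2FN["gelu_10"]  # ClippedGELUActivation(min=-10, max=10)
```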
venv/lib/python3.10/site-packages/transformers/activations_tf.py ADDED
@@ -0,0 +1,147 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+import tensorflow as tf
+from packaging.version import parse
+
+
+try:
+    import tf_keras as keras
+except (ModuleNotFoundError, ImportError):
+    import keras
+
+    if parse(keras.__version__).major > 2:
+        raise ValueError(
+            "Your currently installed version of Keras is Keras 3, but this is not yet supported in "
+            "Transformers. Please install the backwards-compatible tf-keras package with "
+            "`pip install tf-keras`."
+        )
+
+
+def _gelu(x):
+    """
+    Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
+    initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
+    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see
+    https://arxiv.org/abs/1606.08415
+    """
+    x = tf.convert_to_tensor(x)
+    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
+
+    return x * cdf
+
+
+def _gelu_new(x):
+    """
+    Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://arxiv.org/abs/1606.0841
+
+    Args:
+        x: float Tensor to perform activation
+
+    Returns:
+        `x` with the GELU activation applied.
+    """
+    x = tf.convert_to_tensor(x)
+    pi = tf.cast(math.pi, x.dtype)
+    coeff = tf.cast(0.044715, x.dtype)
+    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
+
+    return x * cdf
+
+
+def mish(x):
+    x = tf.convert_to_tensor(x)
+
+    return x * tf.tanh(tf.math.softplus(x))
+
+
+def gelu_fast(x):
+    x = tf.convert_to_tensor(x)
+    coeff1 = tf.cast(0.044715, x.dtype)
+    coeff2 = tf.cast(0.7978845608, x.dtype)
+
+    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
+
+
+def quick_gelu(x):
+    x = tf.convert_to_tensor(x)
+    coeff = tf.cast(1.702, x.dtype)
+    return x * tf.math.sigmoid(coeff * x)
+
+
+def gelu_10(x):
+    """
+    Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purpose, as
+    it allows mapping 2 negatives values in the GeLU spectrum. For more information on this trick, please refer to
+    https://arxiv.org/abs/2004.09602
+
+    Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
+    initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
+    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see
+    https://arxiv.org/abs/1606.08415 :param x: :return:
+    """
+    return tf.clip_by_value(_gelu(x), -10, 10)
+
+
+def glu(x, axis=-1):
+    """
+    Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where
+    the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B).
+
+    Args:
+        `x`: float Tensor to perform activation
+        `axis`: dimension across which `x` be split in half
+
+    Returns:
+        `x` with the GLU activation applied (with its size halved across the dimension `axis`).
+    """
+    a, b = tf.split(x, 2, axis=axis)
+    return a * tf.math.sigmoid(b)
+
+
+if parse(tf.version.VERSION) >= parse("2.4"):
+
+    def approximate_gelu_wrap(x):
+        return keras.activations.gelu(x, approximate=True)
+
+    gelu = keras.activations.gelu
+    gelu_new = approximate_gelu_wrap
+else:
+    gelu = _gelu
+    gelu_new = _gelu_new
+
+
+ACT2FN = {
+    "gelu": gelu,
+    "gelu_10": gelu_10,
+    "gelu_fast": gelu_fast,
+    "gelu_new": gelu_new,
+    "glu": glu,
+    "mish": mish,
+    "quick_gelu": quick_gelu,
+    "relu": keras.activations.relu,
+    "sigmoid": keras.activations.sigmoid,
+    "silu": keras.activations.swish,
+    "swish": keras.activations.swish,
+    "tanh": keras.activations.tanh,
+}
+
+
+def get_tf_activation(activation_string):
+    if activation_string in ACT2FN:
+        return ACT2FN[activation_string]
+    else:
+        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
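An analogous sketch for the TensorFlow side, assuming `tensorflow` and a Keras 2-compatible backend such as `tf-keras` are installed (per the guard at the top of the file above):

```python
import tensorflow as tf
from transformers.activations_tf import get_tf_activation

gelu_new = get_tf_activation("gelu_new")  # tanh-approximate GELU
glu = get_tf_activation("glu")            # splits and gates the last dimension

print(gelu_new(tf.random.normal((4,))).shape)    # (4,)
print(glu(tf.random.normal((2, 8))).shape)       # (2, 4)
```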
venv/lib/python3.10/site-packages/transformers/audio_utils.py ADDED
@@ -0,0 +1,825 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team and the librosa & torchaudio authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Audio processing functions to extract features from audio waveforms. This code is pure numpy to support all frameworks
17
+ and remove unnecessary dependencies.
18
+ """
19
+ import warnings
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+
25
+ def hertz_to_mel(freq: Union[float, np.ndarray], mel_scale: str = "htk") -> Union[float, np.ndarray]:
26
+ """
27
+ Convert frequency from hertz to mels.
28
+
29
+ Args:
30
+ freq (`float` or `np.ndarray`):
31
+ The frequency, or multiple frequencies, in hertz (Hz).
32
+ mel_scale (`str`, *optional*, defaults to `"htk"`):
33
+ The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`.
34
+
35
+ Returns:
36
+ `float` or `np.ndarray`: The frequencies on the mel scale.
37
+ """
38
+
39
+ if mel_scale not in ["slaney", "htk", "kaldi"]:
40
+ raise ValueError('mel_scale should be one of "htk", "slaney" or "kaldi".')
41
+
42
+ if mel_scale == "htk":
43
+ return 2595.0 * np.log10(1.0 + (freq / 700.0))
44
+ elif mel_scale == "kaldi":
45
+ return 1127.0 * np.log(1.0 + (freq / 700.0))
46
+
47
+ min_log_hertz = 1000.0
48
+ min_log_mel = 15.0
49
+ logstep = 27.0 / np.log(6.4)
50
+ mels = 3.0 * freq / 200.0
51
+
52
+ if isinstance(freq, np.ndarray):
53
+ log_region = freq >= min_log_hertz
54
+ mels[log_region] = min_log_mel + np.log(freq[log_region] / min_log_hertz) * logstep
55
+ elif freq >= min_log_hertz:
56
+ mels = min_log_mel + np.log(freq / min_log_hertz) * logstep
57
+
58
+ return mels
59
+
60
+
61
+ def mel_to_hertz(mels: Union[float, np.ndarray], mel_scale: str = "htk") -> Union[float, np.ndarray]:
62
+ """
63
+ Convert frequency from mels to hertz.
64
+
65
+ Args:
66
+ mels (`float` or `np.ndarray`):
67
+ The frequency, or multiple frequencies, in mels.
68
+ mel_scale (`str`, *optional*, `"htk"`):
69
+ The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`.
70
+
71
+ Returns:
72
+ `float` or `np.ndarray`: The frequencies in hertz.
73
+ """
74
+
75
+ if mel_scale not in ["slaney", "htk", "kaldi"]:
76
+ raise ValueError('mel_scale should be one of "htk", "slaney" or "kaldi".')
77
+
78
+ if mel_scale == "htk":
79
+ return 700.0 * (np.power(10, mels / 2595.0) - 1.0)
80
+ elif mel_scale == "kaldi":
81
+ return 700.0 * (np.exp(mels / 1127.0) - 1.0)
82
+
83
+ min_log_hertz = 1000.0
84
+ min_log_mel = 15.0
85
+ logstep = np.log(6.4) / 27.0
86
+ freq = 200.0 * mels / 3.0
87
+
88
+ if isinstance(mels, np.ndarray):
89
+ log_region = mels >= min_log_mel
90
+ freq[log_region] = min_log_hertz * np.exp(logstep * (mels[log_region] - min_log_mel))
91
+ elif mels >= min_log_mel:
92
+ freq = min_log_hertz * np.exp(logstep * (mels - min_log_mel))
93
+
94
+ return freq
95
+
96
+
97
+ def hertz_to_octave(
98
+ freq: Union[float, np.ndarray], tuning: Optional[float] = 0.0, bins_per_octave: Optional[int] = 12
99
+ ):
100
+ """
101
+ Convert frequency from hertz to fractional octave numbers.
102
+ Adapted from *librosa*.
103
+
104
+ Args:
105
+ freq (`float` or `np.ndarray`):
106
+ The frequency, or multiple frequencies, in hertz (Hz).
107
+ tuning (`float`, defaults to `0.`):
108
+ Tuning deviation from the Stuttgart pitch (A440) in (fractional) bins per octave.
109
+ bins_per_octave (`int`, defaults to `12`):
110
+ Number of bins per octave.
111
+
112
+ Returns:
113
+ `float` or `np.ndarray`: The frequencies on the octave scale.
114
+ """
115
+ stuttgart_pitch = 440.0 * 2.0 ** (tuning / bins_per_octave)
116
+ octave = np.log2(freq / (float(stuttgart_pitch) / 16))
117
+ return octave
118
+
119
+
120
+ def _create_triangular_filter_bank(fft_freqs: np.ndarray, filter_freqs: np.ndarray) -> np.ndarray:
121
+ """
122
+ Creates a triangular filter bank.
123
+
124
+ Adapted from *torchaudio* and *librosa*.
125
+
126
+ Args:
127
+ fft_freqs (`np.ndarray` of shape `(num_frequency_bins,)`):
128
+ Discrete frequencies of the FFT bins in Hz.
129
+ filter_freqs (`np.ndarray` of shape `(num_mel_filters,)`):
130
+ Center frequencies of the triangular filters to create, in Hz.
131
+
132
+ Returns:
133
+ `np.ndarray` of shape `(num_frequency_bins, num_mel_filters)`
134
+ """
135
+ filter_diff = np.diff(filter_freqs)
136
+ slopes = np.expand_dims(filter_freqs, 0) - np.expand_dims(fft_freqs, 1)
137
+ down_slopes = -slopes[:, :-2] / filter_diff[:-1]
138
+ up_slopes = slopes[:, 2:] / filter_diff[1:]
139
+ return np.maximum(np.zeros(1), np.minimum(down_slopes, up_slopes))
140
+
141
+
142
+ def chroma_filter_bank(
143
+ num_frequency_bins: int,
144
+ num_chroma: int,
145
+ sampling_rate: int,
146
+ tuning: float = 0.0,
147
+ power: Optional[float] = 2.0,
148
+ weighting_parameters: Optional[Tuple[float]] = (5.0, 2),
149
+ start_at_c_chroma: Optional[bool] = True,
150
+ ):
151
+ """
152
+ Creates a chroma filter bank, i.e a linear transformation to project spectrogram bins onto chroma bins.
153
+
154
+ Adapted from *librosa*.
155
+
156
+ Args:
157
+ num_frequency_bins (`int`):
158
+ Number of frequencies used to compute the spectrogram (should be the same as in `stft`).
159
+ num_chroma (`int`):
160
+ Number of chroma bins (i.e pitch classes).
161
+ sampling_rate (`float`):
162
+ Sample rate of the audio waveform.
163
+ tuning (`float`):
164
+ Tuning deviation from A440 in fractions of a chroma bin.
165
+ power (`float`, *optional*, defaults to 2.0):
166
+ If 12.0, normalizes each column with their L2 norm. If 1.0, normalizes each column with their L1 norm.
167
+ weighting_parameters (`Tuple[float]`, *optional*, defaults to `(5., 2.)`):
168
+ If specified, apply a Gaussian weighting parameterized by the first element of the tuple being the center and
169
+ the second element being the Gaussian half-width.
170
+ start_at_c_chroma (`float`, *optional*, defaults to `True`):
171
+ If True, the filter bank will start at the 'C' pitch class. Otherwise, it will start at 'A'.
172
+ Returns:
173
+ `np.ndarray` of shape `(num_frequency_bins, num_chroma)`
174
+ """
175
+ # Get the FFT bins, not counting the DC component
176
+ frequencies = np.linspace(0, sampling_rate, num_frequency_bins, endpoint=False)[1:]
177
+
178
+ freq_bins = num_chroma * hertz_to_octave(frequencies, tuning=tuning, bins_per_octave=num_chroma)
179
+
180
+ # make up a value for the 0 Hz bin = 1.5 octaves below bin 1
181
+ # (so chroma is 50% rotated from bin 1, and bin width is broad)
182
+ freq_bins = np.concatenate(([freq_bins[0] - 1.5 * num_chroma], freq_bins))
183
+
184
+ bins_width = np.concatenate((np.maximum(freq_bins[1:] - freq_bins[:-1], 1.0), [1]))
185
+
186
+ chroma_filters = np.subtract.outer(freq_bins, np.arange(0, num_chroma, dtype="d")).T
187
+
188
+ num_chroma2 = np.round(float(num_chroma) / 2)
189
+
190
+ # Project into range -num_chroma/2 .. num_chroma/2
191
+ # add on fixed offset of 10*num_chroma to ensure all values passed to
192
+ # rem are positive
193
+ chroma_filters = np.remainder(chroma_filters + num_chroma2 + 10 * num_chroma, num_chroma) - num_chroma2
194
+
195
+ # Gaussian bumps - 2*D to make them narrower
196
+ chroma_filters = np.exp(-0.5 * (2 * chroma_filters / np.tile(bins_width, (num_chroma, 1))) ** 2)
197
+
198
+ # normalize each column
199
+ if power is not None:
200
+ chroma_filters = chroma_filters / np.sum(chroma_filters**power, axis=0, keepdims=True) ** (1.0 / power)
201
+
202
+ # Maybe apply scaling for fft bins
203
+ if weighting_parameters is not None:
204
+ center, half_width = weighting_parameters
205
+ chroma_filters *= np.tile(
206
+ np.exp(-0.5 * (((freq_bins / num_chroma - center) / half_width) ** 2)),
207
+ (num_chroma, 1),
208
+ )
209
+
210
+ if start_at_c_chroma:
211
+ chroma_filters = np.roll(chroma_filters, -3 * (num_chroma // 12), axis=0)
212
+
213
+ # remove aliasing columns, copy to ensure row-contiguity
214
+ return np.ascontiguousarray(chroma_filters[:, : int(1 + num_frequency_bins / 2)])
215
+
216
+
217
+ def mel_filter_bank(
218
+ num_frequency_bins: int,
219
+ num_mel_filters: int,
220
+ min_frequency: float,
221
+ max_frequency: float,
222
+ sampling_rate: int,
223
+ norm: Optional[str] = None,
224
+ mel_scale: str = "htk",
225
+ triangularize_in_mel_space: bool = False,
226
+ ) -> np.ndarray:
227
+ """
228
+ Creates a frequency bin conversion matrix used to obtain a mel spectrogram. This is called a *mel filter bank*, and
229
+ various implementation exist, which differ in the number of filters, the shape of the filters, the way the filters
230
+ are spaced, the bandwidth of the filters, and the manner in which the spectrum is warped. The goal of these
231
+ features is to approximate the non-linear human perception of the variation in pitch with respect to the frequency.
232
+
233
+ Different banks of mel filters were introduced in the literature. The following variations are supported:
234
+
235
+ - MFCC FB-20: introduced in 1980 by Davis and Mermelstein, it assumes a sampling frequency of 10 kHz and a speech
236
+ bandwidth of `[0, 4600]` Hz.
237
+ - MFCC FB-24 HTK: from the Cambridge HMM Toolkit (HTK) (1995) uses a filter bank of 24 filters for a speech
238
+ bandwidth of `[0, 8000]` Hz. This assumes sampling rate ≥ 16 kHz.
239
+ - MFCC FB-40: from the Auditory Toolbox for MATLAB written by Slaney in 1998, assumes a sampling rate of 16 kHz and
240
+ speech bandwidth of `[133, 6854]` Hz. This version also includes area normalization.
241
+ - HFCC-E FB-29 (Human Factor Cepstral Coefficients) of Skowronski and Harris (2004), assumes a sampling rate of
242
+ 12.5 kHz and speech bandwidth of `[0, 6250]` Hz.
243
+
244
+ This code is adapted from *torchaudio* and *librosa*. Note that the default parameters of torchaudio's
245
+ `melscale_fbanks` implement the `"htk"` filters while librosa uses the `"slaney"` implementation.
246
+
247
+ Args:
248
+ num_frequency_bins (`int`):
249
+ Number of frequencies used to compute the spectrogram (should be the same as in `stft`).
250
+ num_mel_filters (`int`):
251
+ Number of mel filters to generate.
252
+ min_frequency (`float`):
253
+ Lowest frequency of interest in Hz.
254
+ max_frequency (`float`):
255
+ Highest frequency of interest in Hz. This should not exceed `sampling_rate / 2`.
256
+ sampling_rate (`int`):
257
+ Sample rate of the audio waveform.
258
+ norm (`str`, *optional*):
259
+ If `"slaney"`, divide the triangular mel weights by the width of the mel band (area normalization).
260
+ mel_scale (`str`, *optional*, defaults to `"htk"`):
261
+ The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`.
262
+ triangularize_in_mel_space (`bool`, *optional*, defaults to `False`):
263
+ If this option is enabled, the triangular filter is applied in mel space rather than frequency space. This
264
+ should be set to `true` in order to get the same results as `torchaudio` when computing mel filters.
265
+
266
+ Returns:
267
+ `np.ndarray` of shape (`num_frequency_bins`, `num_mel_filters`): Triangular filter bank matrix. This is a
268
+ projection matrix to go from a spectrogram to a mel spectrogram.
269
+ """
270
+ if norm is not None and norm != "slaney":
271
+ raise ValueError('norm must be one of None or "slaney"')
272
+
273
+ # center points of the triangular mel filters
274
+ mel_min = hertz_to_mel(min_frequency, mel_scale=mel_scale)
275
+ mel_max = hertz_to_mel(max_frequency, mel_scale=mel_scale)
276
+ mel_freqs = np.linspace(mel_min, mel_max, num_mel_filters + 2)
277
+ filter_freqs = mel_to_hertz(mel_freqs, mel_scale=mel_scale)
278
+
279
+ if triangularize_in_mel_space:
280
+ # frequencies of FFT bins in Hz, but filters triangularized in mel space
281
+ fft_bin_width = sampling_rate / (num_frequency_bins * 2)
282
+ fft_freqs = hertz_to_mel(fft_bin_width * np.arange(num_frequency_bins), mel_scale=mel_scale)
283
+ filter_freqs = mel_freqs
284
+ else:
285
+ # frequencies of FFT bins in Hz
286
+ fft_freqs = np.linspace(0, sampling_rate // 2, num_frequency_bins)
287
+
288
+ mel_filters = _create_triangular_filter_bank(fft_freqs, filter_freqs)
289
+
290
+ if norm is not None and norm == "slaney":
291
+ # Slaney-style mel is scaled to be approx constant energy per channel
292
+ enorm = 2.0 / (filter_freqs[2 : num_mel_filters + 2] - filter_freqs[:num_mel_filters])
293
+ mel_filters *= np.expand_dims(enorm, 0)
294
+
295
+ if (mel_filters.max(axis=0) == 0.0).any():
296
+ warnings.warn(
297
+ "At least one mel filter has all zero values. "
298
+ f"The value for `num_mel_filters` ({num_mel_filters}) may be set too high. "
299
+ f"Or, the value for `num_frequency_bins` ({num_frequency_bins}) may be set too low."
300
+ )
301
+
302
+ return mel_filters
303
+
304
+
305
+ def optimal_fft_length(window_length: int) -> int:
306
+ """
307
+ Finds the best FFT input size for a given `window_length`. This function takes a given window length and, if not
308
+ already a power of two, rounds it up to the next power or two.
309
+
310
+ The FFT algorithm works fastest when the length of the input is a power of two, which may be larger than the size
311
+ of the window or analysis frame. For example, if the window is 400 samples, using an FFT input size of 512 samples
312
+ is more optimal than an FFT size of 400 samples. Using a larger FFT size does not affect the detected frequencies,
313
+ it simply gives a higher frequency resolution (i.e. the frequency bins are smaller).
314
+ """
315
+ return 2 ** int(np.ceil(np.log2(window_length)))
316
+
317
+
318
+ def window_function(
319
+ window_length: int,
320
+ name: str = "hann",
321
+ periodic: bool = True,
322
+ frame_length: Optional[int] = None,
323
+ center: bool = True,
324
+ ) -> np.ndarray:
325
+ """
326
+ Returns an array containing the specified window. This window is intended to be used with `stft`.
327
+
328
+ The following window types are supported:
329
+
330
+ - `"boxcar"`: a rectangular window
331
+ - `"hamming"`: the Hamming window
332
+ - `"hann"`: the Hann window
333
+ - `"povey"`: the Povey window
334
+
335
+ Args:
336
+ window_length (`int`):
337
+ The length of the window in samples.
338
+ name (`str`, *optional*, defaults to `"hann"`):
339
+ The name of the window function.
340
+ periodic (`bool`, *optional*, defaults to `True`):
341
+ Whether the window is periodic or symmetric.
342
+ frame_length (`int`, *optional*):
343
+ The length of the analysis frames in samples. Provide a value for `frame_length` if the window is smaller
344
+ than the frame length, so that it will be zero-padded.
345
+ center (`bool`, *optional*, defaults to `True`):
346
+ Whether to center the window inside the FFT buffer. Only used when `frame_length` is provided.
347
+
348
+ Returns:
349
+ `np.ndarray` of shape `(window_length,)` or `(frame_length,)` containing the window.
350
+ """
351
+ length = window_length + 1 if periodic else window_length
352
+
353
+ if name == "boxcar":
354
+ window = np.ones(length)
355
+ elif name in ["hamming", "hamming_window"]:
356
+ window = np.hamming(length)
357
+ elif name in ["hann", "hann_window"]:
358
+ window = np.hanning(length)
359
+ elif name in ["povey"]:
360
+ window = np.power(np.hanning(length), 0.85)
361
+ else:
362
+ raise ValueError(f"Unknown window function '{name}'")
363
+
364
+ if periodic:
365
+ window = window[:-1]
366
+
367
+ if frame_length is None:
368
+ return window
369
+
370
+ if window_length > frame_length:
371
+ raise ValueError(
372
+ f"Length of the window ({window_length}) may not be larger than frame_length ({frame_length})"
373
+ )
374
+
375
+ padded_window = np.zeros(frame_length)
376
+ offset = (frame_length - window_length) // 2 if center else 0
377
+ padded_window[offset : offset + window_length] = window
378
+ return padded_window
379
+
380
+
381
+ # TODO This method does not support batching yet as we are mainly focused on inference.
382
+ def spectrogram(
383
+ waveform: np.ndarray,
384
+ window: np.ndarray,
385
+ frame_length: int,
386
+ hop_length: int,
387
+ fft_length: Optional[int] = None,
388
+ power: Optional[float] = 1.0,
389
+ center: bool = True,
390
+ pad_mode: str = "reflect",
391
+ onesided: bool = True,
392
+ preemphasis: Optional[float] = None,
393
+ mel_filters: Optional[np.ndarray] = None,
394
+ mel_floor: float = 1e-10,
395
+ log_mel: Optional[str] = None,
396
+ reference: float = 1.0,
397
+ min_value: float = 1e-10,
398
+ db_range: Optional[float] = None,
399
+ remove_dc_offset: Optional[bool] = None,
400
+ dtype: np.dtype = np.float32,
401
+ ) -> np.ndarray:
402
+ """
403
+ Calculates a spectrogram over one waveform using the Short-Time Fourier Transform.
404
+
405
+ This function can create the following kinds of spectrograms:
406
+
407
+ - amplitude spectrogram (`power = 1.0`)
408
+ - power spectrogram (`power = 2.0`)
409
+ - complex-valued spectrogram (`power = None`)
410
+ - log spectrogram (use `log_mel` argument)
411
+ - mel spectrogram (provide `mel_filters`)
412
+ - log-mel spectrogram (provide `mel_filters` and `log_mel`)
413
+
414
+ How this works:
415
+
416
+ 1. The input waveform is split into frames of size `frame_length` that are partially overlapping by `frame_length
417
+ - hop_length` samples.
418
+ 2. Each frame is multiplied by the window and placed into a buffer of size `fft_length`.
419
+ 3. The DFT is taken of each windowed frame.
420
+ 4. The results are stacked into a spectrogram.
421
+
422
+ We make a distinction between the following "blocks" of sample data, each of which may have a different lengths:
423
+
424
+ - The analysis frame. This is the size of the time slices that the input waveform is split into.
425
+ - The window. Each analysis frame is multiplied by the window to avoid spectral leakage.
426
+ - The FFT input buffer. The length of this determines how many frequency bins are in the spectrogram.
427
+
428
+ In this implementation, the window is assumed to be zero-padded to have the same size as the analysis frame. A
429
+ padded window can be obtained from `window_function()`. The FFT input buffer may be larger than the analysis frame,
430
+ typically the next power of two.
431
+
432
+ Note: This function is not optimized for speed yet. It should be mostly compatible with `librosa.stft` and
433
+ `torchaudio.functional.transforms.Spectrogram`, although it is more flexible due to the different ways spectrograms
434
+ can be constructed.
435
+
436
+ Args:
437
+ waveform (`np.ndarray` of shape `(length,)`):
438
+ The input waveform. This must be a single real-valued, mono waveform.
439
+ window (`np.ndarray` of shape `(frame_length,)`):
440
+ The windowing function to apply, including zero-padding if necessary. The actual window length may be
441
+ shorter than `frame_length`, but we're assuming the array has already been zero-padded.
442
+ frame_length (`int`):
443
+ The length of the analysis frames in samples. With librosa this is always equal to `fft_length` but we also
444
+ allow smaller sizes.
445
+ hop_length (`int`):
446
+ The stride between successive analysis frames in samples.
447
+ fft_length (`int`, *optional*):
448
+ The size of the FFT buffer in samples. This determines how many frequency bins the spectrogram will have.
449
+ For optimal speed, this should be a power of two. If `None`, uses `frame_length`.
450
+ power (`float`, *optional*, defaults to 1.0):
451
+ If 1.0, returns the amplitude spectrogram. If 2.0, returns the power spectrogram. If `None`, returns
452
+ complex numbers.
453
+ center (`bool`, *optional*, defaults to `True`):
454
+ Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame
455
+ `t` will start at time `t * hop_length`.
456
+ pad_mode (`str`, *optional*, defaults to `"reflect"`):
457
+ Padding mode used when `center` is `True`. Possible values are: `"constant"` (pad with zeros), `"edge"`
458
+ (pad with edge values), `"reflect"` (pads with mirrored values).
459
+ onesided (`bool`, *optional*, defaults to `True`):
460
+ If True, only computes the positive frequencies and returns a spectrogram containing `fft_length // 2 + 1`
461
+ frequency bins. If False, also computes the negative frequencies and returns `fft_length` frequency bins.
462
+ preemphasis (`float`, *optional*)
463
+ Coefficient for a low-pass filter that applies pre-emphasis before the DFT.
464
+ mel_filters (`np.ndarray` of shape `(num_freq_bins, num_mel_filters)`, *optional*):
465
+ The mel filter bank. If supplied, applies a this filter bank to create a mel spectrogram.
466
+ mel_floor (`float`, *optional*, defaults to 1e-10):
467
+ Minimum value of mel frequency banks.
468
+ log_mel (`str`, *optional*):
469
+ How to convert the spectrogram to log scale. Possible options are: `None` (don't convert), `"log"` (take
470
+ the natural logarithm) `"log10"` (take the base-10 logarithm), `"dB"` (convert to decibels). Can only be
471
+ used when `power` is not `None`.
472
+ reference (`float`, *optional*, defaults to 1.0):
473
+ Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set
474
+ the loudest part to 0 dB. Must be greater than zero.
475
+ min_value (`float`, *optional*, defaults to `1e-10`):
476
+ The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking
477
+ `log(0)`. For a power spectrogram, the default of `1e-10` corresponds to a minimum of -100 dB. For an
478
+ amplitude spectrogram, the value `1e-5` corresponds to -100 dB. Must be greater than zero.
479
+ db_range (`float`, *optional*):
480
+ Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the
481
+ peak value and the smallest value will never be more than 80 dB. Must be greater than zero.
482
+ remove_dc_offset (`bool`, *optional*):
483
+ Subtract mean from waveform on each frame, applied before pre-emphasis. This should be set to `true` in
484
+ order to get the same results as `torchaudio.compliance.kaldi.fbank` when computing mel filters.
485
+ dtype (`np.dtype`, *optional*, defaults to `np.float32`):
486
+ Data type of the spectrogram tensor. If `power` is None, this argument is ignored and the dtype will be
487
+ `np.complex64`.
488
+
489
+ Returns:
490
+ `nd.array` containing a spectrogram of shape `(num_frequency_bins, length)` for a regular spectrogram or shape
491
+ `(num_mel_filters, length)` for a mel spectrogram.
492
+ """
493
+ window_length = len(window)
494
+
495
+ if fft_length is None:
496
+ fft_length = frame_length
497
+
498
+ if frame_length > fft_length:
499
+ raise ValueError(f"frame_length ({frame_length}) may not be larger than fft_length ({fft_length})")
500
+
501
+ if window_length != frame_length:
502
+ raise ValueError(f"Length of the window ({window_length}) must equal frame_length ({frame_length})")
503
+
504
+ if hop_length <= 0:
505
+ raise ValueError("hop_length must be greater than zero")
506
+
507
+ if waveform.ndim != 1:
508
+ raise ValueError(f"Input waveform must have only one dimension, shape is {waveform.shape}")
509
+
510
+ if np.iscomplexobj(waveform):
511
+ raise ValueError("Complex-valued input waveforms are not currently supported")
512
+
513
+ if power is None and mel_filters is not None:
514
+ raise ValueError(
515
+ "You have provided `mel_filters` but `power` is `None`. Mel spectrogram computation is not yet supported for complex-valued spectrogram."
516
+ "Specify `power` to fix this issue."
517
+ )
518
+
519
+ # center pad the waveform
520
+ if center:
521
+ padding = [(int(frame_length // 2), int(frame_length // 2))]
522
+ waveform = np.pad(waveform, padding, mode=pad_mode)
523
+
524
+ # promote to float64, since np.fft uses float64 internally
525
+ waveform = waveform.astype(np.float64)
526
+ window = window.astype(np.float64)
527
+
528
+ # split waveform into frames of frame_length size
529
+ num_frames = int(1 + np.floor((waveform.size - frame_length) / hop_length))
530
+
531
+ num_frequency_bins = (fft_length // 2) + 1 if onesided else fft_length
532
+ spectrogram = np.empty((num_frames, num_frequency_bins), dtype=np.complex64)
533
+
534
+ # rfft is faster than fft
535
+ fft_func = np.fft.rfft if onesided else np.fft.fft
536
+ buffer = np.zeros(fft_length)
537
+
538
+ timestep = 0
539
+ for frame_idx in range(num_frames):
540
+ buffer[:frame_length] = waveform[timestep : timestep + frame_length]
541
+
542
+ if remove_dc_offset:
543
+ buffer[:frame_length] = buffer[:frame_length] - buffer[:frame_length].mean()
544
+
545
+ if preemphasis is not None:
546
+ buffer[1:frame_length] -= preemphasis * buffer[: frame_length - 1]
547
+ buffer[0] *= 1 - preemphasis
548
+
549
+ buffer[:frame_length] *= window
550
+
551
+ spectrogram[frame_idx] = fft_func(buffer)
552
+ timestep += hop_length
553
+
554
+ # note: ** is much faster than np.power
555
+ if power is not None:
556
+ spectrogram = np.abs(spectrogram, dtype=np.float64) ** power
557
+
558
+ spectrogram = spectrogram.T
559
+
560
+ if mel_filters is not None:
561
+ spectrogram = np.maximum(mel_floor, np.dot(mel_filters.T, spectrogram))
562
+
563
+ if power is not None and log_mel is not None:
564
+ if log_mel == "log":
565
+ spectrogram = np.log(spectrogram)
566
+ elif log_mel == "log10":
567
+ spectrogram = np.log10(spectrogram)
568
+ elif log_mel == "dB":
569
+ if power == 1.0:
570
+ spectrogram = amplitude_to_db(spectrogram, reference, min_value, db_range)
571
+ elif power == 2.0:
572
+ spectrogram = power_to_db(spectrogram, reference, min_value, db_range)
573
+ else:
574
+ raise ValueError(f"Cannot use log_mel option '{log_mel}' with power {power}")
575
+ else:
576
+ raise ValueError(f"Unknown log_mel option: {log_mel}")
577
+
578
+ spectrogram = np.asarray(spectrogram, dtype)
579
+
580
+ return spectrogram
581
+
582
+
583
+ def power_to_db(
584
+ spectrogram: np.ndarray,
585
+ reference: float = 1.0,
586
+ min_value: float = 1e-10,
587
+ db_range: Optional[float] = None,
588
+ ) -> np.ndarray:
589
+ """
590
+ Converts a power spectrogram to the decibel scale. This computes `10 * log10(spectrogram / reference)`, using basic
591
+ logarithm properties for numerical stability.
592
+
593
+ The motivation behind applying the log function on the (mel) spectrogram is that humans do not hear loudness on a
594
+ linear scale. Generally to double the perceived volume of a sound we need to put 8 times as much energy into it.
595
+ This means that large variations in energy may not sound all that different if the sound is loud to begin with.
596
+ This compression operation makes the (mel) spectrogram features match more closely what humans actually hear.
597
+
598
+ Based on the implementation of `librosa.power_to_db`.
599
+
600
+ Args:
601
+ spectrogram (`np.ndarray`):
602
+ The input power (mel) spectrogram. Note that a power spectrogram has the amplitudes squared!
603
+ reference (`float`, *optional*, defaults to 1.0):
604
+ Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set
605
+ the loudest part to 0 dB. Must be greater than zero.
606
+ min_value (`float`, *optional*, defaults to `1e-10`):
607
+ The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking
608
+ `log(0)`. The default of `1e-10` corresponds to a minimum of -100 dB. Must be greater than zero.
609
+ db_range (`float`, *optional*):
610
+ Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the
611
+ peak value and the smallest value will never be more than 80 dB. Must be greater than zero.
612
+
613
+ Returns:
614
+ `np.ndarray`: the spectrogram in decibels
615
+ """
616
+ if reference <= 0.0:
617
+ raise ValueError("reference must be greater than zero")
618
+ if min_value <= 0.0:
619
+ raise ValueError("min_value must be greater than zero")
620
+
621
+ reference = max(min_value, reference)
622
+
623
+ spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None)
624
+ spectrogram = 10.0 * (np.log10(spectrogram) - np.log10(reference))
625
+
626
+ if db_range is not None:
627
+ if db_range <= 0.0:
628
+ raise ValueError("db_range must be greater than zero")
629
+ spectrogram = np.clip(spectrogram, a_min=spectrogram.max() - db_range, a_max=None)
630
+
631
+ return spectrogram
632
+
633
+
634
+ def amplitude_to_db(
635
+ spectrogram: np.ndarray,
636
+ reference: float = 1.0,
637
+ min_value: float = 1e-5,
638
+ db_range: Optional[float] = None,
639
+ ) -> np.ndarray:
640
+ """
641
+ Converts an amplitude spectrogram to the decibel scale. This computes `20 * log10(spectrogram / reference)`, using
642
+ basic logarithm properties for numerical stability.
643
+
644
+ The motivation behind applying the log function on the (mel) spectrogram is that humans do not hear loudness on a
645
+ linear scale. Generally to double the perceived volume of a sound we need to put 8 times as much energy into it.
646
+ This means that large variations in energy may not sound all that different if the sound is loud to begin with.
647
+ This compression operation makes the (mel) spectrogram features match more closely what humans actually hear.
648
+
649
+ Args:
650
+ spectrogram (`np.ndarray`):
651
+ The input amplitude (mel) spectrogram.
652
+ reference (`float`, *optional*, defaults to 1.0):
653
+ Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set
654
+ the loudest part to 0 dB. Must be greater than zero.
655
+ min_value (`float`, *optional*, defaults to `1e-5`):
656
+ The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking
657
+ `log(0)`. The default of `1e-5` corresponds to a minimum of -100 dB. Must be greater than zero.
658
+ db_range (`float`, *optional*):
659
+ Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the
660
+ peak value and the smallest value will never be more than 80 dB. Must be greater than zero.
661
+
662
+ Returns:
663
+ `np.ndarray`: the spectrogram in decibels
664
+ """
665
+ if reference <= 0.0:
666
+ raise ValueError("reference must be greater than zero")
667
+ if min_value <= 0.0:
668
+ raise ValueError("min_value must be greater than zero")
669
+
670
+ reference = max(min_value, reference)
671
+
672
+ spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None)
673
+ spectrogram = 20.0 * (np.log10(spectrogram) - np.log10(reference))
674
+
675
+ if db_range is not None:
676
+ if db_range <= 0.0:
677
+ raise ValueError("db_range must be greater than zero")
678
+ spectrogram = np.clip(spectrogram, a_min=spectrogram.max() - db_range, a_max=None)
679
+
680
+ return spectrogram
681
+
682
+
683
+ ### deprecated functions below this line ###
684
+
685
+
686
+ def get_mel_filter_banks(
687
+ nb_frequency_bins: int,
688
+ nb_mel_filters: int,
689
+ frequency_min: float,
690
+ frequency_max: float,
691
+ sample_rate: int,
692
+ norm: Optional[str] = None,
693
+ mel_scale: str = "htk",
694
+ ) -> np.array:
695
+ warnings.warn(
696
+ "The function `get_mel_filter_banks` is deprecated and will be removed in version 4.31.0 of Transformers",
697
+ FutureWarning,
698
+ )
699
+ return mel_filter_bank(
700
+ num_frequency_bins=nb_frequency_bins,
701
+ num_mel_filters=nb_mel_filters,
702
+ min_frequency=frequency_min,
703
+ max_frequency=frequency_max,
704
+ sampling_rate=sample_rate,
705
+ norm=norm,
706
+ mel_scale=mel_scale,
707
+ )
708
+
709
+
710
+ def fram_wave(waveform: np.array, hop_length: int = 160, fft_window_size: int = 400, center: bool = True):
711
+ """
712
+ In order to compute the short time fourier transform, the waveform needs to be split in overlapping windowed
713
+ segments called `frames`.
714
+
715
+ The window length (window_length) defines how much of the signal is contained in each frame, while the hop length
716
+ defines the step between the beginning of each new frame.
717
+
718
+
719
+ Args:
720
+ waveform (`np.array` of shape `(sample_length,)`):
721
+ The raw waveform which will be split into smaller chunks.
722
+ hop_length (`int`, *optional*, defaults to 160):
723
+ Step between each window of the waveform.
724
+ fft_window_size (`int`, *optional*, defaults to 400):
725
+ Defines the size of the window.
726
+ center (`bool`, defaults to `True`):
727
+ Whether or not to center each frame around the middle of the frame. Centering is done by reflecting the
728
+ waveform on the left and on the right.
729
+
730
+ Return:
731
+ framed_waveform (`np.array` of shape `(waveform.shape // hop_length , fft_window_size)`):
732
+ The framed waveforms that can be fed to `np.fft`.
733
+ """
734
+ warnings.warn(
735
+ "The function `fram_wave` is deprecated and will be removed in version 4.31.0 of Transformers",
736
+ FutureWarning,
737
+ )
738
+ frames = []
739
+ for i in range(0, waveform.shape[0] + 1, hop_length):
740
+ if center:
741
+ half_window = (fft_window_size - 1) // 2 + 1
742
+ start = i - half_window if i > half_window else 0
743
+ end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0]
744
+ frame = waveform[start:end]
745
+ if start == 0:
746
+ padd_width = (-i + half_window, 0)
747
+ frame = np.pad(frame, pad_width=padd_width, mode="reflect")
748
+
749
+ elif end == waveform.shape[0]:
750
+ padd_width = (0, (i - waveform.shape[0] + half_window))
751
+ frame = np.pad(frame, pad_width=padd_width, mode="reflect")
752
+
753
+ else:
754
+ frame = waveform[i : i + fft_window_size]
755
+ frame_width = frame.shape[0]
756
+ if frame_width < waveform.shape[0]:
757
+ frame = np.lib.pad(
758
+ frame, pad_width=(0, fft_window_size - frame_width), mode="constant", constant_values=0
759
+ )
760
+ frames.append(frame)
761
+
762
+ frames = np.stack(frames, 0)
763
+ return frames
764
+
765
+
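As a quick sanity check on the framing above: the loop steps `i = 0, hop_length, 2 * hop_length, ...` up to and including `len(waveform)`, so it yields `len(waveform) // hop_length + 1` frames. A small sketch with made-up sizes:

```python
import numpy as np

waveform = np.random.rand(16_000)        # e.g. 1 second of audio at 16 kHz (hypothetical)
hop_length, fft_window_size = 160, 400   # the defaults of fram_wave

num_frames = waveform.shape[0] // hop_length + 1
print(num_frames)                        # 101 windows, each fft_window_size samples wide
```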
766
+ def stft(frames: np.array, windowing_function: np.array, fft_window_size: int = None):
767
+ """
768
+ Calculates the complex Short-Time Fourier Transform (STFT) of the given framed signal. Should give the same results
769
+ as `torch.stft`.
770
+
771
+ Args:
772
+ frames (`np.array` of dimension `(num_frames, fft_window_size)`):
773
+ A framed audio signal obtained using `audio_utils.fram_wav`.
774
+ windowing_function (`np.array` of dimension `(nb_frequency_bins, nb_mel_filters)`):
775
+ An array representing the function that will be used to reduce the amplitude of the discontinuities at the
776
+ boundaries of each frame when computing the STFT. Each frame will be multiplied by the windowing_function.
777
+ For more information on the discontinuities, called *Spectral leakage*, refer to [this
778
+ tutorial](https://download.ni.com/evaluation/pxi/Understanding%20FFTs%20and%20Windowing.pdf).
779
+ fft_window_size (`int`, *optional*):
780
+ Size of the window on which the Fourier transform is applied. This controls the frequency resolution of the
781
+ spectrogram. 400 means that the Fourier transform is computed on windows of 400 samples. The number of
782
+ frequency bins (`nb_frequency_bins`) used to divide the window into equal strips is equal to
783
+ `fft_window_size // 2 + 1`. Increasing `fft_window_size` slows down the computation proportionally.
784
+
785
+ Example:
786
+
787
+ ```python
788
+ >>> from transformers.audio_utils import stft, fram_wave
789
+ >>> import numpy as np
790
+
791
+ >>> audio = np.random.rand(50)
792
+ >>> fft_window_size = 10
793
+ >>> hop_length = 2
794
+ >>> framed_audio = fram_wave(audio, hop_length, fft_window_size)
795
+ >>> spectrogram = stft(framed_audio, np.hanning(fft_window_size + 1))
796
+ ```
797
+
798
+ Returns:
799
+ spectrogram (`np.ndarray`):
800
+ A spectrogram of shape `(num_frames, nb_frequency_bins)` obtained using the STFT algorithm
801
+ """
802
+ warnings.warn(
803
+ "The function `stft` is deprecated and will be removed in version 4.31.0 of Transformers",
804
+ FutureWarning,
805
+ )
806
+ frame_size = frames.shape[1]
807
+
808
+ if fft_window_size is None:
809
+ fft_window_size = frame_size
810
+
811
+ if fft_window_size < frame_size:
812
+ raise ValueError("FFT size must be greater than or equal to the frame size")
813
+ # number of FFT bins to store
814
+ nb_frequency_bins = (fft_window_size >> 1) + 1
815
+
816
+ spectrogram = np.empty((len(frames), nb_frequency_bins), dtype=np.complex64)
817
+ fft_signal = np.zeros(fft_window_size)
818
+
819
+ for f, frame in enumerate(frames):
820
+ if windowing_function is not None:
821
+ np.multiply(frame, windowing_function, out=fft_signal[:frame_size])
822
+ else:
823
+ fft_signal[:frame_size] = frame
824
+ spectrogram[f] = np.fft.fft(fft_signal, axis=0)[:nb_frequency_bins]
825
+ return spectrogram.T
venv/lib/python3.10/site-packages/transformers/cache_utils.py ADDED
@@ -0,0 +1,435 @@
1
+ from dataclasses import dataclass
2
+ from typing import Any, Dict, List, Optional, Tuple
3
+
4
+ import torch
5
+
6
+ from .configuration_utils import PretrainedConfig
7
+ from .utils import logging
8
+
9
+
10
+ logger = logging.get_logger(__name__)
11
+
12
+
13
+ @dataclass
14
+ class Cache:
15
+ """
16
+ Base, abstract class for all caches. The actual data structure is specific to each subclass.
17
+ """
18
+
19
+ def update(
20
+ self,
21
+ key_states: torch.Tensor,
22
+ value_states: torch.Tensor,
23
+ layer_idx: int,
24
+ cache_kwargs: Optional[Dict[str, Any]] = None,
25
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
26
+ """
27
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
28
+
29
+ Parameters:
30
+ key_states (`torch.Tensor`):
31
+ The new key states to cache.
32
+ value_states (`torch.Tensor`):
33
+ The new value states to cache.
34
+ layer_idx (`int`):
35
+ The index of the layer to cache the states for.
36
+ cache_kwargs (`Dict[str, Any]`, `optional`):
37
+ Additional arguments for the cache subclass. These are specific to each subclass and allow new types of
38
+ cache to be created.
39
+
40
+ Return:
41
+ A tuple containing the updated key and value states.
42
+ """
43
+ raise NotImplementedError("Make sure to implement `update` in a subclass.")
44
+
45
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
46
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
47
+ raise NotImplementedError("Make sure to implement `get_seq_length` in a subclass.")
48
+
49
+ def get_max_length(self) -> Optional[int]:
50
+ """Returns the maximum sequence length of the cached states, if there is any."""
51
+ raise NotImplementedError("Make sure to implement `get_max_length` in a subclass.")
52
+
53
+ def get_usable_length(self, new_seq_length: int, layer_idx: Optional[int] = 0) -> int:
54
+ """Given the sequence length of the new inputs, returns the usable length of the cache."""
55
+ # Cache without size limit -> all cache is usable
56
+ # Cache with size limit -> if the cache length plus the length of the new inputs is larger than the maximum cache
57
+ # length, we will need to evict part of the cache (and thus not all cache is usable)
58
+ max_length = self.get_max_length()
59
+ previous_seq_length = self.get_seq_length(layer_idx)
60
+ if max_length is not None and previous_seq_length + new_seq_length > max_length:
61
+ return max_length - new_seq_length
62
+ return previous_seq_length
63
+
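A worked example of the eviction arithmetic in `get_usable_length`, with made-up numbers:

```python
# Size-limited cache: 6 tokens already stored, 4 arriving, maximum length 8.
max_length, previous_seq_length, new_seq_length = 8, 6, 4

if max_length is not None and previous_seq_length + new_seq_length > max_length:
    usable = max_length - new_seq_length   # 8 - 4 = 4 cached tokens remain usable
else:
    usable = previous_seq_length
print(usable)  # 4
```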
64
+ @property
65
+ def seen_tokens(self):
66
+ logger.warning_once(
67
+ "The `seen_tokens` attribute is deprecated and will be removed in v4.41. Use the `cache_position` "
68
+ "model input instead."
69
+ )
70
+ if hasattr(self, "_seen_tokens"):
71
+ return self._seen_tokens
72
+ else:
73
+ return None
74
+
75
+
76
+ class DynamicCache(Cache):
77
+ """
78
+ A cache that grows dynamically as more tokens are generated. This is the default for generative models.
79
+
80
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
81
+ `[batch_size, num_heads, seq_len, head_dim]`.
82
+ """
83
+
84
+ def __init__(self) -> None:
85
+ self.key_cache: List[torch.Tensor] = []
86
+ self.value_cache: List[torch.Tensor] = []
87
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
88
+
89
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
90
+ """
91
+ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
92
+ sequence length.
93
+ """
94
+ if layer_idx < len(self):
95
+ return (self.key_cache[layer_idx], self.value_cache[layer_idx])
96
+ else:
97
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
98
+
99
+ def __iter__(self):
100
+ """
101
+ Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over
102
+ keys and values
103
+ """
104
+ for layer_idx in range(len(self)):
105
+ yield (self.key_cache[layer_idx], self.value_cache[layer_idx])
106
+
107
+ def __len__(self):
108
+ """
109
+ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds
110
+ to the number of layers in the model.
111
+ """
112
+ return len(self.key_cache)
113
+
114
+ def update(
115
+ self,
116
+ key_states: torch.Tensor,
117
+ value_states: torch.Tensor,
118
+ layer_idx: int,
119
+ cache_kwargs: Optional[Dict[str, Any]] = None,
120
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
121
+ """
122
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
123
+
124
+ Parameters:
125
+ key_states (`torch.Tensor`):
126
+ The new key states to cache.
127
+ value_states (`torch.Tensor`):
128
+ The new value states to cache.
129
+ layer_idx (`int`):
130
+ The index of the layer to cache the states for.
131
+ cache_kwargs (`Dict[str, Any]`, `optional`):
132
+ Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
133
+
134
+ Return:
135
+ A tuple containing the updated key and value states.
136
+ """
137
+ # Update the number of seen tokens
138
+ if layer_idx == 0:
139
+ self._seen_tokens += key_states.shape[-2]
140
+
141
+ # Update the cache
142
+ if len(self.key_cache) <= layer_idx:
143
+ self.key_cache.append(key_states)
144
+ self.value_cache.append(value_states)
145
+ else:
146
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
147
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
148
+
149
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
150
+
151
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
152
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
153
+ if len(self.key_cache) <= layer_idx:
154
+ return 0
155
+ return self.key_cache[layer_idx].shape[-2]
156
+
157
+ def get_max_length(self) -> Optional[int]:
158
+ """Returns the maximum sequence length of the cached states. DynamicCache does not have a maximum length."""
159
+ return None
160
+
161
+ def reorder_cache(self, beam_idx: torch.LongTensor):
162
+ """Reorders the cache for beam search, given the selected beam indices."""
163
+ for layer_idx in range(len(self.key_cache)):
164
+ device = self.key_cache[layer_idx].device
165
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
166
+ device = self.value_cache[layer_idx].device
167
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
168
+
169
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
170
+ """Converts the `DynamicCache` instance into its equivalent in the legacy cache format."""
171
+ legacy_cache = ()
172
+ for layer_idx in range(len(self)):
173
+ legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),)
174
+ return legacy_cache
175
+
176
+ @classmethod
177
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
178
+ """Converts a cache in the legacy cache format into an equivalent `DynamicCache`."""
179
+ cache = cls()
180
+ if past_key_values is not None:
181
+ for layer_idx in range(len(past_key_values)):
182
+ key_states, value_states = past_key_values[layer_idx]
183
+ cache.update(key_states, value_states, layer_idx)
184
+ return cache
185
+
186
+
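A minimal usage sketch of `DynamicCache.update`, based only on the code above; the tensor sizes are illustrative:

```python
import torch
from transformers.cache_utils import DynamicCache

cache = DynamicCache()
# [batch_size, num_heads, seq_len, head_dim] as documented above
key = torch.randn(1, 8, 4, 64)
value = torch.randn(1, 8, 4, 64)

cache.update(key, value, layer_idx=0)
print(cache.get_seq_length(0))  # 4

cache.update(torch.randn(1, 8, 1, 64), torch.randn(1, 8, 1, 64), layer_idx=0)
print(cache.get_seq_length(0))  # 5 -- new states are concatenated along the seq dimension
```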
187
+ class SinkCache(Cache):
188
+ """
189
+ A cache as described in the [Attention Sinks paper](https://arxiv.org/abs/2309.17453). It allows the model to
190
+ generate beyond the length of its context window, without losing fluency in the conversation. As it discards past
191
+ tokens, the model will lose the ability to generate tokens that depend on the context that was discarded.
192
+
193
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
194
+ `[batch_size, num_heads, seq_len, head_dim]`.
195
+
196
+ Parameters:
197
+ window_length (`int`):
198
+ The length of the context window.
199
+ num_sink_tokens (`int`):
200
+ The number of sink tokens. See the original paper for more information.
201
+ """
202
+
203
+ def __init__(self, window_length: int, num_sink_tokens: int) -> None:
204
+ self.key_cache: List[torch.Tensor] = []
205
+ self.value_cache: List[torch.Tensor] = []
206
+ self.window_length = window_length
207
+ self.num_sink_tokens = num_sink_tokens
208
+ self.cos_sin_cache = {}
209
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
210
+
211
+ @staticmethod
212
+ def _rotate_half(x):
213
+ x1 = x[..., : x.shape[-1] // 2]
214
+ x2 = x[..., x.shape[-1] // 2 :]
215
+ return torch.cat((-x2, x1), dim=-1)
216
+
217
+ def _apply_key_rotary_pos_emb(
218
+ self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
219
+ ) -> torch.Tensor:
220
+ rotated_key_states = (key_states * cos) + (self._rotate_half(key_states) * sin)
221
+ return rotated_key_states
222
+
223
+ def _get_rerotation_cos_sin(
224
+ self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
225
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
226
+ if key_states.shape[-2] not in self.cos_sin_cache:
227
+ # Upcast to float32 temporarily for better accuracy
228
+ cos = cos.to(torch.float32)
229
+ sin = sin.to(torch.float32)
230
+
231
+ # Compute the cos and sin required for back- and forward-rotating to one position earlier in the sequence
232
+ original_cos = cos[self.num_sink_tokens + key_states.shape[-2] :]
233
+ shifted_cos = cos[self.num_sink_tokens : -key_states.shape[-2]]
234
+ original_sin = sin[self.num_sink_tokens + key_states.shape[-2] :]
235
+ shifted_sin = sin[self.num_sink_tokens : -key_states.shape[-2]]
236
+ rerotation_cos = original_cos * shifted_cos + original_sin * shifted_sin
237
+ rerotation_sin = -original_sin * shifted_cos + original_cos * shifted_sin
238
+
239
+ self.cos_sin_cache[key_states.shape[-2]] = (
240
+ rerotation_cos.to(key_states.dtype).unsqueeze(0),
241
+ rerotation_sin.to(key_states.dtype).unsqueeze(0),
242
+ )
243
+ return self.cos_sin_cache[key_states.shape[-2]]
244
+
245
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
246
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
247
+ # Workaround to make 'key_states.shape[-2] + past_key_value.get_seq_length(self.layer_idx)' <= window_length
248
+ if len(self.key_cache) <= layer_idx:
249
+ return 0
250
+ return self.key_cache[layer_idx].shape[-2]
251
+
252
+ def get_max_length(self) -> Optional[int]:
253
+ """Returns the maximum sequence length of the cached states."""
254
+ return self.window_length
255
+
256
+ def update(
257
+ self,
258
+ key_states: torch.Tensor,
259
+ value_states: torch.Tensor,
260
+ layer_idx: int,
261
+ cache_kwargs: Optional[Dict[str, Any]] = None,
262
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
263
+ """
264
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
265
+
266
+ Parameters:
267
+ key_states (`torch.Tensor`):
268
+ The new key states to cache.
269
+ value_states (`torch.Tensor`):
270
+ The new value states to cache.
271
+ layer_idx (`int`):
272
+ The index of the layer to cache the states for.
273
+ cache_kwargs (`Dict[str, Any]`, `optional`):
274
+ Additional arguments for the cache subclass. The following arguments can be used in `SinkCache`: `sin`,
275
+ `cos` and `partial_rotation_size`. These arguments are used with models using RoPE, to recompute the
276
+ rotation as the tokens are shifted.
277
+
278
+ Return:
279
+ A tuple containing the updated key and value states.
280
+ """
281
+ # Optional kwargs for `SinkCache` -- needed on models using RoPE. `partial_rotation_size` is used on models
282
+ # with partially rotated position embeddings, like Phi or Persimmon.
283
+ sin = cache_kwargs.get("sin")
284
+ cos = cache_kwargs.get("cos")
285
+ partial_rotation_size = cache_kwargs.get("partial_rotation_size")
286
+ using_rope = cos is not None and sin is not None
287
+
288
+ # Update the number of seen tokens
289
+ if layer_idx == 0:
290
+ self._seen_tokens += key_states.shape[-2]
291
+
292
+ # [bsz, num_heads, seq_len, head_dim]
293
+ if len(self.key_cache) <= layer_idx:
294
+ # Empty cache
295
+ self.key_cache.append(key_states)
296
+ self.value_cache.append(value_states)
297
+
298
+ elif key_states.shape[-2] + self.get_seq_length(layer_idx) < self.window_length:
299
+ # Growing cache
300
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
301
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
302
+
303
+ else:
304
+ # Shifting cache
305
+ keys_to_keep = self.key_cache[layer_idx][
306
+ :, :, -self.window_length + self.num_sink_tokens + key_states.shape[-2] :
307
+ ]
308
+
309
+ # On RoPE models, we need to recompute the Key rotation as the tokens are shifted
310
+ if using_rope:
311
+ rerotation_cos, rerotation_sin = self._get_rerotation_cos_sin(
312
+ key_states, cos[: self.window_length], sin[: self.window_length]
313
+ )
314
+ if partial_rotation_size is not None:
315
+ keys_to_keep, keys_pass = (
316
+ keys_to_keep[..., :partial_rotation_size],
317
+ keys_to_keep[..., partial_rotation_size:],
318
+ )
319
+ keys_to_keep = self._apply_key_rotary_pos_emb(keys_to_keep, rerotation_cos, rerotation_sin)
320
+ if partial_rotation_size is not None:
321
+ keys_to_keep = torch.cat((keys_to_keep, keys_pass), dim=-1)
322
+
323
+ # Concatenate sink tokens, shifted & rotated tokens (if needed), and new tokens
324
+ sink_keys = self.key_cache[layer_idx][:, :, : self.num_sink_tokens]
325
+ self.key_cache[layer_idx] = torch.cat([sink_keys, keys_to_keep, key_states], dim=-2)
326
+
327
+ sink_values = self.value_cache[layer_idx][:, :, : self.num_sink_tokens]
328
+ values_to_keep = self.value_cache[layer_idx][
329
+ :, :, -self.window_length + self.num_sink_tokens + value_states.shape[-2] :
330
+ ]
331
+ self.value_cache[layer_idx] = torch.cat([sink_values, values_to_keep, value_states], dim=-2)
332
+
333
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
334
+
335
+ def reorder_cache(self, beam_idx: torch.LongTensor):
336
+ """Reorders the cache for beam search, given the selected beam indices."""
337
+ for layer_idx in range(len(self.key_cache)):
338
+ device = self.key_cache[layer_idx].device
339
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
340
+ device = self.value_cache[layer_idx].device
341
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
342
+
343
+
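A hedged sketch of `SinkCache` without RoPE re-rotation. Note that `update` calls `cache_kwargs.get(...)`, so an empty dict (rather than `None`) has to be passed when no `sin`/`cos` tensors are available; the sizes below are illustrative:

```python
import torch
from transformers.cache_utils import SinkCache

cache = SinkCache(window_length=8, num_sink_tokens=2)
for _ in range(12):  # feed one token at a time
    key = torch.randn(1, 4, 1, 32)
    value = torch.randn(1, 4, 1, 32)
    cache.update(key, value, layer_idx=0, cache_kwargs={})  # no RoPE kwargs

print(cache.get_seq_length(0))  # 8 -- bounded at window_length (sink tokens + recent tokens)
print(cache.get_max_length())   # 8
```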
344
+ class StaticCache(Cache):
345
+ """
346
+ Static Cache class to be used with `torch.compile(model)`.
347
+
348
+ Parameters:
349
+ config (`PretrainedConfig`):
350
+ The configuration file defining the `max_position_embeddings`, `hidden_size` and `num_attention_heads`
351
+ required to initialize the static cache.
352
+ max_batch_size (`int`):
353
+ The maximum batch size with which the model will be used.
354
+ max_cache_len (`int`):
355
+ The maximum sequence length with which the model will be used.
356
+ device (`torch.device`):
357
+ The device on which the cache should be initialized. Should be the same as the layer.
358
+ dtype (*optional*, defaults to `torch.float32`):
359
+ The default `dtype` to use when initializing the layer.
360
+ """
361
+
362
+ def __init__(self, config: PretrainedConfig, max_batch_size: int, max_cache_len: int, device, dtype=None) -> None:
363
+ super().__init__()
364
+ self.max_batch_size = max_batch_size
365
+ self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len
366
+ # Some models define a custom `head_dim` != config.hidden_size // config.num_attention_heads
367
+ self.head_dim = (
368
+ config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
369
+ )
370
+
371
+ self.dtype = dtype if dtype is not None else torch.float32
372
+ self.num_key_value_heads = (
373
+ config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads
374
+ )
375
+
376
+ cache_shape = (max_batch_size, self.num_key_value_heads, self.max_cache_len, self.head_dim)
377
+ self.key_cache: torch.Tensor = torch.zeros(cache_shape, dtype=self.dtype, device=device)
378
+ self.value_cache: torch.Tensor = torch.zeros(cache_shape, dtype=self.dtype, device=device)
379
+
380
+ def update(
381
+ self,
382
+ key_states: torch.Tensor,
383
+ value_states: torch.Tensor,
384
+ layer_idx: int,
385
+ cache_kwargs: Optional[Dict[str, Any]] = None,
386
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
387
+ """
388
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
389
+ It is VERY important to index using a tensor, otherwise you introduce a copy to the device.
390
+
391
+ Parameters:
392
+ key_states (`torch.Tensor`):
393
+ The new key states to cache.
394
+ value_states (`torch.Tensor`):
395
+ The new value states to cache.
396
+ layer_idx (`int`):
397
+ The index of the layer to cache the states for. Kept for backward compatibility.
398
+ cache_kwargs (`Dict[str, Any]`, `optional`):
399
+ Additional arguments for the cache subclass. The `StaticCache` needs the `cache_position` input
400
+ to know which slots of the cache it should overwrite.
401
+
402
+ Return:
403
+ A tuple containing the updated key and value states.
404
+ """
405
+ new_cache_positions = cache_kwargs.get("cache_position")
406
+ k_out = self.key_cache
407
+ v_out = self.value_cache
408
+
409
+ k_out[:, :, new_cache_positions] = key_states
410
+ v_out[:, :, new_cache_positions] = value_states
411
+
412
+ return k_out, v_out
413
+
414
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
415
+ """Returns the sequence length of the cached states that were seen by the model. `layer_idx` kept for BC"""
416
+ # Occupied cache == any slot in the 3rd dim (sequence length) holds a non-zero value. To save on compute, let's
417
+ # limit the check to the first batch member and head dimension.
418
+ # TODO: This is error prone, a filled cache may be `0.0`. Let's use a stateless integer instead, after
419
+ # https://github.com/pytorch/pytorch/issues/120248 is fixed
420
+ return (self.key_cache[0, 0].any(dim=-1)).sum()
421
+
422
+ def get_max_length(self) -> Optional[int]:
423
+ """Returns the maximum sequence length of the cached states, which for `StaticCache` is `max_cache_len`."""
424
+ return self.max_cache_len
425
+
426
+ def reorder_cache(self, beam_idx: torch.LongTensor):
427
+ """Reorders the cache for beam search, given the selected beam indices."""
428
+ device = self.key_cache.device
429
+ self.key_cache = self.key_cache.index_select(0, beam_idx.to(device))
430
+ device = self.value_cache.device
431
+ self.value_cache = self.value_cache.index_select(0, beam_idx.to(device))
432
+
433
+ def to_legacy_cache(self):
434
+ """Dummy function for BC. We have to keep it because otherwise the call in the models' `forward` would break."""
435
+ return None
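A minimal sketch of pre-allocating a `StaticCache` and writing into it through a `cache_position` tensor, as the `update` code above expects. The config values are made up; note that `num_key_value_heads` has to be present on the config:

```python
import torch
from transformers import PretrainedConfig
from transformers.cache_utils import StaticCache

config = PretrainedConfig(
    max_position_embeddings=128, hidden_size=256,
    num_attention_heads=8, num_key_value_heads=8,
)
cache = StaticCache(config, max_batch_size=1, max_cache_len=64, device="cpu", dtype=torch.float32)

q_len = 4
cache_position = torch.arange(q_len)              # slots 0..3 of the buffer get overwritten
key = torch.randn(1, 8, q_len, 256 // 8)
value = torch.randn(1, 8, q_len, 256 // 8)
k_out, v_out = cache.update(key, value, layer_idx=0, cache_kwargs={"cache_position": cache_position})

print(k_out.shape)                   # torch.Size([1, 8, 64, 32]) -- the full pre-allocated buffer
print(int(cache.get_seq_length()))   # 4 occupied (non-zero) slots
```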
venv/lib/python3.10/site-packages/transformers/configuration_utils.py ADDED
@@ -0,0 +1,1133 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Configuration base class and utilities."""
17
+
18
+
19
+ import copy
20
+ import json
21
+ import os
22
+ import re
23
+ import warnings
24
+ from typing import Any, Dict, List, Optional, Tuple, Union
25
+
26
+ from packaging import version
27
+
28
+ from . import __version__
29
+ from .dynamic_module_utils import custom_object_save
30
+ from .utils import (
31
+ CONFIG_NAME,
32
+ PushToHubMixin,
33
+ add_model_info_to_auto_map,
34
+ cached_file,
35
+ copy_func,
36
+ download_url,
37
+ extract_commit_hash,
38
+ is_remote_url,
39
+ is_torch_available,
40
+ logging,
41
+ )
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _re_configuration_file = re.compile(r"config\.(.*)\.json")
47
+
48
+
49
+ class PretrainedConfig(PushToHubMixin):
50
+ # no-format
51
+ r"""
52
+ Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
53
+ methods for loading/downloading/saving configurations.
54
+
55
+ <Tip>
56
+
57
+ A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
58
+ initialize a model does **not** load the model weights. It only affects the model's configuration.
59
+
60
+ </Tip>
61
+
62
+ Class attributes (overridden by derived classes):
63
+
64
+ - **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate
65
+ the correct object in [`~transformers.AutoConfig`].
66
+ - **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the
67
+ config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:
68
+ [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].
69
+ - **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary
70
+ outputs of the model during inference.
71
+ - **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized
72
+ naming of attributes.
73
+
74
+ Common attributes (present in all subclasses):
75
+
76
+ - **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the
77
+ embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
78
+ - **hidden_size** (`int`) -- The hidden size of the model.
79
+ - **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the
80
+ model.
81
+ - **num_hidden_layers** (`int`) -- The number of blocks in the model.
82
+
83
+ Args:
84
+ name_or_path (`str`, *optional*, defaults to `""`):
85
+ Store the string that was passed to [`PreTrainedModel.from_pretrained`] or
86
+ [`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created
87
+ with such a method.
88
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
89
+ Whether or not the model should return all hidden-states.
90
+ output_attentions (`bool`, *optional*, defaults to `False`):
91
+ Whether or not the model should return all attentions.
92
+ return_dict (`bool`, *optional*, defaults to `True`):
93
+ Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.
94
+ is_encoder_decoder (`bool`, *optional*, defaults to `False`):
95
+ Whether the model is used as an encoder/decoder or not.
96
+ is_decoder (`bool`, *optional*, defaults to `False`):
97
+ Whether the model is used as a decoder or not (in which case it's used as an encoder).
98
+ cross_attention_hidden_size (`int`, *optional*):
99
+ The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
100
+ setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
101
+ add_cross_attention (`bool`, *optional*, defaults to `False`):
102
+ Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
103
+ that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
104
+ in `AUTO_MODELS_FOR_CAUSAL_LM`.
105
+ tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
106
+ Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
107
+ and decoder model to have the exact same parameter names.
108
+ prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):
109
+ Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
110
+ heads to prune in said layer.
111
+
112
+ For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
113
+ chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
114
+ The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
115
+ the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <
116
+ sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed
117
+ Forward Chunking work?](../glossary.html#feed-forward-chunking).
118
+
119
+ > Parameters for sequence generation
120
+
121
+ max_length (`int`, *optional*, defaults to 20):
122
+ Maximum length that will be used by default in the `generate` method of the model.
123
+ min_length (`int`, *optional*, defaults to 0):
124
+ Minimum length that will be used by default in the `generate` method of the model.
125
+ do_sample (`bool`, *optional*, defaults to `False`):
126
+ Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling ;
127
+ use greedy decoding otherwise.
128
+ early_stopping (`bool`, *optional*, defaults to `False`):
129
+ Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
130
+ when at least `num_beams` sentences are finished per batch or not.
131
+ num_beams (`int`, *optional*, defaults to 1):
132
+ Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
133
+ no beam search.
134
+ num_beam_groups (`int`, *optional*, defaults to 1):
135
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams
136
+ that will be used by default in the `generate` method of the model. 1 means no group beam search.
137
+ diversity_penalty (`float`, *optional*, defaults to 0.0):
138
+ Value to control diversity for group beam search that will be used by default in the `generate` method of
139
+ the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.
140
+ temperature (`float`, *optional*, defaults to 1.0):
141
+ The value used to module the next token probabilities that will be used by default in the `generate` method
142
+ of the model. Must be strictly positive.
143
+ top_k (`int`, *optional*, defaults to 50):
144
+ Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in
145
+ the `generate` method of the model.
146
+ top_p (`float`, *optional*, defaults to 1):
147
+ Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1,
148
+ only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
149
+ typical_p (`float`, *optional*, defaults to 1):
150
+ Local typicality measures how similar the conditional probability of predicting a target token next is to
151
+ the expected conditional probability of predicting a random token next, given the partial text already
152
+ generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
153
+ add up to `typical_p` or higher are kept for generation. See [this
154
+ paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
155
+ repetition_penalty (`float`, *optional*, defaults to 1):
156
+ Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0
157
+ means no penalty.
158
+ length_penalty (`float`, *optional*, defaults to 1):
159
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
160
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
161
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
162
+ `length_penalty` < 0.0 encourages shorter sequences.
163
+ no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the
164
+ `generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can
165
+ only occur once.
166
+ encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by
167
+ default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all
168
+ ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.
169
+ bad_words_ids (`List[int]`, *optional*):
170
+ List of token ids that are not allowed to be generated that will be used by default in the `generate`
171
+ method of the model. In order to get the tokens of the words that should not appear in the generated text,
172
+ use `tokenizer.encode(bad_word, add_prefix_space=True)`.
173
+ num_return_sequences (`int`, *optional*, defaults to 1):
174
+ Number of independently computed returned sequences for each element in the batch that will be used by
175
+ default in the `generate` method of the model.
176
+ output_scores (`bool`, *optional*, defaults to `False`):
177
+ Whether the model should return the logits when used for generation.
178
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
179
+ Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.
180
+ forced_bos_token_id (`int`, *optional*):
181
+ The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
182
+ multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
183
+ language token.
184
+ forced_eos_token_id (`int`, *optional*):
185
+ The id of the token to force as the last generated token when `max_length` is reached.
186
+ remove_invalid_values (`bool`, *optional*):
187
+ Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method to crash.
188
+ Note that using `remove_invalid_values` can slow down generation.
189
+
190
+ > Parameters for fine-tuning tasks
191
+
192
+ architectures (`List[str]`, *optional*):
193
+ Model architectures that can be used with the model pretrained weights.
194
+ finetuning_task (`str`, *optional*):
195
+ Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow
196
+ or PyTorch) checkpoint.
197
+ id2label (`Dict[int, str]`, *optional*):
198
+ A map from index (for instance prediction index, or target index) to label.
199
+ label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.
200
+ num_labels (`int`, *optional*):
201
+ Number of labels to use in the last layer added to the model, typically for a classification task.
202
+ task_specific_params (`Dict[str, Any]`, *optional*):
203
+ Additional keyword arguments to store for the current task.
204
+ problem_type (`str`, *optional*):
205
+ Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
206
+ `"single_label_classification"` or `"multi_label_classification"`.
207
+
208
+ > Parameters linked to the tokenizer
209
+
210
+ tokenizer_class (`str`, *optional*):
211
+ The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the
212
+ model by default).
213
+ prefix (`str`, *optional*):
214
+ A specific prompt that should be added at the beginning of each text before calling the model.
215
+ bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.
216
+ pad_token_id (`int`, *optional*): The id of the _padding_ token.
217
+ eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.
218
+ decoder_start_token_id (`int`, *optional*):
219
+ If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
220
+ sep_token_id (`int`, *optional*): The id of the _separation_ token.
221
+
222
+ > PyTorch specific parameters
223
+
224
+ torchscript (`bool`, *optional*, defaults to `False`):
225
+ Whether or not the model should be used with Torchscript.
226
+ tie_word_embeddings (`bool`, *optional*, defaults to `True`):
227
+ Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
228
+ model has an output word embedding layer.
229
+ torch_dtype (`str`, *optional*):
230
+ The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
231
+ (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
232
+ model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
233
+ `float16` weights. Since the config object is stored in plain text, this attribute contains just the
234
+ floating point type string without the `torch.` prefix. For example, for `torch.float16`, `torch_dtype` is the
235
+ `"float16"` string.
236
+
237
+ This attribute is currently not being used during model loading time, but this may change in future
238
+ versions. We can already start preparing for the future by saving the dtype with `save_pretrained`.
239
+
240
+ > TensorFlow specific parameters
241
+
242
+ use_bfloat16 (`bool`, *optional*, defaults to `False`):
243
+ Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).
244
+ tf_legacy_loss (`bool`, *optional*, defaults to `False`):
245
+ Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may
246
+ not be XLA-compatible. This option is here for backward compatibility and will be removed in Transformers
247
+ v5.
248
+ """
249
+
250
+ model_type: str = ""
251
+ is_composition: bool = False
252
+ attribute_map: Dict[str, str] = {}
253
+ _auto_class: Optional[str] = None
254
+
255
+ def __setattr__(self, key, value):
256
+ if key in super().__getattribute__("attribute_map"):
257
+ key = super().__getattribute__("attribute_map")[key]
258
+ super().__setattr__(key, value)
259
+
260
+ def __getattribute__(self, key):
261
+ if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
262
+ key = super().__getattribute__("attribute_map")[key]
263
+ return super().__getattribute__(key)
264
+
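A hedged sketch of the `attribute_map` indirection implemented by `__setattr__`/`__getattribute__` above; `ToyConfig` is a hypothetical subclass used only for illustration:

```python
from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    # reads/writes of "n_embd" are redirected to the canonical "hidden_size"
    attribute_map = {"n_embd": "hidden_size"}

cfg = ToyConfig(hidden_size=16)
print(cfg.n_embd)       # 16 -- read goes through the map
cfg.n_embd = 32
print(cfg.hidden_size)  # 32 -- write goes through the map as well
```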
265
+ def __init__(self, **kwargs):
266
+ # Attributes with defaults
267
+ self.return_dict = kwargs.pop("return_dict", True)
268
+ self.output_hidden_states = kwargs.pop("output_hidden_states", False)
269
+ self.output_attentions = kwargs.pop("output_attentions", False)
270
+ self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
271
+ self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
272
+ self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
273
+ self.tf_legacy_loss = kwargs.pop("tf_legacy_loss", False) # Only used by TensorFlow models
274
+ self.pruned_heads = kwargs.pop("pruned_heads", {})
275
+ self.tie_word_embeddings = kwargs.pop(
276
+ "tie_word_embeddings", True
277
+ ) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
278
+ self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
279
+
280
+ # `is_decoder` is used in encoder-decoder models to differentiate the encoder from the decoder
281
+ self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
282
+ self.is_decoder = kwargs.pop("is_decoder", False)
283
+ self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None)
284
+ self.add_cross_attention = kwargs.pop("add_cross_attention", False)
285
+ self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
286
+
287
+ # Retrocompatibility: Parameters for sequence generation. While we will keep the ability to load these
288
+ # parameters, saving them will be deprecated. In a distant future, we won't need to load them.
289
+ for parameter_name, default_value in self._get_generation_defaults().items():
290
+ setattr(self, parameter_name, kwargs.pop(parameter_name, default_value))
291
+
292
+ # Fine-tuning task arguments
293
+ self.architectures = kwargs.pop("architectures", None)
294
+ self.finetuning_task = kwargs.pop("finetuning_task", None)
295
+ self.id2label = kwargs.pop("id2label", None)
296
+ self.label2id = kwargs.pop("label2id", None)
297
+ if self.label2id is not None and not isinstance(self.label2id, dict):
298
+ raise ValueError("Argument label2id should be a dictionary.")
299
+ if self.id2label is not None:
300
+ if not isinstance(self.id2label, dict):
301
+ raise ValueError("Argument id2label should be a dictionary.")
302
+ num_labels = kwargs.pop("num_labels", None)
303
+ if num_labels is not None and len(self.id2label) != num_labels:
304
+ logger.warning(
305
+ f"You passed along `num_labels={num_labels}` with an incompatible id to label map: "
306
+ f"{self.id2label}. The number of labels will be overwritten to {self.num_labels}."
307
+ )
308
+ self.id2label = {int(key): value for key, value in self.id2label.items()}
309
+ # Keys are always strings in JSON so convert ids to int here.
310
+ else:
311
+ self.num_labels = kwargs.pop("num_labels", 2)
312
+
313
+ if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
314
+ # we will start using self.torch_dtype in v5, but to be consistent with
315
+ # from_pretrained's torch_dtype arg, convert it to an actual torch.dtype object
316
+ if is_torch_available():
317
+ import torch
318
+
319
+ self.torch_dtype = getattr(torch, self.torch_dtype)
320
+
321
+ # Tokenizer arguments TODO: eventually tokenizer and models should share the same config
322
+ self.tokenizer_class = kwargs.pop("tokenizer_class", None)
323
+ self.prefix = kwargs.pop("prefix", None)
324
+ self.bos_token_id = kwargs.pop("bos_token_id", None)
325
+ self.pad_token_id = kwargs.pop("pad_token_id", None)
326
+ self.eos_token_id = kwargs.pop("eos_token_id", None)
327
+ self.sep_token_id = kwargs.pop("sep_token_id", None)
328
+
329
+ self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
330
+
331
+ # task specific arguments
332
+ self.task_specific_params = kwargs.pop("task_specific_params", None)
333
+
334
+ # regression / multi-label classification
335
+ self.problem_type = kwargs.pop("problem_type", None)
336
+ allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
337
+ if self.problem_type is not None and self.problem_type not in allowed_problem_types:
338
+ raise ValueError(
339
+ f"The config parameter `problem_type` was not understood: received {self.problem_type} "
340
+ "but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
341
+ )
342
+
343
+ # TPU arguments
344
+ if kwargs.pop("xla_device", None) is not None:
345
+ logger.warning(
346
+ "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
347
+ "safely remove it from your `config.json` file."
348
+ )
349
+
350
+ # Name or path to the pretrained checkpoint
351
+ self._name_or_path = str(kwargs.pop("name_or_path", ""))
352
+ # Config hash
353
+ self._commit_hash = kwargs.pop("_commit_hash", None)
354
+
355
+ # Attention implementation to use, if relevant.
356
+ self._attn_implementation_internal = kwargs.pop("attn_implementation", None)
357
+
358
+ # Drop the transformers version info
359
+ self.transformers_version = kwargs.pop("transformers_version", None)
360
+
361
+ # Deal with gradient checkpointing
362
+ if kwargs.get("gradient_checkpointing", False):
363
+ warnings.warn(
364
+ "Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
365
+ "Transformers. Use `model.gradient_checkpointing_enable()` instead, or if you are using the "
366
+ "`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
367
+ )
368
+
369
+ # Additional attributes without default values
370
+ for key, value in kwargs.items():
371
+ try:
372
+ setattr(self, key, value)
373
+ except AttributeError as err:
374
+ logger.error(f"Can't set {key} with value {value} for {self}")
375
+ raise err
376
+
377
+ @property
378
+ def name_or_path(self) -> str:
379
+ return getattr(self, "_name_or_path", None)
380
+
381
+ @name_or_path.setter
382
+ def name_or_path(self, value):
383
+ self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
384
+
385
+ @property
386
+ def use_return_dict(self) -> bool:
387
+ """
388
+ `bool`: Whether or not to return [`~utils.ModelOutput`] instead of tuples.
389
+ """
390
+ # If torchscript is set, force `return_dict=False` to avoid jit errors
391
+ return self.return_dict and not self.torchscript
392
+
393
+ @property
394
+ def num_labels(self) -> int:
395
+ """
396
+ `int`: The number of labels for classification models.
397
+ """
398
+ return len(self.id2label)
399
+
400
+ @num_labels.setter
401
+ def num_labels(self, num_labels: int):
402
+ if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
403
+ self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
404
+ self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
405
+
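A short, hedged illustration of how the `num_labels` setter above keeps `id2label` and `label2id` in sync with placeholder names:

```python
from transformers import PretrainedConfig

config = PretrainedConfig()   # defaults to num_labels=2
print(config.id2label)        # {0: 'LABEL_0', 1: 'LABEL_1'}

config.num_labels = 3
print(config.label2id)        # {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
```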
406
+ @property
407
+ def _attn_implementation(self):
408
+ # This property is made private for now (as it cannot be changed and a PreTrainedModel.use_attn_implementation method needs to be implemented.)
409
+ if hasattr(self, "_attn_implementation_internal"):
410
+ if self._attn_implementation_internal is None:
411
+ # `config.attn_implementation` should never be None, for backward compatibility.
412
+ return "eager"
413
+ else:
414
+ return self._attn_implementation_internal
415
+ else:
416
+ return "eager"
417
+
418
+ @_attn_implementation.setter
419
+ def _attn_implementation(self, value):
420
+ self._attn_implementation_internal = value
421
+
422
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
423
+ """
424
+ Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
425
+ [`~PretrainedConfig.from_pretrained`] class method.
426
+
427
+ Args:
428
+ save_directory (`str` or `os.PathLike`):
429
+ Directory where the configuration JSON file will be saved (will be created if it does not exist).
430
+ push_to_hub (`bool`, *optional*, defaults to `False`):
431
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
432
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
433
+ namespace).
434
+ kwargs (`Dict[str, Any]`, *optional*):
435
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
436
+ """
437
+ self._set_token_in_kwargs(kwargs)
438
+
439
+ if os.path.isfile(save_directory):
440
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
441
+
442
+ non_default_generation_parameters = {}
443
+ for parameter_name, default_value in self._get_generation_defaults().items():
444
+ if hasattr(self, parameter_name) and getattr(self, parameter_name) != default_value:
445
+ non_default_generation_parameters[parameter_name] = getattr(self, parameter_name)
446
+ if len(non_default_generation_parameters) > 0:
447
+ logger.warning(
448
+ "Some non-default generation parameters are set in the model config. These should go into a "
449
+ "GenerationConfig file (https://huggingface.co/docs/transformers/generation_strategies#save-a-custom-decoding-strategy-with-your-model) "
450
+ "instead. This warning will be raised to an exception in v4.41.\n"
451
+ f"Non-default generation parameters: {str(non_default_generation_parameters)}"
452
+ )
453
+
454
+ os.makedirs(save_directory, exist_ok=True)
455
+
456
+ if push_to_hub:
457
+ commit_message = kwargs.pop("commit_message", None)
458
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
459
+ repo_id = self._create_repo(repo_id, **kwargs)
460
+ files_timestamps = self._get_files_timestamps(save_directory)
461
+
462
+ # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
463
+ # loaded from the Hub.
464
+ if self._auto_class is not None:
465
+ custom_object_save(self, save_directory, config=self)
466
+
467
+ # If we save using the predefined names, we can load using `from_pretrained`
468
+ output_config_file = os.path.join(save_directory, CONFIG_NAME)
469
+
470
+ self.to_json_file(output_config_file, use_diff=True)
471
+ logger.info(f"Configuration saved in {output_config_file}")
472
+
473
+ if push_to_hub:
474
+ self._upload_modified_files(
475
+ save_directory,
476
+ repo_id,
477
+ files_timestamps,
478
+ commit_message=commit_message,
479
+ token=kwargs.get("token"),
480
+ )
481
+
482
+ @staticmethod
483
+ def _set_token_in_kwargs(kwargs, token=None):
484
+ """Temporary method to deal with `token` and `use_auth_token`.
485
+
486
+ This method is to avoid applying the same changes in all model config classes that overwrite `from_pretrained`.
487
+
488
+ Need to clean up `use_auth_token` in a follow-up PR.
489
+ """
490
+ # Some model config classes like CLIP define their own `from_pretrained` without the new argument `token` yet.
491
+ if token is None:
492
+ token = kwargs.pop("token", None)
493
+ use_auth_token = kwargs.pop("use_auth_token", None)
494
+
495
+ if use_auth_token is not None:
496
+ warnings.warn(
497
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
498
+ FutureWarning,
499
+ )
500
+ if token is not None:
501
+ raise ValueError(
502
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
503
+ )
504
+ token = use_auth_token
505
+
506
+ if token is not None:
507
+ kwargs["token"] = token
508
+
509
+ @classmethod
510
+ def from_pretrained(
511
+ cls,
512
+ pretrained_model_name_or_path: Union[str, os.PathLike],
513
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
514
+ force_download: bool = False,
515
+ local_files_only: bool = False,
516
+ token: Optional[Union[str, bool]] = None,
517
+ revision: str = "main",
518
+ **kwargs,
519
+ ) -> "PretrainedConfig":
520
+ r"""
521
+ Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
522
+
523
+ Args:
524
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
525
+ This can be either:
526
+
527
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
528
+ huggingface.co.
529
+ - a path to a *directory* containing a configuration file saved using the
530
+ [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
531
+ - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
532
+ cache_dir (`str` or `os.PathLike`, *optional*):
533
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
534
+ standard cache should not be used.
535
+ force_download (`bool`, *optional*, defaults to `False`):
536
+ Whether or not to force to (re-)download the configuration files and override the cached versions if
537
+ they exist.
538
+ resume_download (`bool`, *optional*, defaults to `False`):
539
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file
540
+ exists.
541
+ proxies (`Dict[str, str]`, *optional*):
542
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
543
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
544
+ token (`str` or `bool`, *optional*):
545
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
546
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
547
+ revision (`str`, *optional*, defaults to `"main"`):
548
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
549
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
550
+ identifier allowed by git.
551
+
552
+ <Tip>
553
+
554
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
555
+
556
+ </Tip>
557
+
558
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
559
+ If `False`, then this function returns just the final configuration object.
560
+
561
+ If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
562
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
563
+ part of `kwargs` which has not been used to update `config` and is otherwise ignored.
564
+ subfolder (`str`, *optional*, defaults to `""`):
565
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
566
+ specify the folder name here.
567
+ kwargs (`Dict[str, Any]`, *optional*):
568
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
569
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
570
+ by the `return_unused_kwargs` keyword parameter.
571
+
572
+ Returns:
573
+ [`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
574
+
575
+ Examples:
576
+
577
+ ```python
578
+ # We can't directly instantiate the base class *PretrainedConfig*, so let's show the examples on a
579
+ # derived class: BertConfig
580
+ config = BertConfig.from_pretrained(
581
+ "google-bert/bert-base-uncased"
582
+ ) # Download configuration from huggingface.co and cache.
583
+ config = BertConfig.from_pretrained(
584
+ "./test/saved_model/"
585
+ ) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
586
+ config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
587
+ config = BertConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
588
+ assert config.output_attentions == True
589
+ config, unused_kwargs = BertConfig.from_pretrained(
590
+ "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
591
+ )
592
+ assert config.output_attentions == True
593
+ assert unused_kwargs == {"foo": False}
594
+ ```"""
595
+ kwargs["cache_dir"] = cache_dir
596
+ kwargs["force_download"] = force_download
597
+ kwargs["local_files_only"] = local_files_only
598
+ kwargs["revision"] = revision
599
+
600
+ cls._set_token_in_kwargs(kwargs, token)
601
+
602
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
603
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
604
+ logger.warning(
605
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
606
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
607
+ )
608
+
609
+ return cls.from_dict(config_dict, **kwargs)
610
+
611
+ @classmethod
612
+ def get_config_dict(
613
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
614
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
615
+ """
616
+ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
617
+ [`PretrainedConfig`] using `from_dict`.
618
+
619
+ Parameters:
620
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
621
+ The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
622
+
623
+ Returns:
624
+ `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
625
+
626
+ """
627
+ cls._set_token_in_kwargs(kwargs)
628
+
629
+ original_kwargs = copy.deepcopy(kwargs)
630
+ # Get config dict associated with the base config file
631
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
632
+ if "_commit_hash" in config_dict:
633
+ original_kwargs["_commit_hash"] = config_dict["_commit_hash"]
634
+
635
+ # That config file may point us toward another config file to use.
636
+ if "configuration_files" in config_dict:
637
+ configuration_file = get_configuration_file(config_dict["configuration_files"])
638
+ config_dict, kwargs = cls._get_config_dict(
639
+ pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
640
+ )
641
+
642
+ return config_dict, kwargs
643
+
644
+ @classmethod
645
+ def _get_config_dict(
646
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
647
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
648
+ cache_dir = kwargs.pop("cache_dir", None)
649
+ force_download = kwargs.pop("force_download", False)
650
+ resume_download = kwargs.pop("resume_download", False)
651
+ proxies = kwargs.pop("proxies", None)
652
+ token = kwargs.pop("token", None)
653
+ local_files_only = kwargs.pop("local_files_only", False)
654
+ revision = kwargs.pop("revision", None)
655
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
656
+ subfolder = kwargs.pop("subfolder", "")
657
+ from_pipeline = kwargs.pop("_from_pipeline", None)
658
+ from_auto_class = kwargs.pop("_from_auto", False)
659
+ commit_hash = kwargs.pop("_commit_hash", None)
660
+
661
+ if trust_remote_code is True:
662
+ logger.warning(
663
+ "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
664
+ " ignored."
665
+ )
666
+
667
+ user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
668
+ if from_pipeline is not None:
669
+ user_agent["using_pipeline"] = from_pipeline
670
+
671
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
672
+
673
+ is_local = os.path.isdir(pretrained_model_name_or_path)
674
+ if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
675
+ # Special case when pretrained_model_name_or_path is a local file
676
+ resolved_config_file = pretrained_model_name_or_path
677
+ is_local = True
678
+ elif is_remote_url(pretrained_model_name_or_path):
679
+ configuration_file = pretrained_model_name_or_path
680
+ resolved_config_file = download_url(pretrained_model_name_or_path)
681
+ else:
682
+ configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
683
+
684
+ try:
685
+ # Load from local folder or from cache or download from model Hub and cache
686
+ resolved_config_file = cached_file(
687
+ pretrained_model_name_or_path,
688
+ configuration_file,
689
+ cache_dir=cache_dir,
690
+ force_download=force_download,
691
+ proxies=proxies,
692
+ resume_download=resume_download,
693
+ local_files_only=local_files_only,
694
+ token=token,
695
+ user_agent=user_agent,
696
+ revision=revision,
697
+ subfolder=subfolder,
698
+ _commit_hash=commit_hash,
699
+ )
700
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
701
+ except EnvironmentError:
702
+ # Re-raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
703
+ # the original exception.
704
+ raise
705
+ except Exception:
706
+ # For any other exception, we throw a generic error.
707
+ raise EnvironmentError(
708
+ f"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it"
709
+ " from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
710
+ f" name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory"
711
+ f" containing a {configuration_file} file"
712
+ )
713
+
714
+ try:
715
+ # Load config dict
716
+ config_dict = cls._dict_from_json_file(resolved_config_file)
717
+ config_dict["_commit_hash"] = commit_hash
718
+ except (json.JSONDecodeError, UnicodeDecodeError):
719
+ raise EnvironmentError(
720
+ f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
721
+ )
722
+
723
+ if is_local:
724
+ logger.info(f"loading configuration file {resolved_config_file}")
725
+ else:
726
+ logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
727
+
728
+ if "auto_map" in config_dict and not is_local:
729
+ config_dict["auto_map"] = add_model_info_to_auto_map(
730
+ config_dict["auto_map"], pretrained_model_name_or_path
731
+ )
732
+ return config_dict, kwargs
733
+
734
+ @classmethod
735
+ def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
736
+ """
737
+ Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
738
+
739
+ Args:
740
+ config_dict (`Dict[str, Any]`):
741
+ Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
742
+ retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
743
+ kwargs (`Dict[str, Any]`):
744
+ Additional parameters from which to initialize the configuration object.
745
+
746
+ Returns:
747
+ [`PretrainedConfig`]: The configuration object instantiated from those parameters.
748
+ """
749
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
750
+ # Those arguments may be passed along for our internal telemetry.
751
+ # We remove them so they don't appear in `return_unused_kwargs`.
752
+ kwargs.pop("_from_auto", None)
753
+ kwargs.pop("_from_pipeline", None)
754
+ # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
755
+ if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
756
+ kwargs["_commit_hash"] = config_dict["_commit_hash"]
757
+
758
+ # We remove it from kwargs so that it does not appear in `return_unused_kwargs`.
759
+ config_dict["attn_implementation"] = kwargs.pop("attn_implementation", None)
760
+
761
+ config = cls(**config_dict)
762
+
763
+ if hasattr(config, "pruned_heads"):
764
+ config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}
765
+
766
+ # Update config with kwargs if needed
767
+ if "num_labels" in kwargs and "id2label" in kwargs:
768
+ num_labels = kwargs["num_labels"]
769
+ id2label = kwargs["id2label"] if kwargs["id2label"] is not None else []
770
+ if len(id2label) != num_labels:
771
+ raise ValueError(
772
+ f"You passed along `num_labels={num_labels}` with an incompatible id to label map: "
773
+ f"{kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove "
774
+ "one of them."
775
+ )
776
+ to_remove = []
777
+ for key, value in kwargs.items():
778
+ if hasattr(config, key):
779
+ current_attr = getattr(config, key)
780
+ # To authorize passing a custom subconfig as kwarg in models that have nested configs.
781
+ if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):
782
+ value = current_attr.__class__(**value)
783
+ setattr(config, key, value)
784
+ if key != "torch_dtype":
785
+ to_remove.append(key)
786
+ for key in to_remove:
787
+ kwargs.pop(key, None)
788
+
789
+ logger.info(f"Model config {config}")
790
+ if return_unused_kwargs:
791
+ return config, kwargs
792
+ else:
793
+ return config
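+         # Illustrative sketch of a typical round trip (the model id is only an example; `get_config_dict`
+         # is the documented way to obtain `config_dict`):
+         #     config_dict, unused = BertConfig.get_config_dict("google-bert/bert-base-uncased")
+         #     config = BertConfig.from_dict(config_dict, **unused)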
794
+
795
+ @classmethod
796
+ def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
797
+ """
798
+ Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
799
+
800
+ Args:
801
+ json_file (`str` or `os.PathLike`):
802
+ Path to the JSON file containing the parameters.
803
+
804
+ Returns:
805
+ [`PretrainedConfig`]: The configuration object instantiated from that JSON file.
806
+
807
+ """
808
+ config_dict = cls._dict_from_json_file(json_file)
809
+ return cls(**config_dict)
810
+
811
+ @classmethod
812
+ def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
813
+ with open(json_file, "r", encoding="utf-8") as reader:
814
+ text = reader.read()
815
+ return json.loads(text)
816
+
817
+ def __eq__(self, other):
818
+ return isinstance(other, PretrainedConfig) and (self.__dict__ == other.__dict__)
819
+
820
+ def __repr__(self):
821
+ return f"{self.__class__.__name__} {self.to_json_string()}"
822
+
823
+ def to_diff_dict(self) -> Dict[str, Any]:
824
+ """
825
+ Removes all attributes from config which correspond to the default config attributes for better readability and
826
+ serializes to a Python dictionary.
827
+
828
+ Returns:
829
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
830
+ """
831
+ config_dict = self.to_dict()
832
+
833
+ # get the default config dict
834
+ default_config_dict = PretrainedConfig().to_dict()
835
+
836
+ # get class specific config dict
837
+ class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
838
+
839
+ serializable_config_dict = {}
840
+
841
+ # only serialize values that differ from the default config
842
+ for key, value in config_dict.items():
843
+ if (
844
+ isinstance(getattr(self, key, None), PretrainedConfig)
845
+ and key in class_config_dict
846
+ and isinstance(class_config_dict[key], dict)
847
+ ):
848
+ # For nested configs we need to clean the diff recursively
849
+ diff = recursive_diff_dict(value, class_config_dict[key], config_obj=getattr(self, key, None))
850
+ if "model_type" in value:
851
+ # Needs to be set even if it's not in the diff
852
+ diff["model_type"] = value["model_type"]
853
+ if len(diff) > 0:
854
+ serializable_config_dict[key] = diff
855
+ elif (
856
+ key not in default_config_dict
857
+ or key == "transformers_version"
858
+ or value != default_config_dict[key]
859
+ or (key in class_config_dict and value != class_config_dict[key])
860
+ ):
861
+ serializable_config_dict[key] = value
862
+
863
+ if hasattr(self, "quantization_config"):
864
+ serializable_config_dict["quantization_config"] = (
865
+ self.quantization_config.to_dict()
866
+ if not isinstance(self.quantization_config, dict)
867
+ else self.quantization_config
868
+ )
869
+
870
+ # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
871
+ _ = serializable_config_dict.pop("_pre_quantization_dtype", None)
872
+
873
+ self.dict_torch_dtype_to_str(serializable_config_dict)
874
+
875
+ if "_attn_implementation_internal" in serializable_config_dict:
876
+ del serializable_config_dict["_attn_implementation_internal"]
877
+
878
+ return serializable_config_dict
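+         # Illustrative sketch of the diff behaviour (BertConfig is only an example of a derived class;
+         # the exact keys depend on that class's defaults):
+         #     config = BertConfig(hidden_size=1024)
+         #     config.to_dict()       # every attribute, defaults included
+         #     config.to_diff_dict()  # roughly {"hidden_size": 1024, "model_type": "bert", "transformers_version": ...}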
879
+
880
+ def to_dict(self) -> Dict[str, Any]:
881
+ """
882
+ Serializes this instance to a Python dictionary.
883
+
884
+ Returns:
885
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
886
+ """
887
+ output = copy.deepcopy(self.__dict__)
888
+ if hasattr(self.__class__, "model_type"):
889
+ output["model_type"] = self.__class__.model_type
890
+ if "_auto_class" in output:
891
+ del output["_auto_class"]
892
+ if "_commit_hash" in output:
893
+ del output["_commit_hash"]
894
+ if "_attn_implementation_internal" in output:
895
+ del output["_attn_implementation_internal"]
896
+
897
+ # Transformers version when serializing the model
898
+ output["transformers_version"] = __version__
899
+
900
+ for key, value in output.items():
901
+ # Deal with nested configs like CLIP
902
+ if isinstance(value, PretrainedConfig):
903
+ value = value.to_dict()
904
+ del value["transformers_version"]
905
+
906
+ output[key] = value
907
+
908
+ if hasattr(self, "quantization_config"):
909
+ output["quantization_config"] = (
910
+ self.quantization_config.to_dict()
911
+ if not isinstance(self.quantization_config, dict)
912
+ else self.quantization_config
913
+ )
914
+
915
+ # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable.
916
+ _ = output.pop("_pre_quantization_dtype", None)
917
+
918
+ self.dict_torch_dtype_to_str(output)
919
+
920
+ return output
921
+
922
+ def to_json_string(self, use_diff: bool = True) -> str:
923
+ """
924
+ Serializes this instance to a JSON string.
925
+
926
+ Args:
927
+ use_diff (`bool`, *optional*, defaults to `True`):
928
+ If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
929
+ is serialized to JSON string.
930
+
931
+ Returns:
932
+ `str`: String containing all the attributes that make up this configuration instance in JSON format.
933
+ """
934
+ if use_diff is True:
935
+ config_dict = self.to_diff_dict()
936
+ else:
937
+ config_dict = self.to_dict()
938
+ return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
939
+
940
+ def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
941
+ """
942
+ Save this instance to a JSON file.
943
+
944
+ Args:
945
+ json_file_path (`str` or `os.PathLike`):
946
+ Path to the JSON file in which this configuration instance's parameters will be saved.
947
+ use_diff (`bool`, *optional*, defaults to `True`):
948
+ If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
949
+ is serialized to JSON file.
950
+ """
951
+ with open(json_file_path, "w", encoding="utf-8") as writer:
952
+ writer.write(self.to_json_string(use_diff=use_diff))
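+         # Illustrative usage sketch (file names are arbitrary):
+         #     config.to_json_file("config.json")                        # diff vs. defaults (default behaviour)
+         #     config.to_json_file("config_full.json", use_diff=False)   # every attribute, fully expanded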
953
+
954
+ def update(self, config_dict: Dict[str, Any]):
955
+ """
956
+ Updates attributes of this class with attributes from `config_dict`.
957
+
958
+ Args:
959
+ config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
960
+ """
961
+ for key, value in config_dict.items():
962
+ setattr(self, key, value)
963
+
964
+ def update_from_string(self, update_str: str):
965
+ """
966
+ Updates attributes of this class with attributes from `update_str`.
967
+
968
+ The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
969
+ "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
970
+
971
+ The keys to change have to already exist in the config object.
972
+
973
+ Args:
974
+ update_str (`str`): String with attributes that should be updated for this class.
975
+
976
+ """
977
+
978
+ d = dict(x.split("=") for x in update_str.split(","))
979
+ for k, v in d.items():
980
+ if not hasattr(self, k):
981
+ raise ValueError(f"key {k} isn't in the original config dict")
982
+
983
+ old_v = getattr(self, k)
984
+ if isinstance(old_v, bool):
985
+ if v.lower() in ["true", "1", "y", "yes"]:
986
+ v = True
987
+ elif v.lower() in ["false", "0", "n", "no"]:
988
+ v = False
989
+ else:
990
+ raise ValueError(f"can't derive true or false from {v} (key {k})")
991
+ elif isinstance(old_v, int):
992
+ v = int(v)
993
+ elif isinstance(old_v, float):
994
+ v = float(v)
995
+ elif not isinstance(old_v, str):
996
+ raise ValueError(
997
+ f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
998
+ )
999
+
1000
+ setattr(self, k, v)
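+         # Illustrative sketch, assuming a GPT-2 style config that defines these attributes:
+         #     config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")
+         # Each value is cast to the type of the existing attribute (bool, int, float or str).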
1001
+
1002
+ def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
1003
+ """
1004
+ Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
1005
+ converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the *"float32"*
1006
+ string, which can then be stored in the json format.
1007
+ """
1008
+ if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
1009
+ d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
1010
+ for value in d.values():
1011
+ if isinstance(value, dict):
1012
+ self.dict_torch_dtype_to_str(value)
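+         # Illustrative sketch (assumes torch is importable; the nested key is hypothetical):
+         #     d = {"torch_dtype": torch.float16, "text_config": {"torch_dtype": torch.float32}}
+         #     self.dict_torch_dtype_to_str(d)
+         #     # d is now {"torch_dtype": "float16", "text_config": {"torch_dtype": "float32"}}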
1013
+
1014
+ @classmethod
1015
+ def register_for_auto_class(cls, auto_class="AutoConfig"):
1016
+ """
1017
+ Register this class with a given auto class. This should only be used for custom configurations as the ones in
1018
+ the library are already mapped with `AutoConfig`.
1019
+
1020
+ <Tip warning={true}>
1021
+
1022
+ This API is experimental and may have some slight breaking changes in the next releases.
1023
+
1024
+ </Tip>
1025
+
1026
+ Args:
1027
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
1028
+ The auto class to register this new configuration with.
1029
+ """
1030
+ if not isinstance(auto_class, str):
1031
+ auto_class = auto_class.__name__
1032
+
1033
+ import transformers.models.auto as auto_module
1034
+
1035
+ if not hasattr(auto_module, auto_class):
1036
+ raise ValueError(f"{auto_class} is not a valid auto class.")
1037
+
1038
+ cls._auto_class = auto_class
1039
+
1040
+ @staticmethod
1041
+ def _get_generation_defaults() -> Dict[str, Any]:
1042
+ return {
1043
+ "max_length": 20,
1044
+ "min_length": 0,
1045
+ "do_sample": False,
1046
+ "early_stopping": False,
1047
+ "num_beams": 1,
1048
+ "num_beam_groups": 1,
1049
+ "diversity_penalty": 0.0,
1050
+ "temperature": 1.0,
1051
+ "top_k": 50,
1052
+ "top_p": 1.0,
1053
+ "typical_p": 1.0,
1054
+ "repetition_penalty": 1.0,
1055
+ "length_penalty": 1.0,
1056
+ "no_repeat_ngram_size": 0,
1057
+ "encoder_no_repeat_ngram_size": 0,
1058
+ "bad_words_ids": None,
1059
+ "num_return_sequences": 1,
1060
+ "output_scores": False,
1061
+ "return_dict_in_generate": False,
1062
+ "forced_bos_token_id": None,
1063
+ "forced_eos_token_id": None,
1064
+ "remove_invalid_values": False,
1065
+ "exponential_decay_length_penalty": None,
1066
+ "suppress_tokens": None,
1067
+ "begin_suppress_tokens": None,
1068
+ }
1069
+
1070
+ def _has_non_default_generation_parameters(self) -> bool:
1071
+ """
1072
+ Whether or not this instance holds non-default generation parameters.
1073
+ """
1074
+ for parameter_name, default_value in self._get_generation_defaults().items():
1075
+ if hasattr(self, parameter_name) and getattr(self, parameter_name) != default_value:
1076
+ return True
1077
+ return False
1078
+
1079
+
1080
+ def get_configuration_file(configuration_files: List[str]) -> str:
1081
+ """
1082
+ Get the configuration file to use for this version of transformers.
1083
+
1084
+ Args:
1085
+ configuration_files (`List[str]`): The list of available configuration files.
1086
+
1087
+ Returns:
1088
+ `str`: The configuration file to use.
1089
+ """
1090
+ configuration_files_map = {}
1091
+ for file_name in configuration_files:
1092
+ search = _re_configuration_file.search(file_name)
1093
+ if search is not None:
1094
+ v = search.groups()[0]
1095
+ configuration_files_map[v] = file_name
1096
+ available_versions = sorted(configuration_files_map.keys())
1097
+
1098
+ # Defaults to FULL_CONFIGURATION_FILE and then tries to look at some newer versions.
1099
+ configuration_file = CONFIG_NAME
1100
+ transformers_version = version.parse(__version__)
1101
+ for v in available_versions:
1102
+ if version.parse(v) <= transformers_version:
1103
+ configuration_file = configuration_files_map[v]
1104
+ else:
1105
+ # No point going further since the versions are sorted.
1106
+ break
1107
+
1108
+ return configuration_file
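+ # Illustrative sketch, assuming the versioned `config.X.Y.Z.json` naming matched by
+ # `_re_configuration_file` and a running transformers version of 4.30.0:
+ #     get_configuration_file(["config.json", "config.4.0.0.json", "config.42.0.0.json"])
+ #     # -> "config.4.0.0.json" (the newest versioned file whose version is <= the installed version)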
1109
+
1110
+
1111
+ def recursive_diff_dict(dict_a, dict_b, config_obj=None):
1112
+ """
1113
+ Helper function to recursively take the diff between two nested dictionaries. The resulting diff only contains the
1114
+ values from `dict_a` that are different from values in `dict_b`.
1115
+ """
1116
+ diff = {}
1117
+ default = config_obj.__class__().to_dict() if config_obj is not None else {}
1118
+ for key, value in dict_a.items():
1119
+ obj_value = getattr(config_obj, str(key), None)
1120
+ if isinstance(obj_value, PretrainedConfig) and key in dict_b and isinstance(dict_b[key], dict):
1121
+ diff_value = recursive_diff_dict(value, dict_b[key], config_obj=obj_value)
1122
+ if len(diff_value) > 0:
1123
+ diff[key] = diff_value
1124
+ elif key not in dict_b or value != dict_b[key] or key not in default or value != default[key]:
1125
+ diff[key] = value
1126
+ return diff
1127
+
1128
+
1129
+ PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
1130
+ if PretrainedConfig.push_to_hub.__doc__ is not None:
1131
+ PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
1132
+ object="config", object_class="AutoConfig", object_files="configuration file"
1133
+ )
venv/lib/python3.10/site-packages/transformers/convert_graph_to_onnx.py ADDED
@@ -0,0 +1,551 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from argparse import ArgumentParser
17
+ from os import listdir, makedirs
18
+ from pathlib import Path
19
+ from typing import Dict, List, Optional, Tuple
20
+
21
+ from packaging.version import Version, parse
22
+
23
+ from transformers.pipelines import Pipeline, pipeline
24
+ from transformers.tokenization_utils import BatchEncoding
25
+ from transformers.utils import ModelOutput, is_tf_available, is_torch_available
26
+
27
+
28
+ # This is the minimal required version to
29
+ # support some ONNX Runtime features
30
+ ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0")
31
+
32
+
33
+ SUPPORTED_PIPELINES = [
34
+ "feature-extraction",
35
+ "ner",
36
+ "sentiment-analysis",
37
+ "fill-mask",
38
+ "question-answering",
39
+ "text-generation",
40
+ "translation_en_to_fr",
41
+ "translation_en_to_de",
42
+ "translation_en_to_ro",
43
+ ]
44
+
45
+
46
+ class OnnxConverterArgumentParser(ArgumentParser):
47
+ """
48
+ Wraps all the script arguments supported to export transformers models to ONNX IR
49
+ """
50
+
51
+ def __init__(self):
52
+ super().__init__("ONNX Converter")
53
+
54
+ self.add_argument(
55
+ "--pipeline",
56
+ type=str,
57
+ choices=SUPPORTED_PIPELINES,
58
+ default="feature-extraction",
59
+ )
60
+ self.add_argument(
61
+ "--model",
62
+ type=str,
63
+ required=True,
64
+ help="Model's id or path (ex: google-bert/bert-base-cased)",
65
+ )
66
+ self.add_argument("--tokenizer", type=str, help="Tokenizer's id or path (ex: google-bert/bert-base-cased)")
67
+ self.add_argument(
68
+ "--framework",
69
+ type=str,
70
+ choices=["pt", "tf"],
71
+ help="Framework for loading the model",
72
+ )
73
+ self.add_argument("--opset", type=int, default=11, help="ONNX opset to use")
74
+ self.add_argument(
75
+ "--check-loading",
76
+ action="store_true",
77
+ help="Check ONNX is able to load the model",
78
+ )
79
+ self.add_argument(
80
+ "--use-external-format",
81
+ action="store_true",
82
+ help="Allow exporting models larger than 2GB",
83
+ )
84
+ self.add_argument(
85
+ "--quantize",
86
+ action="store_true",
87
+ help="Quantize the neural network to be run with int8",
88
+ )
89
+ self.add_argument("output")
90
+
91
+
92
+ def generate_identified_filename(filename: Path, identifier: str) -> Path:
93
+ """
94
+ Append a string-identifier at the end (before the extension, if any) to the provided filepath
95
+
96
+ Args:
97
+ filename: pathlib.Path The actual path object to which we would like to add an identifier suffix
98
+ identifier: The suffix to add
99
+
100
+ Returns: String with concatenated identifier at the end of the filename
101
+ """
102
+ return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)
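+ # Illustrative sketch (the path is a placeholder):
+ #     generate_identified_filename(Path("onnx/model.onnx"), "-optimized")
+ #     # -> Path("onnx/model-optimized.onnx")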
103
+
104
+
105
+ def check_onnxruntime_requirements(minimum_version: Version):
106
+ """
107
+ Check that onnxruntime is installed and that the installed version is recent enough
108
+
109
+ Raises:
110
+ ImportError: If onnxruntime is not installed or the installed version is too old
111
+ """
112
+ try:
113
+ import onnxruntime
114
+
115
+ # Parse the version of the installed onnxruntime
116
+ ort_version = parse(onnxruntime.__version__)
117
+
118
+ # We require 1.4.0 minimum
119
+ if ort_version < ORT_QUANTIZE_MINIMUM_VERSION:
120
+ raise ImportError(
121
+ f"We found an older version of onnxruntime ({onnxruntime.__version__}) "
122
+ f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n"
123
+ "Please update onnxruntime by running `pip install --upgrade onnxruntime`"
124
+ )
125
+
126
+ except ImportError:
127
+ raise ImportError(
128
+ "onnxruntime doesn't seem to be currently installed. "
129
+ "Please install the onnxruntime by running `pip install onnxruntime`"
130
+ " and relaunch the conversion."
131
+ )
132
+
133
+
134
+ def ensure_valid_input(model, tokens, input_names):
135
+ """
136
+ Ensure inputs are presented in the correct order, without any None
137
+
138
+ Args:
139
+ model: The model used to forward the input data
140
+ tokens: BatchEncoding holding the input data
141
+ input_names: The name of the inputs
142
+
143
+ Returns: Tuple of the ordered input names and the matching input tensors
144
+
145
+ """
146
+ print("Ensuring inputs are in correct order")
147
+
148
+ model_args_name = model.forward.__code__.co_varnames
149
+ model_args, ordered_input_names = [], []
150
+ for arg_name in model_args_name[1:]: # start at index 1 to skip "self" argument
151
+ if arg_name in input_names:
152
+ ordered_input_names.append(arg_name)
153
+ model_args.append(tokens[arg_name])
154
+ else:
155
+ print(f"{arg_name} is not present in the generated input list.")
156
+ break
157
+
158
+ print(f"Generated inputs order: {ordered_input_names}")
159
+ return ordered_input_names, tuple(model_args)
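+ # Illustrative sketch, assuming a BERT-like forward signature (input_ids, attention_mask, token_type_ids, ...):
+ #     names, args = ensure_valid_input(nlp.model, tokens, ["input_ids", "token_type_ids", "attention_mask"])
+ #     # names follows model.forward's order, e.g. ["input_ids", "attention_mask", "token_type_ids"],
+ #     # and args holds the matching tensors from `tokens`, ready to pass positionally to the ONNX export.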
160
+
161
+
162
+ def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:
163
+ """
164
+ Attempt to infer the static vs dynamic axes for each input and output tensors for a specific model
165
+
166
+ Args:
167
+ nlp: The pipeline object holding the model to be exported
168
+ framework: The framework identifier to dispatch to the correct inference scheme (pt/tf)
169
+
170
+ Returns:
171
+
172
+ - List of the inferred input variable names
173
+ - List of the inferred output variable names
174
+ - Dictionary with input/output variables names as key and shape tensor as value
175
+ - a BatchEncoding reference which was used to infer all the above information
176
+ """
177
+
178
+ def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):
179
+ if isinstance(tensor, (tuple, list)):
180
+ return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]
181
+
182
+ else:
183
+ # Let's assume batch is the first axis with only 1 element (might not always be true ...)
184
+ axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: "batch"}
185
+ if is_input:
186
+ if len(tensor.shape) == 2:
187
+ axes[1] = "sequence"
188
+ else:
189
+ raise ValueError(f"Unable to infer tensor axes ({len(tensor.shape)})")
190
+ else:
191
+ seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]
192
+ axes.update({dim: "sequence" for dim in seq_axes})
193
+
194
+ print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}")
195
+ return axes
196
+
197
+ tokens = nlp.tokenizer("This is a sample output", return_tensors=framework)
198
+ seq_len = tokens.input_ids.shape[-1]
199
+ outputs = nlp.model(**tokens) if framework == "pt" else nlp.model(tokens)
200
+ if isinstance(outputs, ModelOutput):
201
+ outputs = outputs.to_tuple()
202
+ if not isinstance(outputs, (list, tuple)):
203
+ outputs = (outputs,)
204
+
205
+ # Generate input names & axes
206
+ input_vars = list(tokens.keys())
207
+ input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}
208
+
209
+ # flatten potentially grouped outputs (past for gpt2, attentions)
210
+ outputs_flat = []
211
+ for output in outputs:
212
+ if isinstance(output, (tuple, list)):
213
+ outputs_flat.extend(output)
214
+ else:
215
+ outputs_flat.append(output)
216
+
217
+ # Generate output names & axes
218
+ output_names = [f"output_{i}" for i in range(len(outputs_flat))]
219
+ output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}
220
+
221
+ # Create the aggregated axes representation
222
+ dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)
223
+ return input_vars, output_names, dynamic_axes, tokens
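+ # Illustrative sketch of a typical result for a BERT-like feature-extraction pipeline (exact values may differ):
+ #     input_vars   -> ["input_ids", "token_type_ids", "attention_mask"]
+ #     output_names -> ["output_0", "output_1"]
+ #     dynamic_axes -> {"input_ids": {0: "batch", 1: "sequence"}, ..., "output_0": {0: "batch", 1: "sequence"}}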
224
+
225
+
226
+ def load_graph_from_args(
227
+ pipeline_name: str, framework: str, model: str, tokenizer: Optional[str] = None, **models_kwargs
228
+ ) -> Pipeline:
229
+ """
230
+ Convert the set of arguments provided through the CLI to an actual pipeline reference (tokenizer + model)
231
+
232
+ Args:
233
+ pipeline_name: The kind of pipeline to use (ner, question-answering, etc.)
234
+ framework: The framework the pipeline is backed by ("pt" or "tf")
235
+ model: The model name which will be loaded by the pipeline
236
+ tokenizer: The tokenizer name which will be loaded by the pipeline, defaults to the model's value
237
+
238
+ Returns: Pipeline object
239
+
240
+ """
241
+ # If no tokenizer provided
242
+ if tokenizer is None:
243
+ tokenizer = model
244
+
245
+ # Check the wanted framework is available
246
+ if framework == "pt" and not is_torch_available():
247
+ raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")
248
+ if framework == "tf" and not is_tf_available():
249
+ raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")
250
+
251
+ print(f"Loading pipeline (model: {model}, tokenizer: {tokenizer})")
252
+
253
+ # Allocate tokenizer and model
254
+ return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)
255
+
256
+
257
+ def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):
258
+ """
259
+ Export a PyTorch backed pipeline to ONNX Intermediate Representation (IR)
260
+
261
+ Args:
262
+ nlp: The pipeline to be exported
263
+ opset: The actual version of the ONNX operator set to use
264
+ output: Path where the generated ONNX model will be stored
265
+ use_external_format: Split the model definition from its parameters to allow models bigger than 2GB
266
+
267
+ Returns:
268
+
269
+ """
270
+ if not is_torch_available():
271
+ raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")
272
+
273
+ import torch
274
+ from torch.onnx import export
275
+
276
+ print(f"Using framework PyTorch: {torch.__version__}")
277
+
278
+ with torch.no_grad():
279
+ input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "pt")
280
+ ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names)
281
+
282
+ export(
283
+ nlp.model,
284
+ model_args,
285
+ f=output.as_posix(),
286
+ input_names=ordered_input_names,
287
+ output_names=output_names,
288
+ dynamic_axes=dynamic_axes,
289
+ do_constant_folding=True,
290
+ opset_version=opset,
291
+ )
292
+
293
+
294
+ def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
295
+ """
296
+ Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR)
297
+
298
+ Args:
299
+ nlp: The pipeline to be exported
300
+ opset: The actual version of the ONNX operator set to use
301
+ output: Path where the generated ONNX model will be stored
302
+
303
+ Notes: TensorFlow cannot export models bigger than 2GB due to an internal TensorFlow constraint
304
+
305
+ """
306
+ if not is_tf_available():
307
+ raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")
308
+
309
+ print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")
310
+
311
+ try:
312
+ import tensorflow as tf
313
+ import tf2onnx
314
+ from tf2onnx import __version__ as t2ov
315
+
316
+ print(f"Using framework TensorFlow: {tf.version.VERSION}, tf2onnx: {t2ov}")
317
+
318
+ # Build
319
+ input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf")
320
+
321
+ # Forward
322
+ nlp.model.predict(tokens.data)
323
+ input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in tokens.items()]
324
+ model_proto, _ = tf2onnx.convert.from_keras(
325
+ nlp.model, input_signature, opset=opset, output_path=output.as_posix()
326
+ )
327
+
328
+ except ImportError as e:
329
+ raise Exception(
330
+ f"Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first. {e}"
331
+ )
332
+
333
+
334
+ def convert(
335
+ framework: str,
336
+ model: str,
337
+ output: Path,
338
+ opset: int,
339
+ tokenizer: Optional[str] = None,
340
+ use_external_format: bool = False,
341
+ pipeline_name: str = "feature-extraction",
342
+ **model_kwargs,
343
+ ):
344
+ """
345
+ Convert the pipeline object to the ONNX Intermediate Representation (IR) format
346
+
347
+ Args:
348
+ framework: The framework the pipeline is backed by ("pt" or "tf")
349
+ model: The name of the model to load for the pipeline
350
+ output: The path where the ONNX graph will be stored
351
+ opset: The actual version of the ONNX operator set to use
352
+ tokenizer: The name of the tokenizer to load for the pipeline, defaults to the model's name if not provided
353
+ use_external_format:
354
+ Split the model definition from its parameters to allow models bigger than 2GB (PyTorch only)
355
+ pipeline_name: The kind of pipeline to instantiate (ner, question-answering, etc.)
356
+ model_kwargs: Keyword arguments to be forwarded to the model constructor
357
+
358
+ Returns:
359
+
360
+ """
361
+ warnings.warn(
362
+ "The `transformers.convert_graph_to_onnx` package is deprecated and will be removed in version 5 of"
363
+ " Transformers",
364
+ FutureWarning,
365
+ )
366
+ print(f"ONNX opset version set to: {opset}")
367
+
368
+ # Load the pipeline
369
+ nlp = load_graph_from_args(pipeline_name, framework, model, tokenizer, **model_kwargs)
370
+
371
+ if not output.parent.exists():
372
+ print(f"Creating folder {output.parent}")
373
+ makedirs(output.parent.as_posix())
374
+ elif len(listdir(output.parent.as_posix())) > 0:
375
+ raise Exception(f"Folder {output.parent.as_posix()} is not empty, aborting conversion")
376
+
377
+ # Export the graph
378
+ if framework == "pt":
379
+ convert_pytorch(nlp, opset, output, use_external_format)
380
+ else:
381
+ convert_tensorflow(nlp, opset, output)
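+ # Illustrative usage sketch (the model id and output path are placeholders; the output folder must be empty):
+ #     from pathlib import Path
+ #     convert(framework="pt", model="google-bert/bert-base-cased", output=Path("onnx/bert.onnx"), opset=11)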
382
+
383
+
384
+ def optimize(onnx_model_path: Path) -> Path:
385
+ """
386
+ Load the model at the specified path and let onnxruntime look at transformations on the graph to enable all the
387
+ optimizations possible
388
+
389
+ Args:
390
+ onnx_model_path: filepath where the model binary description is stored
391
+
392
+ Returns: Path where the optimized model binary description has been saved
393
+
394
+ """
395
+ from onnxruntime import InferenceSession, SessionOptions
396
+
397
+ # Generate model name with suffix "optimized"
398
+ opt_model_path = generate_identified_filename(onnx_model_path, "-optimized")
399
+ sess_option = SessionOptions()
400
+ sess_option.optimized_model_filepath = opt_model_path.as_posix()
401
+ _ = InferenceSession(onnx_model_path.as_posix(), sess_option)
402
+
403
+ print(f"Optimized model has been written at {opt_model_path}: \N{heavy check mark}")
404
+ print("/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\")
405
+
406
+ return opt_model_path
407
+
408
+
409
+ def quantize(onnx_model_path: Path) -> Path:
410
+ """
411
+ Quantize the weights of the model from float32 to int8 to allow very efficient inference on modern CPUs
412
+
413
+ Args:
414
+ onnx_model_path: Path to location the exported ONNX model is stored
415
+
416
+ Returns: The Path generated for the quantized model
417
+ """
418
+ import onnx
419
+ import onnxruntime
420
+ from onnx.onnx_pb import ModelProto
421
+ from onnxruntime.quantization import QuantizationMode
422
+ from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer
423
+ from onnxruntime.quantization.registry import IntegerOpsRegistry
424
+
425
+ # Load the ONNX model
426
+ onnx_model = onnx.load(onnx_model_path.as_posix())
427
+
428
+ if parse(onnx.__version__) < parse("1.5.0"):
429
+ print(
430
+ "Models larger than 2GB will fail to quantize due to protobuf constraint.\n"
431
+ "Please upgrade the onnx package to >= 1.5.0."
432
+ )
433
+
434
+ # Copy it
435
+ copy_model = ModelProto()
436
+ copy_model.CopyFrom(onnx_model)
437
+
438
+ # Construct quantizer
439
+ # onnxruntime renamed input_qType to activation_qType in v1.13.1, so we
440
+ # check the onnxruntime version to ensure backward compatibility.
441
+ # See also: https://github.com/microsoft/onnxruntime/pull/12873
442
+ if parse(onnxruntime.__version__) < parse("1.13.1"):
443
+ quantizer = ONNXQuantizer(
444
+ model=copy_model,
445
+ per_channel=False,
446
+ reduce_range=False,
447
+ mode=QuantizationMode.IntegerOps,
448
+ static=False,
449
+ weight_qType=True,
450
+ input_qType=False,
451
+ tensors_range=None,
452
+ nodes_to_quantize=None,
453
+ nodes_to_exclude=None,
454
+ op_types_to_quantize=list(IntegerOpsRegistry),
455
+ )
456
+ else:
457
+ quantizer = ONNXQuantizer(
458
+ model=copy_model,
459
+ per_channel=False,
460
+ reduce_range=False,
461
+ mode=QuantizationMode.IntegerOps,
462
+ static=False,
463
+ weight_qType=True,
464
+ activation_qType=False,
465
+ tensors_range=None,
466
+ nodes_to_quantize=None,
467
+ nodes_to_exclude=None,
468
+ op_types_to_quantize=list(IntegerOpsRegistry),
469
+ )
470
+
471
+ # Quantize and export
472
+ quantizer.quantize_model()
473
+
474
+ # Append "-quantized" at the end of the model's name
475
+ quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized")
476
+
477
+ # Save model
478
+ print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}")
479
+ onnx.save_model(quantizer.model.model, quantized_model_path.as_posix())
480
+
481
+ return quantized_model_path
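+ # Illustrative sketch of the optimize -> quantize chain used by the CLI below (paths are placeholders):
+ #     optimized_path = optimize(Path("onnx/bert.onnx"))       # -> onnx/bert-optimized.onnx
+ #     quantized_path = quantize(optimized_path)               # -> onnx/bert-optimized-quantized.onnx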
482
+
483
+
484
+ def verify(path: Path):
485
+ from onnxruntime import InferenceSession, SessionOptions
486
+ from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException
487
+
488
+ print(f"Checking ONNX model loading from: {path} ...")
489
+ try:
490
+ onnx_options = SessionOptions()
491
+ _ = InferenceSession(path.as_posix(), onnx_options, providers=["CPUExecutionProvider"])
492
+ print(f"Model {path} correctly loaded: \N{heavy check mark}")
493
+ except RuntimeException as re:
494
+ print(f"Error while loading the model {re}: \N{heavy ballot x}")
495
+
496
+
497
+ if __name__ == "__main__":
498
+ parser = OnnxConverterArgumentParser()
499
+ args = parser.parse_args()
500
+
501
+ # Make sure output is absolute path
502
+ args.output = Path(args.output).absolute()
503
+
504
+ try:
505
+ print("\n====== Converting model to ONNX ======")
506
+ # Convert
507
+ convert(
508
+ args.framework,
509
+ args.model,
510
+ args.output,
511
+ args.opset,
512
+ args.tokenizer,
513
+ args.use_external_format,
514
+ args.pipeline,
515
+ )
516
+
517
+ if args.quantize:
518
+ # Ensure requirements for quantization on onnxruntime is met
519
+ check_onnxruntime_requirements(ORT_QUANTIZE_MINIMUM_VERSION)
520
+
521
+ # onnxruntime optimizations don't provide the same level of performance on TensorFlow as on PyTorch
522
+ if args.framework == "tf":
523
+ print(
524
+ "\t Using TensorFlow might not provide the same optimization level compared to PyTorch.\n"
525
+ "\t For TensorFlow users you can try optimizing the model directly through onnxruntime_tools.\n"
526
+ "\t For more information, please refer to the onnxruntime documentation:\n"
527
+ "\t\thttps://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers\n"
528
+ )
529
+
530
+ print("\n====== Optimizing ONNX model ======")
531
+
532
+ # Quantization works best when using the optimized version of the model
533
+ args.optimized_output = optimize(args.output)
534
+
535
+ # Do the quantization on the right graph
536
+ args.quantized_output = quantize(args.optimized_output)
537
+
538
+ # And verify
539
+ if args.check_loading:
540
+ print("\n====== Check exported ONNX model(s) ======")
541
+ verify(args.output)
542
+
543
+ if hasattr(args, "optimized_output"):
544
+ verify(args.optimized_output)
545
+
546
+ if hasattr(args, "quantized_output"):
547
+ verify(args.quantized_output)
548
+
549
+ except Exception as e:
550
+ print(f"Error while converting the model: {e}")
551
+ exit(1)
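+ # Illustrative command line (the model id and output path are examples):
+ #     python -m transformers.convert_graph_to_onnx --framework pt \
+ #         --model google-bert/bert-base-cased --quantize onnx/bert.onnx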
venv/lib/python3.10/site-packages/transformers/convert_pytorch_checkpoint_to_tf2.py ADDED
@@ -0,0 +1,448 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert PyTorch checkpoints to TensorFlow."""
16
+
17
+
18
+ import argparse
19
+ import os
20
+
21
+ from . import (
22
+ AlbertConfig,
23
+ BartConfig,
24
+ BertConfig,
25
+ CamembertConfig,
26
+ CTRLConfig,
27
+ DistilBertConfig,
28
+ DPRConfig,
29
+ ElectraConfig,
30
+ FlaubertConfig,
31
+ GPT2Config,
32
+ LayoutLMConfig,
33
+ LxmertConfig,
34
+ OpenAIGPTConfig,
35
+ RobertaConfig,
36
+ T5Config,
37
+ TFAlbertForPreTraining,
38
+ TFBartForConditionalGeneration,
39
+ TFBartForSequenceClassification,
40
+ TFBertForPreTraining,
41
+ TFBertForQuestionAnswering,
42
+ TFBertForSequenceClassification,
43
+ TFCamembertForMaskedLM,
44
+ TFCTRLLMHeadModel,
45
+ TFDistilBertForMaskedLM,
46
+ TFDistilBertForQuestionAnswering,
47
+ TFDPRContextEncoder,
48
+ TFDPRQuestionEncoder,
49
+ TFDPRReader,
50
+ TFElectraForPreTraining,
51
+ TFFlaubertWithLMHeadModel,
52
+ TFGPT2LMHeadModel,
53
+ TFLayoutLMForMaskedLM,
54
+ TFLxmertForPreTraining,
55
+ TFLxmertVisualFeatureEncoder,
56
+ TFOpenAIGPTLMHeadModel,
57
+ TFRobertaForCausalLM,
58
+ TFRobertaForMaskedLM,
59
+ TFRobertaForSequenceClassification,
60
+ TFT5ForConditionalGeneration,
61
+ TFTransfoXLLMHeadModel,
62
+ TFWav2Vec2Model,
63
+ TFXLMRobertaForMaskedLM,
64
+ TFXLMWithLMHeadModel,
65
+ TFXLNetLMHeadModel,
66
+ TransfoXLConfig,
67
+ Wav2Vec2Config,
68
+ Wav2Vec2Model,
69
+ XLMConfig,
70
+ XLMRobertaConfig,
71
+ XLNetConfig,
72
+ is_torch_available,
73
+ load_pytorch_checkpoint_in_tf2_model,
74
+ )
75
+ from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
76
+
77
+
78
+ if is_torch_available():
79
+ import numpy as np
80
+ import torch
81
+
82
+ from . import (
83
+ AlbertForPreTraining,
84
+ BartForConditionalGeneration,
85
+ BertForPreTraining,
86
+ BertForQuestionAnswering,
87
+ BertForSequenceClassification,
88
+ CamembertForMaskedLM,
89
+ CTRLLMHeadModel,
90
+ DistilBertForMaskedLM,
91
+ DistilBertForQuestionAnswering,
92
+ DPRContextEncoder,
93
+ DPRQuestionEncoder,
94
+ DPRReader,
95
+ ElectraForPreTraining,
96
+ FlaubertWithLMHeadModel,
97
+ GPT2LMHeadModel,
98
+ LayoutLMForMaskedLM,
99
+ LxmertForPreTraining,
100
+ LxmertVisualFeatureEncoder,
101
+ OpenAIGPTLMHeadModel,
102
+ RobertaForMaskedLM,
103
+ RobertaForSequenceClassification,
104
+ T5ForConditionalGeneration,
105
+ TransfoXLLMHeadModel,
106
+ XLMRobertaForMaskedLM,
107
+ XLMWithLMHeadModel,
108
+ XLNetLMHeadModel,
109
+ )
110
+ from .pytorch_utils import is_torch_greater_or_equal_than_1_13
111
+
112
+
113
+ logging.set_verbosity_info()
114
+
115
+ MODEL_CLASSES = {
116
+ "bart": (
117
+ BartConfig,
118
+ TFBartForConditionalGeneration,
119
+ TFBartForSequenceClassification,
120
+ BartForConditionalGeneration,
121
+ ),
122
+ "bert": (
123
+ BertConfig,
124
+ TFBertForPreTraining,
125
+ BertForPreTraining,
126
+ ),
127
+ "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad": (
128
+ BertConfig,
129
+ TFBertForQuestionAnswering,
130
+ BertForQuestionAnswering,
131
+ ),
132
+ "google-bert/bert-large-cased-whole-word-masking-finetuned-squad": (
133
+ BertConfig,
134
+ TFBertForQuestionAnswering,
135
+ BertForQuestionAnswering,
136
+ ),
137
+ "google-bert/bert-base-cased-finetuned-mrpc": (
138
+ BertConfig,
139
+ TFBertForSequenceClassification,
140
+ BertForSequenceClassification,
141
+ ),
142
+ "dpr": (
143
+ DPRConfig,
144
+ TFDPRQuestionEncoder,
145
+ TFDPRContextEncoder,
146
+ TFDPRReader,
147
+ DPRQuestionEncoder,
148
+ DPRContextEncoder,
149
+ DPRReader,
150
+ ),
151
+ "openai-community/gpt2": (
152
+ GPT2Config,
153
+ TFGPT2LMHeadModel,
154
+ GPT2LMHeadModel,
155
+ ),
156
+ "xlnet": (
157
+ XLNetConfig,
158
+ TFXLNetLMHeadModel,
159
+ XLNetLMHeadModel,
160
+ ),
161
+ "xlm": (
162
+ XLMConfig,
163
+ TFXLMWithLMHeadModel,
164
+ XLMWithLMHeadModel,
165
+ ),
166
+ "xlm-roberta": (
167
+ XLMRobertaConfig,
168
+ TFXLMRobertaForMaskedLM,
169
+ XLMRobertaForMaskedLM,
170
+ ),
171
+ "transfo-xl": (
172
+ TransfoXLConfig,
173
+ TFTransfoXLLMHeadModel,
174
+ TransfoXLLMHeadModel,
175
+ ),
176
+ "openai-community/openai-gpt": (
177
+ OpenAIGPTConfig,
178
+ TFOpenAIGPTLMHeadModel,
179
+ OpenAIGPTLMHeadModel,
180
+ ),
181
+ "roberta": (
182
+ RobertaConfig,
183
+ TFRobertaForCausalLM,
184
+ TFRobertaForMaskedLM,
185
+ RobertaForMaskedLM,
186
+ ),
187
+ "layoutlm": (
188
+ LayoutLMConfig,
189
+ TFLayoutLMForMaskedLM,
190
+ LayoutLMForMaskedLM,
191
+ ),
192
+ "FacebookAI/roberta-large-mnli": (
193
+ RobertaConfig,
194
+ TFRobertaForSequenceClassification,
195
+ RobertaForSequenceClassification,
196
+ ),
197
+ "camembert": (
198
+ CamembertConfig,
199
+ TFCamembertForMaskedLM,
200
+ CamembertForMaskedLM,
201
+ ),
202
+ "flaubert": (
203
+ FlaubertConfig,
204
+ TFFlaubertWithLMHeadModel,
205
+ FlaubertWithLMHeadModel,
206
+ ),
207
+ "distilbert": (
208
+ DistilBertConfig,
209
+ TFDistilBertForMaskedLM,
210
+ DistilBertForMaskedLM,
211
+ ),
212
+ "distilbert-base-distilled-squad": (
213
+ DistilBertConfig,
214
+ TFDistilBertForQuestionAnswering,
215
+ DistilBertForQuestionAnswering,
216
+ ),
217
+ "lxmert": (
218
+ LxmertConfig,
219
+ TFLxmertForPreTraining,
220
+ LxmertForPreTraining,
221
+ ),
222
+ "lxmert-visual-feature-encoder": (
223
+ LxmertConfig,
224
+ TFLxmertVisualFeatureEncoder,
225
+ LxmertVisualFeatureEncoder,
226
+ ),
227
+ "Salesforce/ctrl": (
228
+ CTRLConfig,
229
+ TFCTRLLMHeadModel,
230
+ CTRLLMHeadModel,
231
+ ),
232
+ "albert": (
233
+ AlbertConfig,
234
+ TFAlbertForPreTraining,
235
+ AlbertForPreTraining,
236
+ ),
237
+ "t5": (
238
+ T5Config,
239
+ TFT5ForConditionalGeneration,
240
+ T5ForConditionalGeneration,
241
+ ),
242
+ "electra": (
243
+ ElectraConfig,
244
+ TFElectraForPreTraining,
245
+ ElectraForPreTraining,
246
+ ),
247
+ "wav2vec2": (
248
+ Wav2Vec2Config,
249
+ TFWav2Vec2Model,
250
+ Wav2Vec2Model,
251
+ ),
252
+ }
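+ # Each entry maps a model type (or a specific checkpoint name) to its configuration class, the
+ # TensorFlow class(es) used for conversion, and the matching PyTorch class(es), e.g.:
+ #     MODEL_CLASSES["albert"]  # -> (AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining)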
253
+
254
+
255
+ def convert_pt_checkpoint_to_tf(
256
+ model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
257
+ ):
258
+ if model_type not in MODEL_CLASSES:
259
+ raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")
260
+
261
+ config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
262
+
263
+ # Initialise TF model
264
+ if config_file in aws_config_map:
265
+ config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
266
+ config = config_class.from_json_file(config_file)
267
+ config.output_hidden_states = True
268
+ config.output_attentions = True
269
+ print(f"Building TensorFlow model from configuration: {config}")
270
+ tf_model = model_class(config)
271
+
272
+ # Load weights from tf checkpoint
273
+ if pytorch_checkpoint_path in aws_config_map.keys():
274
+ pytorch_checkpoint_path = cached_file(
275
+ pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
276
+ )
277
+ # Load PyTorch checkpoint in tf2 model:
278
+ tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)
279
+
280
+ if compare_with_pt_model:
281
+ tfo = tf_model(tf_model.dummy_inputs, training=False) # build the network
282
+
283
+ weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {}
284
+ state_dict = torch.load(
285
+ pytorch_checkpoint_path,
286
+ map_location="cpu",
287
+ **weights_only_kwarg,
288
+ )
289
+ pt_model = pt_model_class.from_pretrained(
290
+ pretrained_model_name_or_path=None, config=config, state_dict=state_dict
291
+ )
292
+
293
+ with torch.no_grad():
294
+ pto = pt_model(**pt_model.dummy_inputs)
295
+
296
+ np_pt = pto[0].numpy()
297
+ np_tf = tfo[0].numpy()
298
+ diff = np.amax(np.abs(np_pt - np_tf))
299
+ print(f"Max absolute difference between models outputs {diff}")
300
+ assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
301
+
302
+ # Save the TensorFlow model
303
+ print(f"Save TensorFlow model to {tf_dump_path}")
304
+ tf_model.save_weights(tf_dump_path, save_format="h5")
305
+
306
+
307
+ def convert_all_pt_checkpoints_to_tf(
308
+ args_model_type,
309
+ tf_dump_path,
310
+ model_shortcut_names_or_path=None,
311
+ config_shortcut_names_or_path=None,
312
+ compare_with_pt_model=False,
313
+ use_cached_models=False,
314
+ remove_cached_files=False,
315
+ only_convert_finetuned_models=False,
316
+ ):
317
+ if args_model_type is None:
318
+ model_types = list(MODEL_CLASSES.keys())
319
+ else:
320
+ model_types = [args_model_type]
321
+
322
+ for j, model_type in enumerate(model_types, start=1):
323
+ print("=" * 100)
324
+ print(f" Converting model type {j}/{len(model_types)}: {model_type}")
325
+ print("=" * 100)
326
+ if model_type not in MODEL_CLASSES:
327
+ raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")
328
+
329
+ config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
330
+
331
+ if model_shortcut_names_or_path is None:
332
+ model_shortcut_names_or_path = list(aws_model_maps.keys())
333
+ if config_shortcut_names_or_path is None:
334
+ config_shortcut_names_or_path = model_shortcut_names_or_path
335
+
336
+ for i, (model_shortcut_name, config_shortcut_name) in enumerate(
337
+ zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
338
+ ):
339
+ print("-" * 100)
340
+ if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
341
+ if not only_convert_finetuned_models:
342
+ print(f" Skipping finetuned checkpoint {model_shortcut_name}")
343
+ continue
344
+ model_type = model_shortcut_name
345
+ elif only_convert_finetuned_models:
346
+ print(f" Skipping non-finetuned checkpoint {model_shortcut_name}")
347
+ continue
348
+ print(
349
+ f" Converting checkpoint {i}/{len(aws_config_map)}: {model_shortcut_name} - model_type {model_type}"
350
+ )
351
+ print("-" * 100)
352
+
353
+ if config_shortcut_name in aws_config_map:
354
+ config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
355
+ else:
356
+ config_file = config_shortcut_name
357
+
358
+ if model_shortcut_name in aws_model_maps:
359
+ model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
360
+ else:
361
+ model_file = model_shortcut_name
362
+
363
+ if os.path.isfile(model_shortcut_name):
364
+ model_shortcut_name = "converted_model"
365
+
366
+ convert_pt_checkpoint_to_tf(
367
+ model_type=model_type,
368
+ pytorch_checkpoint_path=model_file,
369
+ config_file=config_file,
370
+ tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
371
+ compare_with_pt_model=compare_with_pt_model,
372
+ )
373
+ if remove_cached_files:
374
+ os.remove(config_file)
375
+ os.remove(model_file)
376
+
377
+
378
+ if __name__ == "__main__":
379
+ parser = argparse.ArgumentParser()
380
+ # Required parameters
381
+ parser.add_argument(
382
+ "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
383
+ )
384
+ parser.add_argument(
385
+ "--model_type",
386
+ default=None,
387
+ type=str,
388
+ help=(
389
+ f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
390
+ "convert all the models from AWS."
391
+ ),
392
+ )
393
+ parser.add_argument(
394
+ "--pytorch_checkpoint_path",
395
+ default=None,
396
+ type=str,
397
+ help=(
398
+ "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
399
+ "If not given, will download and convert all the checkpoints from AWS."
400
+ ),
401
+ )
402
+ parser.add_argument(
403
+ "--config_file",
404
+ default=None,
405
+ type=str,
406
+ help=(
407
+ "The config json file corresponding to the pre-trained model. \n"
408
+ "This specifies the model architecture. If not given and "
409
+ "--pytorch_checkpoint_path is not given or is a shortcut name "
410
+ "use the configuration associated to the shortcut name on the AWS"
411
+ ),
412
+ )
413
+ parser.add_argument(
414
+ "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
415
+ )
416
+ parser.add_argument(
417
+ "--use_cached_models",
418
+ action="store_true",
419
+ help="Use cached models if possible instead of updating to latest checkpoint versions.",
420
+ )
421
+ parser.add_argument(
422
+ "--remove_cached_files",
423
+ action="store_true",
424
+ help="Remove pytorch models after conversion (save memory when converting in batches).",
425
+ )
426
+ parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
427
+ args = parser.parse_args()
428
+
429
+ # if args.pytorch_checkpoint_path is not None:
430
+ # convert_pt_checkpoint_to_tf(args.model_type.lower(),
431
+ # args.pytorch_checkpoint_path,
432
+ # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
433
+ # args.tf_dump_path,
434
+ # compare_with_pt_model=args.compare_with_pt_model,
435
+ # use_cached_models=args.use_cached_models)
436
+ # else:
437
+ convert_all_pt_checkpoints_to_tf(
438
+ args.model_type.lower() if args.model_type is not None else None,
439
+ args.tf_dump_path,
440
+ model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
441
+ if args.pytorch_checkpoint_path is not None
442
+ else None,
443
+ config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
444
+ compare_with_pt_model=args.compare_with_pt_model,
445
+ use_cached_models=args.use_cached_models,
446
+ remove_cached_files=args.remove_cached_files,
447
+ only_convert_finetuned_models=args.only_convert_finetuned_models,
448
+ )
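For reference, the conversion entry point used above can also be driven directly from Python instead of the CLI. This is a minimal sketch, not part of the original file: the paths are hypothetical, "bert" is assumed to be one of the `MODEL_CLASSES` keys, and the import path is assumed to match the module's location in `transformers`.

    # Hypothetical paths; only the keyword arguments used by the script above are passed.
    from transformers.convert_pytorch_checkpoint_to_tf2 import convert_pt_checkpoint_to_tf

    convert_pt_checkpoint_to_tf(
        model_type="bert",                                      # assumed MODEL_CLASSES key
        pytorch_checkpoint_path="/tmp/bert/pytorch_model.bin",  # hypothetical checkpoint
        config_file="/tmp/bert/config.json",                    # hypothetical config
        tf_dump_path="/tmp/bert/tf_model.h5",
        compare_with_pt_model=True,  # re-runs both models and checks max abs diff <= 2e-2
    )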
venv/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py ADDED
@@ -0,0 +1,1534 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Utilities to convert slow tokenizers into their fast tokenizer counterparts.
17
+
18
+ All the conversions are grouped here to keep the SentencePiece dependencies outside of the fast tokenizer files and
19
+ to make our dependency on SentencePiece optional.
20
+ """
21
+
22
+ import warnings
23
+ from typing import Dict, List, Tuple
24
+
25
+ from packaging import version
26
+ from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
27
+ from tokenizers.models import BPE, Unigram, WordPiece
28
+
29
+ from .utils import is_protobuf_available, requires_backends
30
+ from .utils.import_utils import PROTOBUF_IMPORT_ERROR
31
+
32
+
33
+ def import_protobuf(error_message=""):
34
+ if is_protobuf_available():
35
+ import google.protobuf
36
+
37
+ if version.parse(google.protobuf.__version__) < version.parse("4.0.0"):
38
+ from transformers.utils import sentencepiece_model_pb2
39
+ else:
40
+ from transformers.utils import sentencepiece_model_pb2_new as sentencepiece_model_pb2
41
+ return sentencepiece_model_pb2
42
+ else:
43
+ raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message))
44
+
45
+
46
+ def _get_prepend_scheme(add_prefix_space: bool, original_tokenizer) -> str:
47
+ if add_prefix_space:
48
+ prepend_scheme = "always"
49
+ if hasattr(original_tokenizer, "legacy") and not original_tokenizer.legacy:
50
+ prepend_scheme = "first"
51
+ else:
52
+ prepend_scheme = "never"
53
+ return prepend_scheme
54
+
55
+
56
+ class SentencePieceExtractor:
57
+ """
58
+ Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece
59
+ """
60
+
61
+ def __init__(self, model: str):
62
+ requires_backends(self, "sentencepiece")
63
+ from sentencepiece import SentencePieceProcessor
64
+
65
+ self.sp = SentencePieceProcessor()
66
+ self.sp.Load(model)
67
+
68
+ def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]:
69
+ """
70
+ By default, returns the vocab and merges in their original order; if `vocab_scores` is provided, the merges are
71
+ ordered by the piece scores instead.
72
+ """
73
+ sp = self.sp
74
+ vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())}
75
+
76
+ if vocab_scores is not None:
77
+ vocab_scores, reverse = dict(vocab_scores), True
78
+ else:
79
+ vocab_scores, reverse = vocab, False
80
+
81
+ # Merges
82
+ merges = []
83
+ for merge, piece_score in vocab_scores.items():
84
+ local = []
85
+ for index in range(1, len(merge)):
86
+ piece_l, piece_r = merge[:index], merge[index:]
87
+ if piece_l in vocab and piece_r in vocab:
88
+ local.append((piece_l, piece_r, piece_score))
89
+ local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]]))
90
+ merges.extend(local)
91
+
92
+ merges = sorted(merges, key=lambda val: val[2], reverse=reverse)
93
+ merges = [(val[0], val[1]) for val in merges]
94
+ return vocab, merges
95
+
96
+
97
+ class GemmaSentencePieceExtractor(SentencePieceExtractor):
98
+ def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]:
99
+ """
100
+ By default, returns the vocab and merges in their original order; if `vocab_scores` is provided, the merges are
101
+ ordered by the piece scores instead.
102
+ """
103
+ sp = self.sp
104
+ vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())}
105
+
106
+ # there is a missing token in the vocab. We have to do this to support merges
107
+ # "<0x09>" is the bytefallback for `\t`
108
+ vocab["\t"] = vocab.pop("<0x09>")
109
+
110
+ if vocab_scores is not None:
111
+ vocab_scores, reverse = dict(vocab_scores), True
112
+ else:
113
+ vocab_scores, reverse = vocab, False
114
+
115
+ # Merges
116
+ merges = []
117
+ for merge, piece_score in vocab_scores.items():
118
+ local = []
119
+ for index in range(1, len(merge)):
120
+ piece_l, piece_r = merge[:index], merge[index:]
121
+ if piece_l in vocab and piece_r in vocab:
122
+ local.append((piece_l, piece_r, piece_score))
123
+ local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]]))
124
+ merges.extend(local)
125
+
126
+ merges = sorted(merges, key=lambda val: val[2], reverse=reverse)
127
+ merges = [(val[0], val[1]) for val in merges]
128
+ return vocab, merges
129
+
130
+
131
+ def check_number_comma(piece: str) -> bool:
132
+ return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit()
133
+
134
+
135
+ class Converter:
136
+ def __init__(self, original_tokenizer):
137
+ self.original_tokenizer = original_tokenizer
138
+
139
+ def converted(self) -> Tokenizer:
140
+ raise NotImplementedError()
141
+
142
+
143
+ class BertConverter(Converter):
144
+ def converted(self) -> Tokenizer:
145
+ vocab = self.original_tokenizer.vocab
146
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
147
+
148
+ tokenize_chinese_chars = False
149
+ strip_accents = False
150
+ do_lower_case = False
151
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
152
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
153
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
154
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
155
+
156
+ tokenizer.normalizer = normalizers.BertNormalizer(
157
+ clean_text=True,
158
+ handle_chinese_chars=tokenize_chinese_chars,
159
+ strip_accents=strip_accents,
160
+ lowercase=do_lower_case,
161
+ )
162
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
163
+
164
+ cls = str(self.original_tokenizer.cls_token)
165
+ sep = str(self.original_tokenizer.sep_token)
166
+ cls_token_id = self.original_tokenizer.cls_token_id
167
+ sep_token_id = self.original_tokenizer.sep_token_id
168
+
169
+ tokenizer.post_processor = processors.TemplateProcessing(
170
+ single=f"{cls}:0 $A:0 {sep}:0",
171
+ pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1",
172
+ special_tokens=[
173
+ (cls, cls_token_id),
174
+ (sep, sep_token_id),
175
+ ],
176
+ )
177
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
178
+
179
+ return tokenizer
180
+
181
+
182
+ class SplinterConverter(Converter):
183
+ def converted(self) -> Tokenizer:
184
+ vocab = self.original_tokenizer.vocab
185
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
186
+
187
+ tokenize_chinese_chars = False
188
+ strip_accents = False
189
+ do_lower_case = False
190
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
191
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
192
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
193
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
194
+
195
+ tokenizer.normalizer = normalizers.BertNormalizer(
196
+ clean_text=True,
197
+ handle_chinese_chars=tokenize_chinese_chars,
198
+ strip_accents=strip_accents,
199
+ lowercase=do_lower_case,
200
+ )
201
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
202
+
203
+ cls = str(self.original_tokenizer.cls_token)
204
+ sep = str(self.original_tokenizer.sep_token)
205
+ question = str(self.original_tokenizer.question_token)
206
+ dot = "."
207
+ cls_token_id = self.original_tokenizer.cls_token_id
208
+ sep_token_id = self.original_tokenizer.sep_token_id
209
+ question_token_id = self.original_tokenizer.question_token_id
210
+ dot_token_id = self.original_tokenizer.convert_tokens_to_ids(".")
211
+
212
+ if self.original_tokenizer.padding_side == "right":
213
+ pair = f"{cls}:0 $A:0 {question} {dot} {sep}:0 $B:1 {sep}:1"
214
+ else:
215
+ pair = f"{cls}:0 $A:0 {sep}:0 $B:1 {question} {dot} {sep}:1"
216
+
217
+ tokenizer.post_processor = processors.TemplateProcessing(
218
+ single=f"{cls}:0 $A:0 {sep}:0",
219
+ pair=pair,
220
+ special_tokens=[
221
+ (cls, cls_token_id),
222
+ (sep, sep_token_id),
223
+ (question, question_token_id),
224
+ (dot, dot_token_id),
225
+ ],
226
+ )
227
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
228
+
229
+ return tokenizer
230
+
231
+
232
+ class FunnelConverter(Converter):
233
+ def converted(self) -> Tokenizer:
234
+ vocab = self.original_tokenizer.vocab
235
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
236
+
237
+ tokenize_chinese_chars = False
238
+ strip_accents = False
239
+ do_lower_case = False
240
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
241
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
242
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
243
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
244
+
245
+ tokenizer.normalizer = normalizers.BertNormalizer(
246
+ clean_text=True,
247
+ handle_chinese_chars=tokenize_chinese_chars,
248
+ strip_accents=strip_accents,
249
+ lowercase=do_lower_case,
250
+ )
251
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
252
+
253
+ cls = str(self.original_tokenizer.cls_token)
254
+ sep = str(self.original_tokenizer.sep_token)
255
+ cls_token_id = self.original_tokenizer.cls_token_id
256
+ sep_token_id = self.original_tokenizer.sep_token_id
257
+
258
+ tokenizer.post_processor = processors.TemplateProcessing(
259
+ single=f"{cls}:2 $A:0 {sep}:0", # token_type_id is 2 for Funnel transformer
260
+ pair=f"{cls}:2 $A:0 {sep}:0 $B:1 {sep}:1",
261
+ special_tokens=[
262
+ (cls, cls_token_id),
263
+ (sep, sep_token_id),
264
+ ],
265
+ )
266
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
267
+
268
+ return tokenizer
269
+
270
+
271
+ class MPNetConverter(Converter):
272
+ def converted(self) -> Tokenizer:
273
+ vocab = self.original_tokenizer.vocab
274
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
275
+
276
+ tokenize_chinese_chars = False
277
+ strip_accents = False
278
+ do_lower_case = False
279
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
280
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
281
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
282
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
283
+
284
+ tokenizer.normalizer = normalizers.BertNormalizer(
285
+ clean_text=True,
286
+ handle_chinese_chars=tokenize_chinese_chars,
287
+ strip_accents=strip_accents,
288
+ lowercase=do_lower_case,
289
+ )
290
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
291
+
292
+ cls = str(self.original_tokenizer.cls_token)
293
+ sep = str(self.original_tokenizer.sep_token)
294
+ cls_token_id = self.original_tokenizer.cls_token_id
295
+ sep_token_id = self.original_tokenizer.sep_token_id
296
+
297
+ tokenizer.post_processor = processors.TemplateProcessing(
298
+ single=f"{cls}:0 $A:0 {sep}:0",
299
+ pair=f"{cls}:0 $A:0 {sep}:0 {sep}:0 $B:1 {sep}:1", # MPNet uses two [SEP] tokens
300
+ special_tokens=[
301
+ (cls, cls_token_id),
302
+ (sep, sep_token_id),
303
+ ],
304
+ )
305
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
306
+
307
+ return tokenizer
308
+
309
+
310
+ class OpenAIGPTConverter(Converter):
311
+ def converted(self) -> Tokenizer:
312
+ vocab = self.original_tokenizer.encoder
313
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
314
+ unk_token = self.original_tokenizer.unk_token
315
+
316
+ tokenizer = Tokenizer(
317
+ BPE(
318
+ vocab=vocab,
319
+ merges=merges,
320
+ dropout=None,
321
+ unk_token=str(unk_token),
322
+ end_of_word_suffix="</w>",
323
+ fuse_unk=False,
324
+ )
325
+ )
326
+
327
+ if tokenizer.token_to_id(str(unk_token)) is not None:
328
+ tokenizer.add_special_tokens([str(unk_token)])
329
+
330
+ tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True)
331
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
332
+ tokenizer.decoder = decoders.BPEDecoder(suffix="</w>")
333
+
334
+ return tokenizer
335
+
336
+
337
+ class GPT2Converter(Converter):
338
+ def converted(self) -> Tokenizer:
339
+ vocab = self.original_tokenizer.encoder
340
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
341
+
342
+ tokenizer = Tokenizer(
343
+ BPE(
344
+ vocab=vocab,
345
+ merges=merges,
346
+ dropout=None,
347
+ continuing_subword_prefix="",
348
+ end_of_word_suffix="",
349
+ fuse_unk=False,
350
+ )
351
+ )
352
+
353
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space)
354
+ tokenizer.decoder = decoders.ByteLevel()
355
+ if self.original_tokenizer.add_bos_token:
356
+ bos = self.original_tokenizer.bos_token
357
+ bos_token_id = self.original_tokenizer.bos_token_id
358
+ tokenizer.post_processor = processors.TemplateProcessing(
359
+ single=f"{bos}:0 $A:0",
360
+ pair=f"{bos}:0 $A:0 $B:1",
361
+ special_tokens=[
362
+ (bos, bos_token_id),
363
+ ],
364
+ )
365
+ else:
366
+ # XXX trim_offsets=False actually means this post_processor doesn't
367
+ # really do anything.
368
+ tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
369
+ return tokenizer
370
+
371
+
372
+ class HerbertConverter(Converter):
373
+ def converted(self) -> Tokenizer:
374
+ tokenizer_info_str = "#version:"
375
+ token_suffix = "</w>"
376
+
377
+ vocab = self.original_tokenizer.encoder
378
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
379
+ if tokenizer_info_str in merges[0][0]:
380
+ merges = merges[1:]
381
+
382
+ tokenizer = Tokenizer(
383
+ BPE(
384
+ vocab,
385
+ merges,
386
+ dropout=None,
387
+ unk_token=self.original_tokenizer.unk_token,
388
+ end_of_word_suffix=token_suffix,
389
+ )
390
+ )
391
+
392
+ tokenizer.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False)
393
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
394
+ tokenizer.decoder = decoders.BPEDecoder(suffix=token_suffix)
395
+ tokenizer.post_processor = processors.BertProcessing(
396
+ sep=(self.original_tokenizer.sep_token, self.original_tokenizer.sep_token_id),
397
+ cls=(self.original_tokenizer.cls_token, self.original_tokenizer.cls_token_id),
398
+ )
399
+
400
+ return tokenizer
401
+
402
+
403
+ class Qwen2Converter(Converter):
404
+ def converted(self) -> Tokenizer:
405
+ vocab = self.original_tokenizer.encoder
406
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
407
+
408
+ tokenizer = Tokenizer(
409
+ BPE(
410
+ vocab=vocab,
411
+ merges=merges,
412
+ dropout=None,
413
+ unk_token=None,
414
+ continuing_subword_prefix="",
415
+ end_of_word_suffix="",
416
+ fuse_unk=False,
417
+ byte_fallback=False,
418
+ )
419
+ )
420
+
421
+ tokenizer.normalizer = normalizers.NFC()
422
+
423
+ tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
424
+ [
425
+ pre_tokenizers.Split(
426
+ Regex(
427
+ r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
428
+ ),
429
+ behavior="isolated",
430
+ invert=False,
431
+ ),
432
+ pre_tokenizers.ByteLevel(
433
+ add_prefix_space=getattr(self.original_tokenizer, "add_prefix_space", False),
434
+ use_regex=False,
435
+ ),
436
+ ]
437
+ )
438
+
439
+ tokenizer.decoder = decoders.ByteLevel()
440
+ tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
441
+
442
+ return tokenizer
443
+
444
+
445
+ class RobertaConverter(Converter):
446
+ def converted(self) -> Tokenizer:
447
+ ot = self.original_tokenizer
448
+ vocab = ot.encoder
449
+ merges = list(ot.bpe_ranks.keys())
450
+
451
+ tokenizer = Tokenizer(
452
+ BPE(
453
+ vocab=vocab,
454
+ merges=merges,
455
+ dropout=None,
456
+ continuing_subword_prefix="",
457
+ end_of_word_suffix="",
458
+ fuse_unk=False,
459
+ )
460
+ )
461
+
462
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
463
+ tokenizer.decoder = decoders.ByteLevel()
464
+ tokenizer.post_processor = processors.RobertaProcessing(
465
+ sep=(ot.sep_token, ot.sep_token_id),
466
+ cls=(ot.cls_token, ot.cls_token_id),
467
+ add_prefix_space=ot.add_prefix_space,
468
+ trim_offsets=True, # True by default on Roberta (historical)
469
+ )
470
+
471
+ return tokenizer
472
+
473
+
474
+ class RoFormerConverter(Converter):
475
+ def converted(self) -> Tokenizer:
476
+ from .models.roformer.tokenization_utils import JiebaPreTokenizer
477
+
478
+ vocab = self.original_tokenizer.vocab
479
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
480
+
481
+ strip_accents = False
482
+ do_lower_case = False
483
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
484
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
485
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
486
+
487
+ tokenizer.normalizer = normalizers.BertNormalizer(
488
+ clean_text=True,
489
+ handle_chinese_chars=False,
490
+ strip_accents=strip_accents,
491
+ lowercase=do_lower_case,
492
+ )
493
+ tokenizer.pre_tokenizer = pre_tokenizers.PreTokenizer.custom(JiebaPreTokenizer(vocab))
494
+
495
+ cls = str(self.original_tokenizer.cls_token)
496
+ sep = str(self.original_tokenizer.sep_token)
497
+ cls_token_id = self.original_tokenizer.cls_token_id
498
+ sep_token_id = self.original_tokenizer.sep_token_id
499
+
500
+ tokenizer.post_processor = processors.TemplateProcessing(
501
+ single=f"{cls}:0 $A:0 {sep}:0",
502
+ pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1",
503
+ special_tokens=[
504
+ (cls, cls_token_id),
505
+ (sep, sep_token_id),
506
+ ],
507
+ )
508
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
509
+
510
+ return tokenizer
511
+
512
+
513
+ class DebertaConverter(Converter):
514
+ def converted(self) -> Tokenizer:
515
+ ot = self.original_tokenizer
516
+ vocab = ot.encoder
517
+ merges = list(ot.bpe_ranks.keys())
518
+
519
+ tokenizer = Tokenizer(
520
+ BPE(
521
+ vocab=vocab,
522
+ merges=merges,
523
+ dropout=None,
524
+ continuing_subword_prefix="",
525
+ end_of_word_suffix="",
526
+ fuse_unk=False,
527
+ )
528
+ )
529
+
530
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
531
+ tokenizer.decoder = decoders.ByteLevel()
532
+ tokenizer.post_processor = processors.TemplateProcessing(
533
+ single="[CLS]:0 $A:0 [SEP]:0",
534
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
535
+ special_tokens=[
536
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
537
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
538
+ ],
539
+ )
540
+
541
+ return tokenizer
542
+
543
+
544
+ class SpmConverter(Converter):
545
+ def __init__(self, *args):
546
+ requires_backends(self, "protobuf")
547
+
548
+ super().__init__(*args)
549
+
550
+ # from .utils import sentencepiece_model_pb2 as model_pb2
551
+ model_pb2 = import_protobuf()
552
+
553
+ m = model_pb2.ModelProto()
554
+ with open(self.original_tokenizer.vocab_file, "rb") as f:
555
+ m.ParseFromString(f.read())
556
+ self.proto = m
557
+
558
+ if self.proto.trainer_spec.byte_fallback:
559
+ if not getattr(self, "handle_byte_fallback", None):
560
+ warnings.warn(
561
+ "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option"
562
+ " which is not implemented in the fast tokenizers. In practice this means that the fast version of the"
563
+ " tokenizer can produce unknown tokens whereas the sentencepiece version would have converted these "
564
+ "unknown tokens into a sequence of byte tokens matching the original piece of text."
565
+ )
566
+
567
+ def vocab(self, proto):
568
+ return [(piece.piece, piece.score) for piece in proto.pieces]
569
+
570
+ def unk_id(self, proto):
571
+ return proto.trainer_spec.unk_id
572
+
573
+ def tokenizer(self, proto):
574
+ model_type = proto.trainer_spec.model_type
575
+ vocab_scores = self.vocab(proto)
576
+ unk_id = self.unk_id(proto)
577
+
578
+ if model_type == 1:
579
+ tokenizer = Tokenizer(Unigram(vocab_scores, unk_id))
580
+ elif model_type == 2:
581
+ _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract()
582
+ bpe_vocab = {word: i for i, (word, score) in enumerate(vocab_scores)}
583
+ tokenizer = Tokenizer(
584
+ BPE(
585
+ bpe_vocab,
586
+ merges,
587
+ unk_token=proto.trainer_spec.unk_piece,
588
+ fuse_unk=True,
589
+ )
590
+ )
591
+ else:
592
+ raise Exception(
593
+ "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
594
+ )
595
+
596
+ return tokenizer
597
+
598
+ def normalizer(self, proto):
599
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
600
+ _normalizers = [
601
+ normalizers.Strip(left=False, right=True), # stripping is important
602
+ normalizers.Replace(Regex(" {2,}"), "▁"),
603
+ ]
604
+ if not precompiled_charsmap:
605
+ return normalizers.Sequence(_normalizers)
606
+ else:
607
+ return normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap)] + _normalizers)
608
+
609
+ def pre_tokenizer(self, replacement, add_prefix_space):
610
+ prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
611
+ return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
612
+
613
+ def post_processor(self):
614
+ return None
615
+
616
+ def decoder(self, replacement, add_prefix_space):
617
+ prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
618
+ return decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
619
+
620
+ def converted(self) -> Tokenizer:
621
+ tokenizer = self.tokenizer(self.proto)
622
+
623
+ # Tokenizer assemble
624
+ normalizer = self.normalizer(self.proto)
625
+ if normalizer is not None:
626
+ tokenizer.normalizer = normalizer
627
+
628
+ replacement = "▁"
629
+ add_prefix_space = True
630
+ if hasattr(self.original_tokenizer, "add_prefix_space"):
631
+ add_prefix_space = self.original_tokenizer.add_prefix_space
632
+
633
+ pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space)
634
+ if pre_tokenizer is not None:
635
+ tokenizer.pre_tokenizer = pre_tokenizer
636
+
637
+ tokenizer.decoder = self.decoder(replacement, add_prefix_space)
638
+ post_processor = self.post_processor()
639
+ if post_processor:
640
+ tokenizer.post_processor = post_processor
641
+
642
+ return tokenizer
643
+
644
+
645
+ class AlbertConverter(SpmConverter):
646
+ def vocab(self, proto):
647
+ return [
648
+ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100)
649
+ for piece in proto.pieces
650
+ ]
651
+
652
+ def normalizer(self, proto):
653
+ list_normalizers = [
654
+ normalizers.Replace("``", '"'),
655
+ normalizers.Replace("''", '"'),
656
+ ]
657
+ if not self.original_tokenizer.keep_accents:
658
+ list_normalizers.append(normalizers.NFKD())
659
+ list_normalizers.append(normalizers.StripAccents())
660
+ if self.original_tokenizer.do_lower_case:
661
+ list_normalizers.append(normalizers.Lowercase())
662
+
663
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
664
+
665
+ if precompiled_charsmap:
666
+ list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
667
+
668
+ list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " "))
669
+ return normalizers.Sequence(list_normalizers)
670
+
671
+ def post_processor(self):
672
+ return processors.TemplateProcessing(
673
+ single="[CLS]:0 $A:0 [SEP]:0",
674
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
675
+ special_tokens=[
676
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
677
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
678
+ ],
679
+ )
680
+
681
+
682
+ class BarthezConverter(SpmConverter):
683
+ def unk_id(self, proto):
684
+ unk_id = 3
685
+ return unk_id
686
+
687
+ def post_processor(self):
688
+ return processors.TemplateProcessing(
689
+ single="<s> $A </s>",
690
+ pair="<s> $A </s> </s> $B </s>",
691
+ special_tokens=[
692
+ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
693
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
694
+ ],
695
+ )
696
+
697
+
698
+ class CamembertConverter(SpmConverter):
699
+ def vocab(self, proto):
700
+ vocab = [
701
+ ("<s>NOTUSED", 0.0),
702
+ ("<pad>", 0.0),
703
+ ("</s>NOTUSED", 0.0),
704
+ ("<unk>", 0.0),
705
+ ("<unk>NOTUSED", -100),
706
+ ]
707
+ # We down-grade the original SentencePiece piece by -100 to avoid using it, and use our added token instead
708
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[1:]]
709
+ vocab += [("<mask>", 0.0)]
710
+ return vocab
711
+
712
+ def unk_id(self, proto):
713
+ # See vocab unk position
714
+ return 3
715
+
716
+ def post_processor(self):
717
+ return processors.TemplateProcessing(
718
+ single="<s> $A </s>",
719
+ pair="<s> $A </s> </s> $B </s>",
720
+ special_tokens=[
721
+ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
722
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
723
+ ],
724
+ )
725
+
726
+
727
+ class DebertaV2Converter(SpmConverter):
728
+ def pre_tokenizer(self, replacement, add_prefix_space):
729
+ list_pretokenizers = []
730
+ if self.original_tokenizer.split_by_punct:
731
+ list_pretokenizers.append(pre_tokenizers.Punctuation(behavior="isolated"))
732
+ prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
733
+ list_pretokenizers.append(pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme))
734
+ return pre_tokenizers.Sequence(list_pretokenizers)
735
+
736
+ def normalizer(self, proto):
737
+ list_normalizers = []
738
+ if self.original_tokenizer.do_lower_case:
739
+ list_normalizers.append(normalizers.Lowercase())
740
+ list_normalizers.append(normalizers.Strip())
741
+
742
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
743
+ if precompiled_charsmap:
744
+ list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
745
+ list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " "))
746
+
747
+ return normalizers.Sequence(list_normalizers)
748
+
749
+ def post_processor(self):
750
+ return processors.TemplateProcessing(
751
+ single="[CLS]:0 $A:0 [SEP]:0",
752
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
753
+ special_tokens=[
754
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
755
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
756
+ ],
757
+ )
758
+
759
+
760
+ class MBartConverter(SpmConverter):
761
+ def vocab(self, proto):
762
+ vocab = [
763
+ ("<s>", 0.0),
764
+ ("<pad>", 0.0),
765
+ ("</s>", 0.0),
766
+ ("<unk>", 0.0),
767
+ ]
768
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
769
+ vocab += [
770
+ ("ar_AR", 0.0),
771
+ ("cs_CZ", 0.0),
772
+ ("de_DE", 0.0),
773
+ ("en_XX", 0.0),
774
+ ("es_XX", 0.0),
775
+ ("et_EE", 0.0),
776
+ ("fi_FI", 0.0),
777
+ ("fr_XX", 0.0),
778
+ ("gu_IN", 0.0),
779
+ ("hi_IN", 0.0),
780
+ ("it_IT", 0.0),
781
+ ("ja_XX", 0.0),
782
+ ("kk_KZ", 0.0),
783
+ ("ko_KR", 0.0),
784
+ ("lt_LT", 0.0),
785
+ ("lv_LV", 0.0),
786
+ ("my_MM", 0.0),
787
+ ("ne_NP", 0.0),
788
+ ("nl_XX", 0.0),
789
+ ("ro_RO", 0.0),
790
+ ("ru_RU", 0.0),
791
+ ("si_LK", 0.0),
792
+ ("tr_TR", 0.0),
793
+ ("vi_VN", 0.0),
794
+ ("zh_CN", 0.0),
795
+ ]
796
+ vocab += [("<mask>", 0.0)]
797
+ return vocab
798
+
799
+ def unk_id(self, proto):
800
+ return 3
801
+
802
+ def post_processor(self):
803
+ return processors.TemplateProcessing(
804
+ single="$A </s> en_XX",
805
+ pair="$A $B </s> en_XX",
806
+ special_tokens=[
807
+ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")),
808
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
809
+ ],
810
+ )
811
+
812
+
813
+ class MBart50Converter(SpmConverter):
814
+ def vocab(self, proto):
815
+ vocab = [
816
+ ("<s>", 0.0),
817
+ ("<pad>", 0.0),
818
+ ("</s>", 0.0),
819
+ ("<unk>", 0.0),
820
+ ]
821
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
822
+ vocab += [("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ("af_ZA", 0.0), ("az_AZ", 0.0), ("bn_IN", 0.0), ("fa_IR", 0.0), ("he_IL", 0.0), ("hr_HR", 0.0), ("id_ID", 0.0), ("ka_GE", 0.0), ("km_KH", 0.0), ("mk_MK", 0.0), ("ml_IN", 0.0), ("mn_MN", 0.0), ("mr_IN", 0.0), ("pl_PL", 0.0), ("ps_AF", 0.0), ("pt_XX", 0.0), ("sv_SE", 0.0), ("sw_KE", 0.0), ("ta_IN", 0.0), ("te_IN", 0.0), ("th_TH", 0.0), ("tl_XX", 0.0), ("uk_UA", 0.0), ("ur_PK", 0.0), ("xh_ZA", 0.0), ("gl_ES", 0.0), ("sl_SI", 0.0)] # fmt: skip
823
+ vocab += [("<mask>", 0.0)]
824
+ return vocab
825
+
826
+ def unk_id(self, proto):
827
+ return 3
828
+
829
+ def post_processor(self):
830
+ return processors.TemplateProcessing(
831
+ single="en_XX $A </s>",
832
+ pair="en_XX $A $B </s>",
833
+ special_tokens=[
834
+ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")),
835
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
836
+ ],
837
+ )
838
+
839
+
840
+ class NllbConverter(SpmConverter):
841
+ def vocab(self, proto):
842
+ vocab = [
843
+ ("<s>", 0.0),
844
+ ("<pad>", 0.0),
845
+ ("</s>", 0.0),
846
+ ("<unk>", 0.0),
847
+ ]
848
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
849
+ return vocab
850
+
851
+ def unk_id(self, proto):
852
+ return 3
853
+
854
+ def post_processor(self):
855
+ return processors.TemplateProcessing(
856
+ single="eng_Latn $A </s>",
857
+ pair="eng_Latn $A $B </s>",
858
+ special_tokens=[
859
+ ("eng_Latn", self.original_tokenizer.convert_tokens_to_ids("eng_Latn")),
860
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
861
+ ],
862
+ )
863
+
864
+
865
+ class SeamlessM4TConverter(SpmConverter):
866
+ def vocab(self, proto):
867
+ vocab = [
868
+ ("<pad>", 0.0),
869
+ ("<unk>", 0.0),
870
+ ("<s>", 0.0),
871
+ ("</s>", 0.0),
872
+ ]
873
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
874
+ return vocab
875
+
876
+ def unk_id(self, proto):
877
+ return self.original_tokenizer.unk_token_id
878
+
879
+ def post_processor(self):
880
+ return processors.TemplateProcessing(
881
+ single="__eng__ $A </s>",
882
+ pair="__eng__ $A $B </s>",
883
+ special_tokens=[
884
+ ("__eng__", self.original_tokenizer.convert_tokens_to_ids("__eng__")),
885
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
886
+ ],
887
+ )
888
+
889
+
890
+ class XLMRobertaConverter(SpmConverter):
891
+ def vocab(self, proto):
892
+ vocab = [
893
+ ("<s>", 0.0),
894
+ ("<pad>", 0.0),
895
+ ("</s>", 0.0),
896
+ ("<unk>", 0.0),
897
+ ]
898
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
899
+ vocab += [("<mask>", 0.0)]
900
+ return vocab
901
+
902
+ def unk_id(self, proto):
903
+ unk_id = 3
904
+ return unk_id
905
+
906
+ def post_processor(self):
907
+ return processors.TemplateProcessing(
908
+ single="<s> $A </s>",
909
+ pair="<s> $A </s> </s> $B </s>",
910
+ special_tokens=[
911
+ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
912
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
913
+ ],
914
+ )
915
+
916
+
917
+ class XLNetConverter(SpmConverter):
918
+ def vocab(self, proto):
919
+ return [
920
+ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100)
921
+ for piece in proto.pieces
922
+ ]
923
+
924
+ def normalizer(self, proto):
925
+ list_normalizers = [
926
+ normalizers.Replace("``", '"'),
927
+ normalizers.Replace("''", '"'),
928
+ ]
929
+ if not self.original_tokenizer.keep_accents:
930
+ list_normalizers.append(normalizers.NFKD())
931
+ list_normalizers.append(normalizers.StripAccents())
932
+ if self.original_tokenizer.do_lower_case:
933
+ list_normalizers.append(normalizers.Lowercase())
934
+
935
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
936
+
937
+ if precompiled_charsmap:
938
+ list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
939
+
940
+ list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " "))
941
+ return normalizers.Sequence(list_normalizers)
942
+
943
+ def post_processor(self):
944
+ return processors.TemplateProcessing(
945
+ single="$A:0 <sep>:0 <cls>:2",
946
+ pair="$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2",
947
+ special_tokens=[
948
+ ("<sep>", self.original_tokenizer.convert_tokens_to_ids("<sep>")),
949
+ ("<cls>", self.original_tokenizer.convert_tokens_to_ids("<cls>")),
950
+ ],
951
+ )
952
+
953
+
954
+ class ReformerConverter(SpmConverter):
955
+ pass
956
+
957
+
958
+ class RemBertConverter(SpmConverter):
959
+ # Inspired from AlbertConverter
960
+ def normalizer(self, proto):
961
+ list_normalizers = [
962
+ normalizers.Replace("``", '"'),
963
+ normalizers.Replace("''", '"'),
964
+ normalizers.Replace(Regex(" {2,}"), " "),
965
+ ]
966
+ if not self.original_tokenizer.keep_accents:
967
+ list_normalizers.append(normalizers.NFKD())
968
+ list_normalizers.append(normalizers.StripAccents())
969
+ if self.original_tokenizer.do_lower_case:
970
+ list_normalizers.append(normalizers.Lowercase())
971
+
972
+ precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
973
+
974
+ if precompiled_charsmap:
975
+ list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
976
+
977
+ return normalizers.Sequence(list_normalizers)
978
+
979
+ def post_processor(self):
980
+ return processors.TemplateProcessing(
981
+ single="[CLS]:0 $A:0 [SEP]:0",
982
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
983
+ special_tokens=[
984
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
985
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
986
+ ],
987
+ )
988
+
989
+
990
+ class BertGenerationConverter(SpmConverter):
991
+ pass
992
+
993
+
994
+ class PegasusConverter(SpmConverter):
995
+ def vocab(self, proto):
996
+ vocab = [
997
+ (self.original_tokenizer.pad_token, 0.0),
998
+ (self.original_tokenizer.eos_token, 0.0),
999
+ ]
1000
+
1001
+ if self.original_tokenizer.mask_token_sent is not None:
1002
+ vocab += [(self.original_tokenizer.mask_token_sent, 0.0)]
1003
+
1004
+ if (
1005
+ self.original_tokenizer.mask_token is not None
1006
+ and self.original_tokenizer.mask_token_id < self.original_tokenizer.offset
1007
+ ):
1008
+ vocab += [(self.original_tokenizer.mask_token, 0.0)]
1009
+
1010
+ vocab += [(f"<unk_{i}>", -100.0) for i in range(2, self.original_tokenizer.offset)]
1011
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]]
1012
+ return vocab
1013
+
1014
+ def unk_id(self, proto):
1015
+ return proto.trainer_spec.unk_id + self.original_tokenizer.offset
1016
+
1017
+ def pre_tokenizer(self, replacement, add_prefix_space):
1018
+ prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
1019
+ return pre_tokenizers.Sequence(
1020
+ [
1021
+ pre_tokenizers.WhitespaceSplit(),
1022
+ pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme),
1023
+ ]
1024
+ )
1025
+
1026
+ def post_processor(self):
1027
+ eos = self.original_tokenizer.eos_token
1028
+ special_tokens = [
1029
+ (eos, self.original_tokenizer.eos_token_id),
1030
+ ]
1031
+ return processors.TemplateProcessing(single=["$A", eos], pair=["$A", "$B", eos], special_tokens=special_tokens)
1032
+
1033
+
1034
+ class T5Converter(SpmConverter):
1035
+ def vocab(self, proto):
1036
+ num_extra_ids = self.original_tokenizer._extra_ids
1037
+ vocab = [(piece.piece, piece.score) for piece in proto.pieces]
1038
+ vocab += [(f"<extra_id_{i}>", 0.0) for i in range(num_extra_ids - 1, -1, -1)]
1039
+ return vocab
1040
+
1041
+ def post_processor(self):
1042
+ return processors.TemplateProcessing(
1043
+ single=["$A", "</s>"],
1044
+ pair=["$A", "</s>", "$B", "</s>"],
1045
+ special_tokens=[
1046
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
1047
+ ],
1048
+ )
1049
+
1050
+
1051
+ class UdopConverter(SpmConverter):
1052
+ def post_processor(self):
1053
+ return processors.TemplateProcessing(
1054
+ single=["$A", "</s>"],
1055
+ pair=["$A", "</s>", "$B", "</s>"],
1056
+ special_tokens=[
1057
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
1058
+ ],
1059
+ )
1060
+
1061
+
1062
+ class WhisperConverter(Converter):
1063
+ def converted(self) -> Tokenizer:
1064
+ vocab = self.original_tokenizer.encoder
1065
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
1066
+
1067
+ tokenizer = Tokenizer(
1068
+ BPE(
1069
+ vocab=vocab,
1070
+ merges=merges,
1071
+ dropout=None,
1072
+ continuing_subword_prefix="",
1073
+ end_of_word_suffix="",
1074
+ fuse_unk=False,
1075
+ )
1076
+ )
1077
+
1078
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space)
1079
+ tokenizer.decoder = decoders.ByteLevel()
1080
+
1081
+ prefix_token_ids = self.original_tokenizer.prefix_tokens
1082
+ prefixes = self.original_tokenizer.convert_ids_to_tokens(prefix_token_ids)
1083
+ eos = self.original_tokenizer.eos_token
1084
+ eos_token_id = self.original_tokenizer.eos_token_id
1085
+ prefix_template = " ".join([f"{token}:0" for token in prefixes])
1086
+ tokenizer.post_processor = processors.TemplateProcessing(
1087
+ single=f"{prefix_template} $A:0 {eos}:0",
1088
+ pair=f"{prefix_template} $A:0 $B:1 {eos}:1",
1089
+ special_tokens=[
1090
+ (eos, eos_token_id),
1091
+ *zip(prefixes, prefix_token_ids),
1092
+ ],
1093
+ )
1094
+
1095
+ return tokenizer
1096
+
1097
+
1098
+ class BigBirdConverter(SpmConverter):
1099
+ def post_processor(self):
1100
+ return processors.TemplateProcessing(
1101
+ single="[CLS]:0 $A:0 [SEP]:0",
1102
+ pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
1103
+ special_tokens=[
1104
+ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")),
1105
+ ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")),
1106
+ ],
1107
+ )
1108
+
1109
+
1110
+ class CLIPConverter(Converter):
1111
+ def converted(self) -> Tokenizer:
1112
+ vocab = self.original_tokenizer.encoder
1113
+ merges = list(self.original_tokenizer.bpe_ranks.keys())
1114
+ unk_token = self.original_tokenizer.unk_token
1115
+
1116
+ tokenizer = Tokenizer(
1117
+ BPE(
1118
+ vocab=vocab,
1119
+ merges=merges,
1120
+ dropout=None,
1121
+ continuing_subword_prefix="",
1122
+ end_of_word_suffix="</w>",
1123
+ fuse_unk=False,
1124
+ unk_token=str(unk_token),
1125
+ )
1126
+ )
1127
+
1128
+ tokenizer.normalizer = normalizers.Sequence(
1129
+ [normalizers.NFC(), normalizers.Replace(Regex(r"\s+"), " "), normalizers.Lowercase()]
1130
+ )
1131
+ tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
1132
+ [
1133
+ pre_tokenizers.Split(
1134
+ Regex(r"""'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+"""),
1135
+ behavior="removed",
1136
+ invert=True,
1137
+ ),
1138
+ pre_tokenizers.ByteLevel(add_prefix_space=False),
1139
+ ]
1140
+ )
1141
+ tokenizer.decoder = decoders.ByteLevel()
1142
+
1143
+ # Hack to have a ByteLevel and TemplateProcessing
1144
+ tokenizer.post_processor = processors.RobertaProcessing(
1145
+ sep=(self.original_tokenizer.eos_token, self.original_tokenizer.eos_token_id),
1146
+ cls=(self.original_tokenizer.bos_token, self.original_tokenizer.bos_token_id),
1147
+ add_prefix_space=False,
1148
+ trim_offsets=False,
1149
+ )
1150
+ return tokenizer
1151
+
1152
+
1153
+ class LayoutLMv2Converter(Converter):
1154
+ def converted(self) -> Tokenizer:
1155
+ vocab = self.original_tokenizer.vocab
1156
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
1157
+
1158
+ tokenize_chinese_chars = False
1159
+ strip_accents = False
1160
+ do_lower_case = True
1161
+ if hasattr(self.original_tokenizer, "basic_tokenizer"):
1162
+ tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
1163
+ strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
1164
+ do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
1165
+
1166
+ tokenizer.normalizer = normalizers.BertNormalizer(
1167
+ clean_text=True,
1168
+ handle_chinese_chars=tokenize_chinese_chars,
1169
+ strip_accents=strip_accents,
1170
+ lowercase=do_lower_case,
1171
+ )
1172
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
1173
+
1174
+ cls = str(self.original_tokenizer.cls_token)
1175
+ sep = str(self.original_tokenizer.sep_token)
1176
+ cls_token_id = self.original_tokenizer.cls_token_id
1177
+ sep_token_id = self.original_tokenizer.sep_token_id
1178
+
1179
+ tokenizer.post_processor = processors.TemplateProcessing(
1180
+ single=f"{cls}:0 $A:0 {sep}:0",
1181
+ pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1",
1182
+ special_tokens=[
1183
+ (cls, cls_token_id),
1184
+ (sep, sep_token_id),
1185
+ ],
1186
+ )
1187
+ tokenizer.decoder = decoders.WordPiece(prefix="##")
1188
+
1189
+ return tokenizer
1190
+
1191
+
1192
+ class BlenderbotConverter(Converter):
1193
+ def converted(self) -> Tokenizer:
1194
+ ot = self.original_tokenizer
1195
+ vocab = ot.encoder
1196
+ merges = list(ot.bpe_ranks.keys())
1197
+
1198
+ tokenizer = Tokenizer(
1199
+ BPE(
1200
+ vocab=vocab,
1201
+ merges=merges,
1202
+ dropout=None,
1203
+ continuing_subword_prefix="",
1204
+ end_of_word_suffix="",
1205
+ fuse_unk=False,
1206
+ )
1207
+ )
1208
+
1209
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
1210
+ tokenizer.decoder = decoders.ByteLevel()
1211
+ tokenizer.post_processor = processors.TemplateProcessing(
1212
+ single=f"$A:0 {ot.eos_token}:0",
1213
+ special_tokens=[
1214
+ (ot.eos_token, ot.eos_token_id),
1215
+ ],
1216
+ )
1217
+
1218
+ return tokenizer
1219
+
1220
+
1221
+ class XGLMConverter(SpmConverter):
1222
+ def vocab(self, proto):
1223
+ vocab = [
1224
+ ("<s>", 0.0),
1225
+ ("<pad>", 0.0),
1226
+ ("</s>", 0.0),
1227
+ ("<unk>", 0.0),
1228
+ ]
1229
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
1230
+ vocab += [("<madeupword0>", 0.0), ("<madeupword1>", 0.0), ("<madeupword2>", 0.0), ("<madeupword3>", 0.0), ("<madeupword4>", 0.0), ("<madeupword5>", 0.0), ("<madeupword6>", 0.0)] # fmt: skip
1231
+ return vocab
1232
+
1233
+ def unk_id(self, proto):
1234
+ unk_id = 3
1235
+ return unk_id
1236
+
1237
+ def post_processor(self):
1238
+ return processors.TemplateProcessing(
1239
+ single="</s> $A",
1240
+ pair="</s> $A </s> </s> $B",
1241
+ special_tokens=[
1242
+ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
1243
+ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
1244
+ ],
1245
+ )
1246
+
1247
+
1248
+ class GemmaConvert(SpmConverter):
1249
+ handle_byte_fallback = True
1250
+
1251
+ """"
1252
+ split_by_unicode_script: true
1253
+ split_by_number: true
1254
+ split_by_whitespace: true
1255
+ treat_whitespace_as_suffix: false
1256
+ allow_whitespace_only_pieces: true
1257
+ split_digits: true
1258
+ byte_fallback: true
1259
+ """
1260
+
1261
+ def normalizer(self, proto):
1262
+ return normalizers.Replace(" ", "▁")
1263
+
1264
+ def vocab(self, proto):
1265
+ vocab = [
1266
+ (self.original_tokenizer.pad_token, 0.0),
1267
+ (self.original_tokenizer.eos_token, 0.0),
1268
+ (self.original_tokenizer.bos_token, 0.0),
1269
+ ]
1270
+ for piece in proto.pieces[3:]:
1271
+ if piece.piece == "<0x09>":
1272
+ vocab += [("\t", piece.score)]
1273
+ else:
1274
+ vocab += [(piece.piece, piece.score)]
1275
+ # vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
1276
+ return vocab
1277
+
1278
+ def pre_tokenizer(self, replacement, add_prefix_space):
1279
+ return None
1280
+
1281
+ def unk_id(self, proto):
1282
+ unk_id = 3
1283
+ return unk_id
1284
+
1285
+ def decoder(self, replacement, add_prefix_space):
1286
+ return decoders.Sequence(
1287
+ [
1288
+ decoders.Replace("▁", " "),
1289
+ decoders.ByteFallback(),
1290
+ decoders.Fuse(),
1291
+ ]
1292
+ )
1293
+
1294
+ def tokenizer(self, proto):
1295
+ model_type = proto.trainer_spec.model_type
1296
+ vocab_scores = self.vocab(proto)
1297
+ if model_type == 1:
1298
+ import tokenizers
1299
+
1300
+ if version.parse(tokenizers.__version__) < version.parse("0.14.0"):
1301
+ tokenizer = Tokenizer(Unigram(vocab_scores, 0))
1302
+ else:
1303
+ tokenizer = Tokenizer(Unigram(vocab_scores, 0, byte_fallback=True))
1304
+
1305
+ elif model_type == 2:
1306
+ _, merges = GemmaSentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
1307
+ bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
1308
+
1309
+ tokenizer = Tokenizer(
1310
+ BPE(
1311
+ bpe_vocab,
1312
+ merges,
1313
+ unk_token=proto.trainer_spec.unk_piece,
1314
+ fuse_unk=True,
1315
+ byte_fallback=True,
1316
+ dropout=None,
1317
+ )
1318
+ )
1319
+ tokenizer.add_special_tokens(
1320
+ [
1321
+ AddedToken("<pad>", normalized=False, special=True),
1322
+ AddedToken("<eos>", normalized=False, special=True),
1323
+ AddedToken("<bos>", normalized=False, special=True),
1324
+ AddedToken("<unk>", normalized=False, special=True),
1325
+ ]
1326
+ )
1327
+ else:
1328
+ raise Exception(
1329
+ "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
1330
+ )
1331
+ user_defined_symbols = [
1332
+ AddedToken(token, normalized=False, special=False) for token in proto.trainer_spec.user_defined_symbols
1333
+ ]
1334
+ tokenizer.add_tokens(user_defined_symbols)
1335
+ return tokenizer
1336
+
1337
+
1338
+ class LlamaConverter(SpmConverter):
1339
+ handle_byte_fallback = True
1340
+
1341
+ def vocab(self, proto):
1342
+ vocab = [
1343
+ (self.original_tokenizer.convert_ids_to_tokens(0), 0.0),
1344
+ (self.original_tokenizer.convert_ids_to_tokens(1), 0.0),
1345
+ (self.original_tokenizer.convert_ids_to_tokens(2), 0.0),
1346
+ ]
1347
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
1348
+ return vocab
1349
+
1350
+ def unk_id(self, proto):
1351
+ unk_id = 0
1352
+ return unk_id
1353
+
1354
+ def decoder(self, replacement, add_prefix_space):
1355
+ sequence = [
1356
+ decoders.Replace("▁", " "),
1357
+ decoders.ByteFallback(),
1358
+ decoders.Fuse(),
1359
+ ]
1360
+ if add_prefix_space:
1361
+ sequence += [decoders.Strip(content=" ", left=1)]
1362
+ return decoders.Sequence(sequence)
1363
+
1364
+ def tokenizer(self, proto):
1365
+ model_type = proto.trainer_spec.model_type
1366
+ vocab_scores = self.vocab(proto)
1367
+ if model_type == 1:
1368
+ import tokenizers
1369
+
1370
+ if version.parse(tokenizers.__version__) < version.parse("0.14.0"):
1371
+ tokenizer = Tokenizer(Unigram(vocab_scores, 0))
1372
+ else:
1373
+ tokenizer = Tokenizer(Unigram(vocab_scores, 0, byte_fallback=True))
1374
+
1375
+ elif model_type == 2:
1376
+ _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
1377
+ bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
1378
+ tokenizer = Tokenizer(
1379
+ BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=True)
1380
+ )
1381
+ tokenizer.add_special_tokens(
1382
+ [
1383
+ AddedToken(self.original_tokenizer.convert_ids_to_tokens(0), normalized=False, special=True),
1384
+ AddedToken(self.original_tokenizer.convert_ids_to_tokens(1), normalized=False, special=True),
1385
+ AddedToken(self.original_tokenizer.convert_ids_to_tokens(2), normalized=False, special=True),
1386
+ ]
1387
+ )
1388
+ else:
1389
+ raise Exception(
1390
+ "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
1391
+ )
1392
+
1393
+ return tokenizer
1394
+
1395
+ def normalizer(self, proto):
1396
+ sequence = []
1397
+ if hasattr(self.original_tokenizer, "add_prefix_space"):
1398
+ if self.original_tokenizer.add_prefix_space:
1399
+ sequence += [normalizers.Prepend(prepend="▁")]
1400
+ sequence += [normalizers.Replace(pattern=" ", content="▁")]
1401
+ return normalizers.Sequence(sequence)
1402
+
1403
+ def pre_tokenizer(self, replacement, add_prefix_space):
1404
+ return None
1405
+
1406
+ def post_processor(self):
1407
+ # the processor is defined in the LlamaTokenizerFast class.
1408
+ return None
1409
+
1410
+
1411
+ class MarkupLMConverter(Converter):
1412
+ def converted(self) -> Tokenizer:
1413
+ ot = self.original_tokenizer
1414
+ vocab = ot.encoder
1415
+ merges = list(ot.bpe_ranks.keys())
1416
+
1417
+ tokenizer = Tokenizer(
1418
+ BPE(
1419
+ vocab=vocab,
1420
+ merges=merges,
1421
+ dropout=None,
1422
+ continuing_subword_prefix="",
1423
+ end_of_word_suffix="",
1424
+ fuse_unk=False,
1425
+ unk_token=self.original_tokenizer.unk_token,
1426
+ )
1427
+ )
1428
+
1429
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
1430
+ tokenizer.decoder = decoders.ByteLevel()
1431
+
1432
+ cls = str(self.original_tokenizer.cls_token)
1433
+ sep = str(self.original_tokenizer.sep_token)
1434
+ cls_token_id = self.original_tokenizer.cls_token_id
1435
+ sep_token_id = self.original_tokenizer.sep_token_id
1436
+
1437
+ tokenizer.post_processor = processors.TemplateProcessing(
1438
+ single=f"{cls} $A {sep}",
1439
+ pair=f"{cls} $A {sep} $B {sep}",
1440
+ special_tokens=[
1441
+ (cls, cls_token_id),
1442
+ (sep, sep_token_id),
1443
+ ],
1444
+ )
1445
+
1446
+ return tokenizer
1447
+
1448
+
1449
+ SLOW_TO_FAST_CONVERTERS = {
1450
+ "AlbertTokenizer": AlbertConverter,
1451
+ "BartTokenizer": RobertaConverter,
1452
+ "BarthezTokenizer": BarthezConverter,
1453
+ "BertTokenizer": BertConverter,
1454
+ "BigBirdTokenizer": BigBirdConverter,
1455
+ "BlenderbotTokenizer": BlenderbotConverter,
1456
+ "CamembertTokenizer": CamembertConverter,
1457
+ "CLIPTokenizer": CLIPConverter,
1458
+ "CodeGenTokenizer": GPT2Converter,
1459
+ "ConvBertTokenizer": BertConverter,
1460
+ "DebertaTokenizer": DebertaConverter,
1461
+ "DebertaV2Tokenizer": DebertaV2Converter,
1462
+ "DistilBertTokenizer": BertConverter,
1463
+ "DPRReaderTokenizer": BertConverter,
1464
+ "DPRQuestionEncoderTokenizer": BertConverter,
1465
+ "DPRContextEncoderTokenizer": BertConverter,
1466
+ "ElectraTokenizer": BertConverter,
1467
+ "FNetTokenizer": AlbertConverter,
1468
+ "FunnelTokenizer": FunnelConverter,
1469
+ "GPT2Tokenizer": GPT2Converter,
1470
+ "HerbertTokenizer": HerbertConverter,
1471
+ "LayoutLMTokenizer": BertConverter,
1472
+ "LayoutLMv2Tokenizer": BertConverter,
1473
+ "LayoutLMv3Tokenizer": RobertaConverter,
1474
+ "LayoutXLMTokenizer": XLMRobertaConverter,
1475
+ "LongformerTokenizer": RobertaConverter,
1476
+ "LEDTokenizer": RobertaConverter,
1477
+ "LxmertTokenizer": BertConverter,
1478
+ "MarkupLMTokenizer": MarkupLMConverter,
1479
+ "MBartTokenizer": MBartConverter,
1480
+ "MBart50Tokenizer": MBart50Converter,
1481
+ "MPNetTokenizer": MPNetConverter,
1482
+ "MobileBertTokenizer": BertConverter,
1483
+ "MvpTokenizer": RobertaConverter,
1484
+ "NllbTokenizer": NllbConverter,
1485
+ "OpenAIGPTTokenizer": OpenAIGPTConverter,
1486
+ "PegasusTokenizer": PegasusConverter,
1487
+ "Qwen2Tokenizer": Qwen2Converter,
1488
+ "RealmTokenizer": BertConverter,
1489
+ "ReformerTokenizer": ReformerConverter,
1490
+ "RemBertTokenizer": RemBertConverter,
1491
+ "RetriBertTokenizer": BertConverter,
1492
+ "RobertaTokenizer": RobertaConverter,
1493
+ "RoFormerTokenizer": RoFormerConverter,
1494
+ "SeamlessM4TTokenizer": SeamlessM4TConverter,
1495
+ "SqueezeBertTokenizer": BertConverter,
1496
+ "T5Tokenizer": T5Converter,
1497
+ "UdopTokenizer": UdopConverter,
1498
+ "WhisperTokenizer": WhisperConverter,
1499
+ "XLMRobertaTokenizer": XLMRobertaConverter,
1500
+ "XLNetTokenizer": XLNetConverter,
1501
+ "SplinterTokenizer": SplinterConverter,
1502
+ "XGLMTokenizer": XGLMConverter,
1503
+ "LlamaTokenizer": LlamaConverter,
1504
+ "CodeLlamaTokenizer": LlamaConverter,
1505
+ "GemmaTokenizer": GemmaConvert,
1506
+ }
1507
+
1508
+
1509
+ def convert_slow_tokenizer(transformer_tokenizer) -> Tokenizer:
1510
+ """
1511
+ Utilities to convert a slow tokenizer instance to a fast tokenizer instance.
1512
+
1513
+ Args:
1514
+ transformer_tokenizer ([`~tokenization_utils_base.PreTrainedTokenizer`]):
1515
+ Instance of a slow tokenizer to convert to the backend tokenizer for
1516
+ [`~tokenization_utils_base.PreTrainedTokenizerFast`].
1517
+
1518
+ Return:
1519
+ An instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a
1520
+ [`~tokenization_utils_base.PreTrainedTokenizerFast`]
1521
+ """
1522
+
1523
+ tokenizer_class_name = transformer_tokenizer.__class__.__name__
1524
+
1525
+ if tokenizer_class_name not in SLOW_TO_FAST_CONVERTERS:
1526
+ raise ValueError(
1527
+ f"An instance of tokenizer class {tokenizer_class_name} cannot be converted in a Fast tokenizer instance."
1528
+ " No converter was found. Currently available slow->fast convertors:"
1529
+ f" {list(SLOW_TO_FAST_CONVERTERS.keys())}"
1530
+ )
1531
+
1532
+ converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name]
1533
+
1534
+ return converter_class(transformer_tokenizer).converted()
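Editor's note: `convert_slow_tokenizer` above is the usual entry point of this module. A minimal usage sketch, not part of the diff; the checkpoint name is purely illustrative and any slow tokenizer class listed in `SLOW_TO_FAST_CONVERTERS` would work the same way:

```python
# Hedged sketch: convert a slow (pure-Python) tokenizer into a `tokenizers.Tokenizer` backend.
from transformers import BertTokenizer
from transformers.convert_slow_tokenizer import convert_slow_tokenizer

slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
backend = convert_slow_tokenizer(slow_tokenizer)  # a tokenizers.Tokenizer instance
print(backend.encode("Hello world").tokens)
```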
venv/lib/python3.10/site-packages/transformers/convert_slow_tokenizers_checkpoints_to_fast.py ADDED
@@ -0,0 +1,126 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Convert slow tokenizers checkpoints in fast (serialization format of the `tokenizers` library)"""
16
+
17
+ import argparse
18
+ import os
19
+
20
+ import transformers
21
+
22
+ from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
23
+ from .utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
32
+
33
+
34
+ def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
35
+ if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
36
+ raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
37
+
38
+ if tokenizer_name is None:
39
+ tokenizer_names = TOKENIZER_CLASSES
40
+ else:
41
+ tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
42
+
43
+ logger.info(f"Loading tokenizer classes: {tokenizer_names}")
44
+
45
+ for tokenizer_name in tokenizer_names:
46
+ tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
47
+
48
+ add_prefix = True
49
+ if checkpoint_name is None:
50
+ checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
51
+ else:
52
+ checkpoint_names = [checkpoint_name]
53
+
54
+ logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
55
+
56
+ for checkpoint in checkpoint_names:
57
+ logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
58
+
59
+ # Load tokenizer
60
+ tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
61
+
62
+ # Save fast tokenizer
63
+ logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
64
+
65
+ # For organization names we create sub-directories
66
+ if "/" in checkpoint:
67
+ checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
68
+ dump_path_full = os.path.join(dump_path, checkpoint_directory)
69
+ elif add_prefix:
70
+ checkpoint_prefix_name = checkpoint
71
+ dump_path_full = dump_path
72
+ else:
73
+ checkpoint_prefix_name = None
74
+ dump_path_full = dump_path
75
+
76
+ logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
77
+
78
+ if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
79
+ file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
80
+ next_char = file_path.split(checkpoint)[-1][0]
81
+ if next_char == "/":
82
+ dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
83
+ checkpoint_prefix_name = None
84
+
85
+ logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
86
+
87
+ file_names = tokenizer.save_pretrained(
88
+ dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
89
+ )
90
+ logger.info(f"=> File names {file_names}")
91
+
92
+ for file_name in file_names:
93
+ if not file_name.endswith("tokenizer.json"):
94
+ os.remove(file_name)
95
+ logger.info(f"=> removing {file_name}")
96
+
97
+
98
+ if __name__ == "__main__":
99
+ parser = argparse.ArgumentParser()
100
+ # Required parameters
101
+ parser.add_argument(
102
+ "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
103
+ )
104
+ parser.add_argument(
105
+ "--tokenizer_name",
106
+ default=None,
107
+ type=str,
108
+ help=(
109
+ f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
110
+ "download and convert all the checkpoints from AWS."
111
+ ),
112
+ )
113
+ parser.add_argument(
114
+ "--checkpoint_name",
115
+ default=None,
116
+ type=str,
117
+ help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
118
+ )
119
+ parser.add_argument(
120
+ "--force_download",
121
+ action="store_true",
122
+ help="Re-download checkpoints.",
123
+ )
124
+ args = parser.parse_args()
125
+
126
+ convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
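Editor's note: the script above is normally run as a module from the command line, but the wrapped function can also be called directly. A hedged sketch; the tokenizer and checkpoint names are illustrative:

```python
# Sketch only: convert one slow checkpoint and dump the resulting tokenizer.json.
from transformers.convert_slow_tokenizers_checkpoints_to_fast import convert_slow_checkpoint_to_fast

convert_slow_checkpoint_to_fast(
    tokenizer_name="BertTokenizer",        # must be a key of TOKENIZER_CLASSES
    checkpoint_name="bert-base-uncased",   # illustrative checkpoint
    dump_path="./fast_tokenizers",
    force_download=False,
)
```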
venv/lib/python3.10/site-packages/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py ADDED
@@ -0,0 +1,88 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Seq2Seq TF Hub checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ from . import (
21
+ BertConfig,
22
+ BertGenerationConfig,
23
+ BertGenerationDecoder,
24
+ BertGenerationEncoder,
25
+ load_tf_weights_in_bert_generation,
26
+ logging,
27
+ )
28
+
29
+
30
+ logging.set_verbosity_info()
31
+
32
+
33
+ def convert_tf_checkpoint_to_pytorch(tf_hub_path, pytorch_dump_path, is_encoder_named_decoder, vocab_size, is_encoder):
34
+ # Initialise PyTorch model
35
+ bert_config = BertConfig.from_pretrained(
36
+ "google-bert/bert-large-cased",
37
+ vocab_size=vocab_size,
38
+ max_position_embeddings=512,
39
+ is_decoder=True,
40
+ add_cross_attention=True,
41
+ )
42
+ bert_config_dict = bert_config.to_dict()
43
+ del bert_config_dict["type_vocab_size"]
44
+ config = BertGenerationConfig(**bert_config_dict)
45
+ if is_encoder:
46
+ model = BertGenerationEncoder(config)
47
+ else:
48
+ model = BertGenerationDecoder(config)
49
+ print(f"Building PyTorch model from configuration: {config}")
50
+
51
+ # Load weights from tf checkpoint
52
+ load_tf_weights_in_bert_generation(
53
+ model,
54
+ tf_hub_path,
55
+ model_class="bert",
56
+ is_encoder_named_decoder=is_encoder_named_decoder,
57
+ is_encoder=is_encoder,
58
+ )
59
+
60
+ # Save pytorch-model
61
+ print(f"Save PyTorch model and config to {pytorch_dump_path}")
62
+ model.save_pretrained(pytorch_dump_path)
63
+
64
+
65
+ if __name__ == "__main__":
66
+ parser = argparse.ArgumentParser()
67
+ # Required parameters
68
+ parser.add_argument(
69
+ "--tf_hub_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
70
+ )
71
+ parser.add_argument(
72
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
73
+ )
74
+ parser.add_argument(
75
+ "--is_encoder_named_decoder",
76
+ action="store_true",
77
+ help="If decoder has to be renamed to encoder in PyTorch model.",
78
+ )
79
+ parser.add_argument("--is_encoder", action="store_true", help="If model is an encoder.")
80
+ parser.add_argument("--vocab_size", default=50358, type=int, help="Vocab size of model")
81
+ args = parser.parse_args()
82
+ convert_tf_checkpoint_to_pytorch(
83
+ args.tf_hub_path,
84
+ args.pytorch_dump_path,
85
+ args.is_encoder_named_decoder,
86
+ args.vocab_size,
87
+ is_encoder=args.is_encoder,
88
+ )
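Editor's note: as with the other conversion scripts, this one targets command-line use, but the core function can be invoked directly. A sketch under stated assumptions; the paths are placeholders and TensorFlow must be installed for the weight-loading step to succeed:

```python
# Sketch only: convert a Seq2Seq TF Hub checkpoint into a BertGeneration PyTorch model.
from transformers.convert_tf_hub_seq_to_seq_bert_to_pytorch import convert_tf_checkpoint_to_pytorch

convert_tf_checkpoint_to_pytorch(
    tf_hub_path="/path/to/tf_hub_checkpoint",   # placeholder
    pytorch_dump_path="./bert_generation_encoder",  # placeholder
    is_encoder_named_decoder=False,
    vocab_size=50358,
    is_encoder=True,
)
```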
venv/lib/python3.10/site-packages/transformers/debug_utils.py ADDED
@@ -0,0 +1,346 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import collections
16
+
17
+ from .utils import ExplicitEnum, is_torch_available, logging
18
+
19
+
20
+ if is_torch_available():
21
+ import torch
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class DebugUnderflowOverflow:
28
+ """
29
+ This debug class helps detect and understand where the model starts getting very large or very small, and more
30
+ importantly `nan` or `inf` weight and activation elements.
31
+
32
+ There are 2 working modes:
33
+
34
+ 1. Underflow/overflow detection (default)
35
+ 2. Specific batch absolute min/max tracing without detection
36
+
37
+ Mode 1: Underflow/overflow detection
38
+
39
+ To activate the underflow/overflow detection, initialize the object with the model :
40
+
41
+ ```python
42
+ debug_overflow = DebugUnderflowOverflow(model)
43
+ ```
44
+
45
+ then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or output
46
+ elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this event,
47
+ each frame reporting
48
+
49
+ 1. the fully qualified module name plus the class name whose `forward` was run
50
+ 2. the absolute min and max value of all elements for each module weights, and the inputs and output
51
+
52
+ For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16
53
+ mixed precision :
54
+
55
+ ```
56
+ Detected inf/nan during batch_number=0
57
+ Last 21 forward frames:
58
+ abs min abs max metadata
59
+ [...]
60
+ encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
61
+ 2.17e-07 4.50e+00 weight
62
+ 1.79e-06 4.65e+00 input[0]
63
+ 2.68e-06 3.70e+01 output
64
+ encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
65
+ 8.08e-07 2.66e+01 weight
66
+ 1.79e-06 4.65e+00 input[0]
67
+ 1.27e-04 2.37e+02 output
68
+ encoder.block.2.layer.1.DenseReluDense.wo Linear
69
+ 1.01e-06 6.44e+00 weight
70
+ 0.00e+00 9.74e+03 input[0]
71
+ 3.18e-04 6.27e+04 output
72
+ encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
73
+ 1.79e-06 4.65e+00 input[0]
74
+ 3.18e-04 6.27e+04 output
75
+ encoder.block.2.layer.1.dropout Dropout
76
+ 3.18e-04 6.27e+04 input[0]
77
+ 0.00e+00 inf output
78
+ ```
79
+
80
+ You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value was
81
+ around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which
82
+ renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than
83
+ 64K, and we get an overlow.
84
+
85
+ As you can see it's the previous frames that we need to look into when the numbers start going into very large for
86
+ fp16 numbers.
87
+
88
+ The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.
89
+
90
+ By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :
91
+
92
+ ```python
93
+ debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
94
+ ```
95
+
96
+ To validate that you have set up this debugging feature correctly, and you intend to use it in a training that
97
+ may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in
98
+ the next section.
99
+
100
+
101
+ Mode 2. Specific batch absolute min/max tracing without detection
102
+
103
+ The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.
104
+
105
+ Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a
106
+ given batch, and only do that for batches 1 and 3. Then you instantiate this class as :
107
+
108
+ ```python
109
+ debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])
110
+ ```
111
+
112
+ And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.
113
+
114
+ This is helpful if you know that the program starts misbehaving after a certain batch number, so you can
115
+ fast-forward right to that area.
116
+
117
+
118
+ Early stopping:
119
+
120
+ You can also specify the batch number after which to stop the training, with :
121
+
122
+ ```python
123
+ debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
124
+ ```
125
+
126
+ This feature is mainly useful in the tracing mode, but you can use it for any mode.
127
+
128
+
129
+ **Performance**:
130
+
131
+ As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the training
132
+ down. Therefore remember to turn it off once the debugging needs have been met.
133
+
134
+ Args:
135
+ model (`nn.Module`):
136
+ The model to debug.
137
+ max_frames_to_save (`int`, *optional*, defaults to 21):
138
+ How many frames back to record
139
+ trace_batch_nums(`List[int]`, *optional*, defaults to `[]`):
140
+ Which batch numbers to trace (turns detection off)
141
+ abort_after_batch_num (`int``, *optional*):
142
+ Whether to abort after a certain batch number has finished
143
+ """
144
+
145
+ def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):
146
+ self.model = model
147
+ self.trace_batch_nums = trace_batch_nums
148
+ self.abort_after_batch_num = abort_after_batch_num
149
+
150
+ # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence
151
+ self.frames = collections.deque([], max_frames_to_save)
152
+ self.frame = []
153
+ self.batch_number = 0
154
+ self.total_calls = 0
155
+ self.detected_overflow = False
156
+ self.prefix = " "
157
+
158
+ self.analyse_model()
159
+
160
+ self.register_forward_hook()
161
+
162
+ def save_frame(self, frame=None):
163
+ if frame is not None:
164
+ self.expand_frame(frame)
165
+ self.frames.append("\n".join(self.frame))
166
+ self.frame = [] # start a new frame
167
+
168
+ def expand_frame(self, line):
169
+ self.frame.append(line)
170
+
171
+ def trace_frames(self):
172
+ print("\n".join(self.frames))
173
+ self.frames = []
174
+
175
+ def reset_saved_frames(self):
176
+ self.frames = []
177
+
178
+ def dump_saved_frames(self):
179
+ print(f"\nDetected inf/nan during batch_number={self.batch_number}")
180
+ print(f"Last {len(self.frames)} forward frames:")
181
+ print(f"{'abs min':8} {'abs max':8} metadata")
182
+ print("\n".join(self.frames))
183
+ print("\n\n")
184
+ self.frames = []
185
+
186
+ def analyse_model(self):
187
+ # extract the fully qualified module names, to be able to report at run time. e.g.:
188
+ # encoder.block.2.layer.0.SelfAttention.o
189
+ #
190
+ # for shared weights only the first shared module name will be registered
191
+ self.module_names = {m: name for name, m in self.model.named_modules()}
192
+ # self.longest_module_name = max(len(v) for v in self.module_names.values())
193
+
194
+ def analyse_variable(self, var, ctx):
195
+ if torch.is_tensor(var):
196
+ self.expand_frame(get_abs_min_max(var, ctx))
197
+ if detect_overflow(var, ctx):
198
+ self.detected_overflow = True
199
+ elif var is None:
200
+ self.expand_frame(f"{'None':>17} {ctx}")
201
+ else:
202
+ self.expand_frame(f"{'not a tensor':>17} {ctx}")
203
+
204
+ def batch_start_frame(self):
205
+ self.expand_frame(f"\n\n{self.prefix} *** Starting batch number={self.batch_number} ***")
206
+ self.expand_frame(f"{'abs min':8} {'abs max':8} metadata")
207
+
208
+ def batch_end_frame(self):
209
+ self.expand_frame(f"{self.prefix} *** Finished batch number={self.batch_number-1} ***\n\n")
210
+
211
+ def create_frame(self, module, input, output):
212
+ self.expand_frame(f"{self.prefix} {self.module_names[module]} {module.__class__.__name__}")
213
+
214
+ # params
215
+ for name, p in module.named_parameters(recurse=False):
216
+ self.analyse_variable(p, name)
217
+
218
+ # inputs
219
+ if isinstance(input, tuple):
220
+ for i, x in enumerate(input):
221
+ self.analyse_variable(x, f"input[{i}]")
222
+ else:
223
+ self.analyse_variable(input, "input")
224
+
225
+ # outputs
226
+ if isinstance(output, tuple):
227
+ for i, x in enumerate(output):
228
+ # possibly a tuple of tuples
229
+ if isinstance(x, tuple):
230
+ for j, y in enumerate(x):
231
+ self.analyse_variable(y, f"output[{i}][{j}]")
232
+ else:
233
+ self.analyse_variable(x, f"output[{i}]")
234
+ else:
235
+ self.analyse_variable(output, "output")
236
+
237
+ self.save_frame()
238
+
239
+ def register_forward_hook(self):
240
+ self.model.apply(self._register_forward_hook)
241
+
242
+ def _register_forward_hook(self, module):
243
+ module.register_forward_hook(self.forward_hook)
244
+
245
+ def forward_hook(self, module, input, output):
246
+ # - input is a tuple of packed inputs (could be non-Tensors)
247
+ # - output could be a Tensor or a tuple of Tensors and non-Tensors
248
+
249
+ last_frame_of_batch = False
250
+
251
+ trace_mode = self.batch_number in self.trace_batch_nums
252
+ if trace_mode:
253
+ self.reset_saved_frames()
254
+
255
+ if self.total_calls == 0:
256
+ self.batch_start_frame()
257
+ self.total_calls += 1
258
+
259
+ # count batch numbers - the very first forward hook of the batch will be called when the
260
+ # batch completes - i.e. it gets called very last - we know this batch has finished
261
+ if module == self.model:
262
+ self.batch_number += 1
263
+ last_frame_of_batch = True
264
+
265
+ self.create_frame(module, input, output)
266
+
267
+ # if last_frame_of_batch:
268
+ # self.batch_end_frame()
269
+
270
+ if trace_mode:
271
+ self.trace_frames()
272
+
273
+ if last_frame_of_batch:
274
+ self.batch_start_frame()
275
+
276
+ if self.detected_overflow and not trace_mode:
277
+ self.dump_saved_frames()
278
+
279
+ # now we can abort, as it's pointless to continue running
280
+ raise ValueError(
281
+ "DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. "
282
+ "Please scroll up above this traceback to see the activation values prior to this event."
283
+ )
284
+
285
+ # abort after certain batch if requested to do so
286
+ if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:
287
+ raise ValueError(
288
+ f"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to"
289
+ f" `abort_after_batch_num={self.abort_after_batch_num}` arg"
290
+ )
291
+
292
+
293
+ def get_abs_min_max(var, ctx):
294
+ abs_var = var.abs()
295
+ return f"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}"
296
+
297
+
298
+ def detect_overflow(var, ctx):
299
+ """
300
+ Report whether the tensor contains any `nan` or `inf` entries.
301
+
302
+ This is useful for detecting overflows/underflows and best to call right after the function that did some math that
303
+ modified the tensor in question.
304
+
305
+ This function contains a few other helper features that you can enable and tweak directly if you want to track
306
+ various other things.
307
+
308
+ Args:
309
+ var: the tensor variable to check
310
+ ctx: the message to print as a context
311
+
312
+ Return:
313
+ `True` if `inf` or `nan` was detected, `False` otherwise
314
+ """
315
+ detected = False
316
+ if torch.isnan(var).any().item():
317
+ detected = True
318
+ print(f"{ctx} has nans")
319
+ if torch.isinf(var).any().item():
320
+ detected = True
321
+ print(f"{ctx} has infs")
322
+
323
+ # if needed to monitor large elements can enable the following
324
+ if 0: # and detected:
325
+ n100 = var[torch.ge(var.abs(), 100)]
326
+ if n100.numel() > 0:
327
+ print(f"{ctx}: n100={n100.numel()}")
328
+ n1000 = var[torch.ge(var.abs(), 1000)]
329
+ if n1000.numel() > 0:
330
+ print(f"{ctx}: n1000={n1000.numel()}")
331
+ n10000 = var[torch.ge(var.abs(), 10000)]
332
+ if n10000.numel() > 0:
333
+ print(f"{ctx}: n10000={n10000.numel()}")
334
+
335
+ if 0:
336
+ print(f"min={var.min():9.2e} max={var.max():9.2e}")
337
+
338
+ if 0:
339
+ print(f"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})")
340
+
341
+ return detected
342
+
343
+
344
+ class DebugOption(ExplicitEnum):
345
+ UNDERFLOW_OVERFLOW = "underflow_overflow"
346
+ TPU_METRICS_DEBUG = "tpu_metrics_debug"
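Editor's note: the docstring above already shows how `DebugUnderflowOverflow` is attached to a Transformers model during training; a self-contained toy sketch (the `nn.Sequential` model and random input here are made up purely for illustration) looks like this:

```python
# Toy sketch: attach the debugger to a small module and run one forward pass.
# The forward hooks record per-module abs min/max and raise on inf/nan.
import torch

from transformers.debug_utils import DebugUnderflowOverflow

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=10, trace_batch_nums=[0])

x = torch.randn(4, 8)
_ = model(x)  # with trace_batch_nums=[0], the frames for this batch are printed
```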
venv/lib/python3.10/site-packages/transformers/deepspeed.py ADDED
@@ -0,0 +1,40 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Integration with DeepSpeed - kept for backward compatibility. If you plan to make any edits, make sure to modify the file
16
+ in `integrations/deepspeed` instead.
17
+
18
+ Check: https://github.com/huggingface/transformers/pull/25599
19
+ """
20
+ import warnings
21
+
22
+
23
+ warnings.warn(
24
+ "transformers.deepspeed module is deprecated and will be removed in a future version. Please import deepspeed modules directly from transformers.integrations",
25
+ FutureWarning,
26
+ )
27
+
28
+ # Backward compatibility imports, to make sure all those objects can be found in integrations/deepspeed
29
+ from .integrations.deepspeed import ( # noqa
30
+ HfDeepSpeedConfig,
31
+ HfTrainerDeepSpeedConfig,
32
+ deepspeed_config,
33
+ deepspeed_init,
34
+ deepspeed_load_checkpoint,
35
+ deepspeed_optim_sched,
36
+ is_deepspeed_available,
37
+ is_deepspeed_zero3_enabled,
38
+ set_hf_deepspeed_config,
39
+ unset_hf_deepspeed_config,
40
+ )
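Editor's note: given the deprecation warning above, new code should target the `integrations` location directly. A one-line sketch of the preferred import, using names the shim itself re-exports:

```python
# Preferred import path going forward (the old `transformers.deepspeed` shim re-exports these).
from transformers.integrations.deepspeed import is_deepspeed_available, is_deepspeed_zero3_enabled

print(is_deepspeed_available(), is_deepspeed_zero3_enabled())
```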
venv/lib/python3.10/site-packages/transformers/dependency_versions_check.py ADDED
@@ -0,0 +1,63 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .dependency_versions_table import deps
16
+ from .utils.versions import require_version, require_version_core
17
+
18
+
19
+ # define which module versions we always want to check at run time
20
+ # (usually the ones defined in `install_requires` in setup.py)
21
+ #
22
+ # order specific notes:
23
+ # - tqdm must be checked before tokenizers
24
+
25
+ pkgs_to_check_at_runtime = [
26
+ "python",
27
+ "tqdm",
28
+ "regex",
29
+ "requests",
30
+ "packaging",
31
+ "filelock",
32
+ "numpy",
33
+ "tokenizers",
34
+ "huggingface-hub",
35
+ "safetensors",
36
+ "accelerate",
37
+ "pyyaml",
38
+ ]
39
+
40
+ for pkg in pkgs_to_check_at_runtime:
41
+ if pkg in deps:
42
+ if pkg == "tokenizers":
43
+ # must be loaded here, or else tqdm check may fail
44
+ from .utils import is_tokenizers_available
45
+
46
+ if not is_tokenizers_available():
47
+ continue # not required, check version only if installed
48
+ elif pkg == "accelerate":
49
+ # must be loaded here, or else tqdm check may fail
50
+ from .utils import is_accelerate_available
51
+
52
+ # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
53
+ # Transformers with PyTorch
54
+ if not is_accelerate_available():
55
+ continue # not required, check version only if installed
56
+
57
+ require_version_core(deps[pkg])
58
+ else:
59
+ raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
60
+
61
+
62
+ def dep_version_check(pkg, hint=None):
63
+ require_version(deps[pkg], hint)
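Editor's note: `dep_version_check` is the small helper other parts of the library call to enforce an optional dependency's pin at use time. A hedged usage sketch; the package name and hint are just examples drawn from the deps table:

```python
# Sketch: raises if the installed version does not satisfy the specifier in `deps`.
from transformers.dependency_versions_check import dep_version_check

dep_version_check("tokenizers", hint="Try `pip install -U tokenizers`.")
```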
venv/lib/python3.10/site-packages/transformers/dependency_versions_table.py ADDED
@@ -0,0 +1,92 @@
1
+ # THIS FILE HAS BEEN AUTOGENERATED. To update:
2
+ # 1. modify the `_deps` dict in setup.py
3
+ # 2. run `make deps_table_update``
4
+ deps = {
5
+ "Pillow": "Pillow>=10.0.1,<=15.0",
6
+ "accelerate": "accelerate>=0.21.0",
7
+ "av": "av==9.2.0",
8
+ "beautifulsoup4": "beautifulsoup4",
9
+ "codecarbon": "codecarbon==1.2.0",
10
+ "cookiecutter": "cookiecutter==1.7.3",
11
+ "dataclasses": "dataclasses",
12
+ "datasets": "datasets!=2.5.0",
13
+ "decord": "decord==0.6.0",
14
+ "deepspeed": "deepspeed>=0.9.3",
15
+ "diffusers": "diffusers",
16
+ "dill": "dill<0.3.5",
17
+ "evaluate": "evaluate>=0.2.0",
18
+ "faiss-cpu": "faiss-cpu",
19
+ "fastapi": "fastapi",
20
+ "filelock": "filelock",
21
+ "flax": "flax>=0.4.1,<=0.7.0",
22
+ "fsspec": "fsspec<2023.10.0",
23
+ "ftfy": "ftfy",
24
+ "fugashi": "fugashi>=1.0",
25
+ "GitPython": "GitPython<3.1.19",
26
+ "hf-doc-builder": "hf-doc-builder>=0.3.0",
27
+ "huggingface-hub": "huggingface-hub>=0.19.3,<1.0",
28
+ "importlib_metadata": "importlib_metadata",
29
+ "ipadic": "ipadic>=1.0.0,<2.0",
30
+ "isort": "isort>=5.5.4",
31
+ "jax": "jax>=0.4.1,<=0.4.13",
32
+ "jaxlib": "jaxlib>=0.4.1,<=0.4.13",
33
+ "jieba": "jieba",
34
+ "kenlm": "kenlm",
35
+ "keras": "keras<2.16",
36
+ "keras-nlp": "keras-nlp>=0.3.1",
37
+ "librosa": "librosa",
38
+ "nltk": "nltk",
39
+ "natten": "natten>=0.14.6,<0.15.0",
40
+ "numpy": "numpy>=1.17",
41
+ "onnxconverter-common": "onnxconverter-common",
42
+ "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
43
+ "onnxruntime": "onnxruntime>=1.4.0",
44
+ "opencv-python": "opencv-python",
45
+ "optuna": "optuna",
46
+ "optax": "optax>=0.0.8,<=0.1.4",
47
+ "packaging": "packaging>=20.0",
48
+ "parameterized": "parameterized",
49
+ "phonemizer": "phonemizer",
50
+ "protobuf": "protobuf",
51
+ "psutil": "psutil",
52
+ "pyyaml": "pyyaml>=5.1",
53
+ "pydantic": "pydantic",
54
+ "pytest": "pytest>=7.2.0,<8.0.0",
55
+ "pytest-timeout": "pytest-timeout",
56
+ "pytest-xdist": "pytest-xdist",
57
+ "python": "python>=3.8.0",
58
+ "ray[tune]": "ray[tune]>=2.7.0",
59
+ "regex": "regex!=2019.12.17",
60
+ "requests": "requests",
61
+ "rhoknp": "rhoknp>=1.1.0,<1.3.1",
62
+ "rjieba": "rjieba",
63
+ "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
64
+ "ruff": "ruff==0.1.5",
65
+ "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
66
+ "sacremoses": "sacremoses",
67
+ "safetensors": "safetensors>=0.4.1",
68
+ "sagemaker": "sagemaker>=2.31.0",
69
+ "scikit-learn": "scikit-learn",
70
+ "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
71
+ "sigopt": "sigopt",
72
+ "starlette": "starlette",
73
+ "sudachipy": "sudachipy>=0.6.6",
74
+ "sudachidict_core": "sudachidict_core>=20220729",
75
+ "tensorboard": "tensorboard",
76
+ "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.16",
77
+ "tensorflow": "tensorflow>=2.6,<2.16",
78
+ "tensorflow-text": "tensorflow-text<2.16",
79
+ "tf2onnx": "tf2onnx",
80
+ "timeout-decorator": "timeout-decorator",
81
+ "timm": "timm",
82
+ "tokenizers": "tokenizers>=0.19,<0.20",
83
+ "torch": "torch",
84
+ "torchaudio": "torchaudio",
85
+ "torchvision": "torchvision",
86
+ "pyctcdecode": "pyctcdecode>=0.4.0",
87
+ "tqdm": "tqdm>=4.27",
88
+ "unidic": "unidic>=1.0.2",
89
+ "unidic_lite": "unidic_lite>=1.0.7",
90
+ "urllib3": "urllib3<2.0.0",
91
+ "uvicorn": "uvicorn",
92
+ }
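Editor's note: the table above is a plain dict mapping package names to pip-style version specifiers, so looking up a pin is trivial. A short sketch:

```python
# Sketch: inspect the pinned range for a dependency.
from transformers.dependency_versions_table import deps

print(deps["tokenizers"])  # "tokenizers>=0.19,<0.20" per the table above
```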
venv/lib/python3.10/site-packages/transformers/dynamic_module_utils.py ADDED
@@ -0,0 +1,633 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Utilities to dynamically load objects from the Hub."""
16
+ import filecmp
17
+ import importlib
18
+ import os
19
+ import re
20
+ import shutil
21
+ import signal
22
+ import sys
23
+ import typing
24
+ import warnings
25
+ from pathlib import Path
26
+ from typing import Any, Dict, List, Optional, Union
27
+
28
+ from huggingface_hub import try_to_load_from_cache
29
+
30
+ from .utils import (
31
+ HF_MODULES_CACHE,
32
+ TRANSFORMERS_DYNAMIC_MODULE_NAME,
33
+ cached_file,
34
+ extract_commit_hash,
35
+ is_offline_mode,
36
+ logging,
37
+ )
38
+
39
+
40
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
41
+
42
+
43
+ def init_hf_modules():
44
+ """
45
+ Creates the cache directory for modules with an init, and adds it to the Python path.
46
+ """
47
+ # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
48
+ if HF_MODULES_CACHE in sys.path:
49
+ return
50
+
51
+ sys.path.append(HF_MODULES_CACHE)
52
+ os.makedirs(HF_MODULES_CACHE, exist_ok=True)
53
+ init_path = Path(HF_MODULES_CACHE) / "__init__.py"
54
+ if not init_path.exists():
55
+ init_path.touch()
56
+ importlib.invalidate_caches()
57
+
58
+
59
+ def create_dynamic_module(name: Union[str, os.PathLike]):
60
+ """
61
+ Creates a dynamic module in the cache directory for modules.
62
+
63
+ Args:
64
+ name (`str` or `os.PathLike`):
65
+ The name of the dynamic module to create.
66
+ """
67
+ init_hf_modules()
68
+ dynamic_module_path = (Path(HF_MODULES_CACHE) / name).resolve()
69
+ # If the parent module does not exist yet, recursively create it.
70
+ if not dynamic_module_path.parent.exists():
71
+ create_dynamic_module(dynamic_module_path.parent)
72
+ os.makedirs(dynamic_module_path, exist_ok=True)
73
+ init_path = dynamic_module_path / "__init__.py"
74
+ if not init_path.exists():
75
+ init_path.touch()
76
+ # It is extremely important to invalidate the cache when we change stuff in those modules, or users end up
77
+ # with errors about module that do not exist. Same for all other `invalidate_caches` in this file.
78
+ importlib.invalidate_caches()
79
+
80
+
81
+ def get_relative_imports(module_file: Union[str, os.PathLike]) -> List[str]:
82
+ """
83
+ Get the list of modules that are relatively imported in a module file.
84
+
85
+ Args:
86
+ module_file (`str` or `os.PathLike`): The module file to inspect.
87
+
88
+ Returns:
89
+ `List[str]`: The list of relative imports in the module.
90
+ """
91
+ with open(module_file, "r", encoding="utf-8") as f:
92
+ content = f.read()
93
+
94
+ # Imports of the form `import .xxx`
95
+ relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
96
+ # Imports of the form `from .xxx import yyy`
97
+ relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
98
+ # Unique-ify
99
+ return list(set(relative_imports))
100
+
101
+
102
+ def get_relative_import_files(module_file: Union[str, os.PathLike]) -> List[str]:
103
+ """
104
+ Get the list of all files that are needed for a given module. Note that this function recurses through the relative
105
+ imports (if a imports b and b imports c, it will return module files for b and c).
106
+
107
+ Args:
108
+ module_file (`str` or `os.PathLike`): The module file to inspect.
109
+
110
+ Returns:
111
+ `List[str]`: The list of all relative imports a given module needs (recursively), which will give us the list
112
+ of module files a given module needs.
113
+ """
114
+ no_change = False
115
+ files_to_check = [module_file]
116
+ all_relative_imports = []
117
+
118
+ # Let's recurse through all relative imports
119
+ while not no_change:
120
+ new_imports = []
121
+ for f in files_to_check:
122
+ new_imports.extend(get_relative_imports(f))
123
+
124
+ module_path = Path(module_file).parent
125
+ new_import_files = [str(module_path / m) for m in new_imports]
126
+ new_import_files = [f for f in new_import_files if f not in all_relative_imports]
127
+ files_to_check = [f"{f}.py" for f in new_import_files]
128
+
129
+ no_change = len(new_import_files) == 0
130
+ all_relative_imports.extend(files_to_check)
131
+
132
+ return all_relative_imports
133
+
134
+
135
+ def get_imports(filename: Union[str, os.PathLike]) -> List[str]:
136
+ """
137
+ Extracts all the libraries (not relative imports this time) that are imported in a file.
138
+
139
+ Args:
140
+ filename (`str` or `os.PathLike`): The module file to inspect.
141
+
142
+ Returns:
143
+ `List[str]`: The list of all packages required to use the input module.
144
+ """
145
+ with open(filename, "r", encoding="utf-8") as f:
146
+ content = f.read()
147
+
148
+ # filter out try/except block so in custom code we can have try/except imports
149
+ content = re.sub(r"\s*try\s*:\s*.*?\s*except\s*.*?:", "", content, flags=re.MULTILINE | re.DOTALL)
150
+
151
+ # Imports of the form `import xxx`
152
+ imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
153
+ # Imports of the form `from xxx import yyy`
154
+ imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
155
+ # Only keep the top-level module
156
+ imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
157
+ return list(set(imports))
158
+
159
+
160
+ def check_imports(filename: Union[str, os.PathLike]) -> List[str]:
161
+ """
162
+ Check if the current Python environment contains all the libraries that are imported in a file. Will raise if a
163
+ library is missing.
164
+
165
+ Args:
166
+ filename (`str` or `os.PathLike`): The module file to check.
167
+
168
+ Returns:
169
+ `List[str]`: The list of relative imports in the file.
170
+ """
171
+ imports = get_imports(filename)
172
+ missing_packages = []
173
+ for imp in imports:
174
+ try:
175
+ importlib.import_module(imp)
176
+ except ImportError:
177
+ missing_packages.append(imp)
178
+
179
+ if len(missing_packages) > 0:
180
+ raise ImportError(
181
+ "This modeling file requires the following packages that were not found in your environment: "
182
+ f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
183
+ )
184
+
185
+ return get_relative_imports(filename)
186
+
187
+
188
+ def get_class_in_module(class_name: str, module_path: Union[str, os.PathLike]) -> typing.Type:
189
+ """
190
+ Import a module on the cache directory for modules and extract a class from it.
191
+
192
+ Args:
193
+ class_name (`str`): The name of the class to import.
194
+ module_path (`str` or `os.PathLike`): The path to the module to import.
195
+
196
+ Returns:
197
+ `typing.Type`: The class looked for.
198
+ """
199
+ name = os.path.normpath(module_path).replace(".py", "").replace(os.path.sep, ".")
200
+ module_path = str(Path(HF_MODULES_CACHE) / module_path)
201
+ module = importlib.machinery.SourceFileLoader(name, module_path).load_module()
202
+ return getattr(module, class_name)
203
+
204
+
205
+ def get_cached_module_file(
206
+ pretrained_model_name_or_path: Union[str, os.PathLike],
207
+ module_file: str,
208
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
209
+ force_download: bool = False,
210
+ resume_download: bool = False,
211
+ proxies: Optional[Dict[str, str]] = None,
212
+ token: Optional[Union[bool, str]] = None,
213
+ revision: Optional[str] = None,
214
+ local_files_only: bool = False,
215
+ repo_type: Optional[str] = None,
216
+ _commit_hash: Optional[str] = None,
217
+ **deprecated_kwargs,
218
+ ) -> str:
219
+ """
220
+ Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached
221
+ Transformers module.
222
+
223
+ Args:
224
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
225
+ This can be either:
226
+
227
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
228
+ huggingface.co.
229
+ - a path to a *directory* containing a configuration file saved using the
230
+ [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
231
+
232
+ module_file (`str`):
233
+ The name of the module file containing the class to look for.
234
+ cache_dir (`str` or `os.PathLike`, *optional*):
235
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
236
+ cache should not be used.
237
+ force_download (`bool`, *optional*, defaults to `False`):
238
+ Whether or not to force to (re-)download the configuration files and override the cached versions if they
239
+ exist.
240
+ resume_download (`bool`, *optional*, defaults to `False`):
241
+ Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.
242
+ proxies (`Dict[str, str]`, *optional*):
243
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
244
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
245
+ token (`str` or *bool*, *optional*):
246
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
247
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
248
+ revision (`str`, *optional*, defaults to `"main"`):
249
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
250
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
251
+ identifier allowed by git.
252
+ local_files_only (`bool`, *optional*, defaults to `False`):
253
+ If `True`, will only try to load the tokenizer configuration from local files.
254
+ repo_type (`str`, *optional*):
255
+ Specify the repo type (useful when downloading from a space for instance).
256
+
257
+ <Tip>
258
+
259
+ Passing `token=True` is required when you want to use a private model.
260
+
261
+ </Tip>
262
+
263
+ Returns:
264
+ `str`: The path to the module inside the cache.
265
+ """
266
+ use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
267
+ if use_auth_token is not None:
268
+ warnings.warn(
269
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
270
+ FutureWarning,
271
+ )
272
+ if token is not None:
273
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
274
+ token = use_auth_token
275
+
276
+ if is_offline_mode() and not local_files_only:
277
+ logger.info("Offline mode: forcing local_files_only=True")
278
+ local_files_only = True
279
+
280
+ # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file.
281
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
282
+ is_local = os.path.isdir(pretrained_model_name_or_path)
283
+ if is_local:
284
+ submodule = os.path.basename(pretrained_model_name_or_path)
285
+ else:
286
+ submodule = pretrained_model_name_or_path.replace("/", os.path.sep)
287
+ cached_module = try_to_load_from_cache(
288
+ pretrained_model_name_or_path, module_file, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type
289
+ )
290
+
291
+ new_files = []
292
+ try:
293
+ # Load from URL or cache if already cached
294
+ resolved_module_file = cached_file(
295
+ pretrained_model_name_or_path,
296
+ module_file,
297
+ cache_dir=cache_dir,
298
+ force_download=force_download,
299
+ proxies=proxies,
300
+ resume_download=resume_download,
301
+ local_files_only=local_files_only,
302
+ token=token,
303
+ revision=revision,
304
+ repo_type=repo_type,
305
+ _commit_hash=_commit_hash,
306
+ )
307
+ if not is_local and cached_module != resolved_module_file:
308
+ new_files.append(module_file)
309
+
310
+ except EnvironmentError:
311
+ logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
312
+ raise
313
+
314
+ # Check we have all the requirements in our environment
315
+ modules_needed = check_imports(resolved_module_file)
316
+
317
+ # Now we move the module inside our cached dynamic modules.
318
+ full_submodule = TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
319
+ create_dynamic_module(full_submodule)
320
+ submodule_path = Path(HF_MODULES_CACHE) / full_submodule
321
+ if submodule == os.path.basename(pretrained_model_name_or_path):
322
+ # We copy local files to avoid putting too many folders in sys.path. This copy is done when the file is new or
323
+ # has changed since last copy.
324
+ if not (submodule_path / module_file).exists() or not filecmp.cmp(
325
+ resolved_module_file, str(submodule_path / module_file)
326
+ ):
327
+ shutil.copy(resolved_module_file, submodule_path / module_file)
328
+ importlib.invalidate_caches()
329
+ for module_needed in modules_needed:
330
+ module_needed = f"{module_needed}.py"
331
+ module_needed_file = os.path.join(pretrained_model_name_or_path, module_needed)
332
+ if not (submodule_path / module_needed).exists() or not filecmp.cmp(
333
+ module_needed_file, str(submodule_path / module_needed)
334
+ ):
335
+ shutil.copy(module_needed_file, submodule_path / module_needed)
336
+ importlib.invalidate_caches()
337
+ else:
338
+ # Get the commit hash
339
+ commit_hash = extract_commit_hash(resolved_module_file, _commit_hash)
340
+
341
+ # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
342
+ # benefit of versioning.
343
+ submodule_path = submodule_path / commit_hash
344
+ full_submodule = full_submodule + os.path.sep + commit_hash
345
+ create_dynamic_module(full_submodule)
346
+
347
+ if not (submodule_path / module_file).exists():
348
+ shutil.copy(resolved_module_file, submodule_path / module_file)
349
+ importlib.invalidate_caches()
350
+ # Make sure we also have every file with relative
351
+ for module_needed in modules_needed:
352
+ if not (submodule_path / f"{module_needed}.py").exists():
353
+ get_cached_module_file(
354
+ pretrained_model_name_or_path,
355
+ f"{module_needed}.py",
356
+ cache_dir=cache_dir,
357
+ force_download=force_download,
358
+ resume_download=resume_download,
359
+ proxies=proxies,
360
+ token=token,
361
+ revision=revision,
362
+ local_files_only=local_files_only,
363
+ _commit_hash=commit_hash,
364
+ )
365
+ new_files.append(f"{module_needed}.py")
366
+
367
+ if len(new_files) > 0 and revision is None:
368
+ new_files = "\n".join([f"- {f}" for f in new_files])
369
+ repo_type_str = "" if repo_type is None else f"{repo_type}s/"
370
+ url = f"https://huggingface.co/{repo_type_str}{pretrained_model_name_or_path}"
371
+ logger.warning(
372
+ f"A new version of the following files was downloaded from {url}:\n{new_files}"
373
+ "\n. Make sure to double-check they do not contain any added malicious code. To avoid downloading new "
374
+ "versions of the code file, you can pin a revision."
375
+ )
376
+
377
+ return os.path.join(full_submodule, module_file)
378
+
379
+
380
+ def get_class_from_dynamic_module(
381
+ class_reference: str,
382
+ pretrained_model_name_or_path: Union[str, os.PathLike],
383
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
384
+ force_download: bool = False,
385
+ resume_download: bool = False,
386
+ proxies: Optional[Dict[str, str]] = None,
387
+ token: Optional[Union[bool, str]] = None,
388
+ revision: Optional[str] = None,
389
+ local_files_only: bool = False,
390
+ repo_type: Optional[str] = None,
391
+ code_revision: Optional[str] = None,
392
+ **kwargs,
393
+ ) -> typing.Type:
394
+ """
395
+ Extracts a class from a module file, present in the local folder or repository of a model.
396
+
397
+ <Tip warning={true}>
398
+
399
+ Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should
400
+ therefore only be called on trusted repos.
401
+
402
+ </Tip>
403
+
404
+
405
+
406
+ Args:
407
+ class_reference (`str`):
408
+ The full name of the class to load, including its module and optionally its repo.
409
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
410
+ This can be either:
411
+
412
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
413
+ huggingface.co.
414
+ - a path to a *directory* containing a configuration file saved using the
415
+ [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
416
+
417
+ This is used when `class_reference` does not specify another repo.
418
+ module_file (`str`):
419
+ The name of the module file containing the class to look for.
420
+ class_name (`str`):
421
+ The name of the class to import in the module.
422
+ cache_dir (`str` or `os.PathLike`, *optional*):
423
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
424
+ cache should not be used.
425
+ force_download (`bool`, *optional*, defaults to `False`):
426
+ Whether or not to force to (re-)download the configuration files and override the cached versions if they
427
+ exist.
428
+ resume_download (`bool`, *optional*, defaults to `False`):
429
+ Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.
430
+ proxies (`Dict[str, str]`, *optional*):
431
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
432
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
433
+ token (`str` or `bool`, *optional*):
434
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
435
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
436
+ revision (`str`, *optional*, defaults to `"main"`):
437
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
438
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
439
+ identifier allowed by git.
440
+ local_files_only (`bool`, *optional*, defaults to `False`):
441
+ If `True`, will only try to load the tokenizer configuration from local files.
442
+ repo_type (`str`, *optional*):
443
+ Specify the repo type (useful when downloading from a space for instance).
444
+ code_revision (`str`, *optional*, defaults to `"main"`):
445
+ The specific revision to use for the code on the Hub, if the code leaves in a different repository than the
446
+ rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based system for
447
+ storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
448
+
449
+ <Tip>
450
+
451
+ Passing `token=True` is required when you want to use a private model.
452
+
453
+ </Tip>
454
+
455
+ Returns:
456
+ `typing.Type`: The class, dynamically imported from the module.
457
+
458
+ Examples:
459
+
460
+ ```python
461
+ # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this
462
+ # module.
463
+ cls = get_class_from_dynamic_module("modeling.MyBertModel", "sgugger/my-bert-model")
464
+
465
+ # Download module `modeling.py` from a given repo and cache then extract the class `MyBertModel` from this
466
+ # module.
467
+ cls = get_class_from_dynamic_module("sgugger/my-bert-model--modeling.MyBertModel", "sgugger/another-bert-model")
468
+ ```"""
469
+ use_auth_token = kwargs.pop("use_auth_token", None)
470
+ if use_auth_token is not None:
471
+ warnings.warn(
472
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
473
+ FutureWarning,
474
+ )
475
+ if token is not None:
476
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
477
+ token = use_auth_token
478
+
479
+ # Catch the name of the repo if it's specified in `class_reference`
480
+ if "--" in class_reference:
481
+ repo_id, class_reference = class_reference.split("--")
482
+ else:
483
+ repo_id = pretrained_model_name_or_path
484
+ module_file, class_name = class_reference.split(".")
485
+
486
+ if code_revision is None and pretrained_model_name_or_path == repo_id:
487
+ code_revision = revision
488
+ # And lastly we get the class inside our newly created module
489
+ final_module = get_cached_module_file(
490
+ repo_id,
491
+ module_file + ".py",
492
+ cache_dir=cache_dir,
493
+ force_download=force_download,
494
+ resume_download=resume_download,
495
+ proxies=proxies,
496
+ token=token,
497
+ revision=code_revision,
498
+ local_files_only=local_files_only,
499
+ repo_type=repo_type,
500
+ )
501
+ return get_class_in_module(class_name, final_module)
502
+
503
+
504
+ def custom_object_save(obj: Any, folder: Union[str, os.PathLike], config: Optional[Dict] = None) -> List[str]:
505
+ """
506
+ Save the modeling files corresponding to a custom model/configuration/tokenizer etc. in a given folder. Optionally
507
+ adds the proper fields in a config.
508
+
509
+ Args:
510
+ obj (`Any`): The object for which to save the module files.
511
+ folder (`str` or `os.PathLike`): The folder where to save.
512
+ config (`PretrainedConfig` or dictionary, `optional`):
513
+ A config in which to register the auto_map corresponding to this custom object.
514
+
515
+ Returns:
516
+ `List[str]`: The list of files saved.
517
+ """
518
+ if obj.__module__ == "__main__":
519
+ logger.warning(
520
+ f"We can't save the code defining {obj} in {folder} as it's been defined in __main__. You should put "
521
+ "this code in a separate module so we can include it in the saved folder and make it easier to share via "
522
+ "the Hub."
523
+ )
524
+ return
525
+
526
+ def _set_auto_map_in_config(_config):
527
+ module_name = obj.__class__.__module__
528
+ last_module = module_name.split(".")[-1]
529
+ full_name = f"{last_module}.{obj.__class__.__name__}"
530
+ # Special handling for tokenizers
531
+ if "Tokenizer" in full_name:
532
+ slow_tokenizer_class = None
533
+ fast_tokenizer_class = None
534
+ if obj.__class__.__name__.endswith("Fast"):
535
+ # Fast tokenizer: we have the fast tokenizer class and we may have the slow one has an attribute.
536
+ fast_tokenizer_class = f"{last_module}.{obj.__class__.__name__}"
537
+ if getattr(obj, "slow_tokenizer_class", None) is not None:
538
+ slow_tokenizer = getattr(obj, "slow_tokenizer_class")
539
+ slow_tok_module_name = slow_tokenizer.__module__
540
+ last_slow_tok_module = slow_tok_module_name.split(".")[-1]
541
+ slow_tokenizer_class = f"{last_slow_tok_module}.{slow_tokenizer.__name__}"
542
+ else:
543
+ # Slow tokenizer: no way to have the fast class
544
+ slow_tokenizer_class = f"{last_module}.{obj.__class__.__name__}"
545
+
546
+ full_name = (slow_tokenizer_class, fast_tokenizer_class)
547
+
548
+ if isinstance(_config, dict):
549
+ auto_map = _config.get("auto_map", {})
550
+ auto_map[obj._auto_class] = full_name
551
+ _config["auto_map"] = auto_map
552
+ elif getattr(_config, "auto_map", None) is not None:
553
+ _config.auto_map[obj._auto_class] = full_name
554
+ else:
555
+ _config.auto_map = {obj._auto_class: full_name}
556
+
557
+ # Add object class to the config auto_map
558
+ if isinstance(config, (list, tuple)):
559
+ for cfg in config:
560
+ _set_auto_map_in_config(cfg)
561
+ elif config is not None:
562
+ _set_auto_map_in_config(config)
563
+
564
+ result = []
565
+ # Copy module file to the output folder.
566
+ object_file = sys.modules[obj.__module__].__file__
567
+ dest_file = Path(folder) / (Path(object_file).name)
568
+ shutil.copy(object_file, dest_file)
569
+ result.append(dest_file)
570
+
571
+ # Gather all relative imports recursively and make sure they are copied as well.
572
+ for needed_file in get_relative_import_files(object_file):
573
+ dest_file = Path(folder) / (Path(needed_file).name)
574
+ shutil.copy(needed_file, dest_file)
575
+ result.append(dest_file)
576
+
577
+ return result
578
+
579
+
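As a hedged illustration of `custom_object_save` (the `MyCustomConfig` class and the `my_configs.py` module are hypothetical, and the target folder is arbitrary), saving a registered custom config copies its defining module next to the saved files and records the `auto_map` entry:

```python
import os

from transformers.dynamic_module_utils import custom_object_save

# Hypothetical module `my_configs.py` containing: class MyCustomConfig(PretrainedConfig): ...
# The class must live in its own module; custom_object_save skips objects defined in __main__.
from my_configs import MyCustomConfig

MyCustomConfig.register_for_auto_class("AutoConfig")  # sets `_auto_class` on the class

cfg = MyCustomConfig()
os.makedirs("saved_custom", exist_ok=True)
saved_files = custom_object_save(cfg, "saved_custom", config=cfg)
# `saved_files` lists my_configs.py plus any relative imports it pulls in, and
# cfg.auto_map now contains {"AutoConfig": "my_configs.MyCustomConfig"}.
```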
580
+ def _raise_timeout_error(signum, frame):
581
+ raise ValueError(
582
+ "Loading this model requires you to execute custom code contained in the model repository on your local "
583
+ "machine. Please set the option `trust_remote_code=True` to permit loading of this model."
584
+ )
585
+
586
+
587
+ TIME_OUT_REMOTE_CODE = 15
588
+
589
+
590
+ def resolve_trust_remote_code(trust_remote_code, model_name, has_local_code, has_remote_code):
591
+ if trust_remote_code is None:
592
+ if has_local_code:
593
+ trust_remote_code = False
594
+ elif has_remote_code and TIME_OUT_REMOTE_CODE > 0:
595
+ prev_sig_handler = None
596
+ try:
597
+ prev_sig_handler = signal.signal(signal.SIGALRM, _raise_timeout_error)
598
+ signal.alarm(TIME_OUT_REMOTE_CODE)
599
+ while trust_remote_code is None:
600
+ answer = input(
601
+ f"The repository for {model_name} contains custom code which must be executed to correctly "
602
+ f"load the model. You can inspect the repository content at https://hf.co/{model_name}.\n"
603
+ f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n"
604
+ f"Do you wish to run the custom code? [y/N] "
605
+ )
606
+ if answer.lower() in ["yes", "y", "1"]:
607
+ trust_remote_code = True
608
+ elif answer.lower() in ["no", "n", "0", ""]:
609
+ trust_remote_code = False
610
+ signal.alarm(0)
611
+ except Exception:
612
+ # OS which does not support signal.SIGALRM
613
+ raise ValueError(
614
+ f"The repository for {model_name} contains custom code which must be executed to correctly "
615
+ f"load the model. You can inspect the repository content at https://hf.co/{model_name}.\n"
616
+ f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
617
+ )
618
+ finally:
619
+ if prev_sig_handler is not None:
620
+ signal.signal(signal.SIGALRM, prev_sig_handler)
621
+ signal.alarm(0)
622
+ elif has_remote_code:
623
+ # For the CI which puts the timeout at 0
624
+ _raise_timeout_error(None, None)
625
+
626
+ if has_remote_code and not has_local_code and not trust_remote_code:
627
+ raise ValueError(
628
+ f"Loading {model_name} requires you to execute the configuration file in that"
629
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
630
+ " set the option `trust_remote_code=True` to remove this error."
631
+ )
632
+
633
+ return trust_remote_code
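A minimal, hedged sketch of how the two helpers above fit together when loading custom code (the repo id is taken from the docstring example; the final `from_pretrained` call is illustrative only):

```python
from transformers.dynamic_module_utils import (
    get_class_from_dynamic_module,
    resolve_trust_remote_code,
)

# Decide whether remote code may run: prompts the user (with a timeout) when undecided.
trust = resolve_trust_remote_code(
    trust_remote_code=None,
    model_name="sgugger/my-bert-model",
    has_local_code=False,
    has_remote_code=True,
)

if trust:
    # Downloads `modeling.py` from the repo, caches it and returns the class object.
    cls = get_class_from_dynamic_module("modeling.MyBertModel", "sgugger/my-bert-model")
    model = cls.from_pretrained("sgugger/my-bert-model", trust_remote_code=True)  # illustrative
```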
venv/lib/python3.10/site-packages/transformers/feature_extraction_sequence_utils.py ADDED
@@ -0,0 +1,371 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Sequence feature extraction class for common feature extractors to preprocess sequences.
17
+ """
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+
22
+ from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
23
+ from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class SequenceFeatureExtractor(FeatureExtractionMixin):
30
+ """
31
+ This is a general feature extraction class for speech recognition.
32
+
33
+ Args:
34
+ feature_size (`int`):
35
+ The feature dimension of the extracted features.
36
+ sampling_rate (`int`):
37
+ The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
38
+ padding_value (`float`):
39
+ The value that is used to fill the padding values / vectors.
40
+ """
41
+
42
+ def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
43
+ self.feature_size = feature_size
44
+ self.sampling_rate = sampling_rate
45
+ self.padding_value = padding_value
46
+
47
+ self.padding_side = kwargs.pop("padding_side", "right")
48
+ self.return_attention_mask = kwargs.pop("return_attention_mask", True)
49
+
50
+ super().__init__(**kwargs)
51
+
52
+ def pad(
53
+ self,
54
+ processed_features: Union[
55
+ BatchFeature,
56
+ List[BatchFeature],
57
+ Dict[str, BatchFeature],
58
+ Dict[str, List[BatchFeature]],
59
+ List[Dict[str, BatchFeature]],
60
+ ],
61
+ padding: Union[bool, str, PaddingStrategy] = True,
62
+ max_length: Optional[int] = None,
63
+ truncation: bool = False,
64
+ pad_to_multiple_of: Optional[int] = None,
65
+ return_attention_mask: Optional[bool] = None,
66
+ return_tensors: Optional[Union[str, TensorType]] = None,
67
+ ) -> BatchFeature:
68
+ """
69
+ Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the
70
+ max sequence length in the batch.
71
+
72
+ The padding side (left/right) and padding value are defined at the feature extractor level (with `self.padding_side`,
73
+ `self.padding_value`)
74
+
75
+ <Tip>
76
+
77
+ If the `processed_features` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
78
+ result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
79
+ PyTorch tensors, you will lose the specific device of your tensors however.
80
+
81
+ </Tip>
82
+
83
+ Args:
84
+ processed_features ([`BatchFeature`], list of [`BatchFeature`], `Dict[str, List[float]]`, `Dict[str, List[List[float]]` or `List[Dict[str, List[float]]]`):
85
+ Processed inputs. Can represent one input ([`BatchFeature`] or `Dict[str, List[float]]`) or a batch of
86
+ input values / vectors (list of [`BatchFeature`], *Dict[str, List[List[float]]]* or *List[Dict[str,
87
+ List[float]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
88
+ collate function.
89
+
90
+ Instead of `List[float]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors),
91
+ see the note above for the return type.
92
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
93
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
94
+ index) among:
95
+
96
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
97
+ sequence if provided).
98
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
99
+ acceptable input length for the model if that argument is not provided.
100
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
101
+ lengths).
102
+ max_length (`int`, *optional*):
103
+ Maximum length of the returned list and optionally padding length (see above).
104
+ truncation (`bool`):
105
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
106
+ pad_to_multiple_of (`int`, *optional*):
107
+ If set will pad the sequence to a multiple of the provided value.
108
+
109
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
110
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
111
+ return_attention_mask (`bool`, *optional*):
112
+ Whether to return the attention mask. If left to the default, will return the attention mask according
113
+ to the specific feature_extractor's default.
114
+
115
+ [What are attention masks?](../glossary#attention-mask)
116
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
117
+ If set, will return tensors instead of list of python integers. Acceptable values are:
118
+
119
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
120
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
121
+ - `'np'`: Return Numpy `np.ndarray` objects.
122
+ """
123
+ # If we have a list of dicts, let's convert it in a dict of lists
124
+ # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
125
+ if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
126
+ processed_features = {
127
+ key: [example[key] for example in processed_features] for key in processed_features[0].keys()
128
+ }
129
+
130
+ # The model's main input name, usually `input_values`, has to be passed for padding
131
+ if self.model_input_names[0] not in processed_features:
132
+ raise ValueError(
133
+ "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
134
+ f" to this method that includes {self.model_input_names[0]}, but you provided"
135
+ f" {list(processed_features.keys())}"
136
+ )
137
+
138
+ required_input = processed_features[self.model_input_names[0]]
139
+ return_attention_mask = (
140
+ return_attention_mask if return_attention_mask is not None else self.return_attention_mask
141
+ )
142
+
143
+ if len(required_input) == 0:
144
+ if return_attention_mask:
145
+ processed_features["attention_mask"] = []
146
+ return processed_features
147
+
148
+ # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
149
+ # and rebuild them afterwards if no return_tensors is specified
150
+ # Note that we lose the specific device the tensor may be on for PyTorch
151
+
152
+ first_element = required_input[0]
153
+ if isinstance(first_element, (list, tuple)):
154
+ # first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
155
+ index = 0
156
+ while len(required_input[index]) == 0:
157
+ index += 1
158
+ if index < len(required_input):
159
+ first_element = required_input[index][0]
160
+
161
+ if return_tensors is None:
162
+ if is_tf_tensor(first_element):
163
+ return_tensors = "tf"
164
+ elif is_torch_tensor(first_element):
165
+ return_tensors = "pt"
166
+ elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
167
+ return_tensors = "np"
168
+ else:
169
+ raise ValueError(
170
+ f"type of {first_element} unknown: {type(first_element)}. "
171
+ "Should be one of a python, numpy, pytorch or tensorflow object."
172
+ )
173
+
174
+ for key, value in processed_features.items():
175
+ if isinstance(value[0], (int, float)):
176
+ processed_features[key] = to_numpy(value)
177
+ else:
178
+ processed_features[key] = [to_numpy(v) for v in value]
179
+
180
+ # Convert padding_strategy in PaddingStrategy
181
+ padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
182
+
183
+ required_input = processed_features[self.model_input_names[0]]
184
+
185
+ batch_size = len(required_input)
186
+ if not all(len(v) == batch_size for v in processed_features.values()):
187
+ raise ValueError("Some items in the output dictionary have a different batch size than others.")
188
+
189
+ truncated_inputs = []
190
+ for i in range(batch_size):
191
+ inputs = {k: v[i] for k, v in processed_features.items()}
192
+ # truncation
193
+ inputs_slice = self._truncate(
194
+ inputs,
195
+ max_length=max_length,
196
+ pad_to_multiple_of=pad_to_multiple_of,
197
+ truncation=truncation,
198
+ )
199
+ truncated_inputs.append(inputs_slice)
200
+
201
+ if padding_strategy == PaddingStrategy.LONGEST:
202
+ # make sure that `max_length` cannot be longer than the longest truncated length
203
+ max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
204
+ padding_strategy = PaddingStrategy.MAX_LENGTH
205
+
206
+ batch_outputs = {}
207
+ for i in range(batch_size):
208
+ # padding
209
+ outputs = self._pad(
210
+ truncated_inputs[i],
211
+ max_length=max_length,
212
+ padding_strategy=padding_strategy,
213
+ pad_to_multiple_of=pad_to_multiple_of,
214
+ return_attention_mask=return_attention_mask,
215
+ )
216
+
217
+ for key, value in outputs.items():
218
+ if key not in batch_outputs:
219
+ batch_outputs[key] = []
220
+ if value.dtype is np.dtype(np.float64):
221
+ value = value.astype(np.float32)
222
+ batch_outputs[key].append(value)
223
+
224
+ return BatchFeature(batch_outputs, tensor_type=return_tensors)
225
+
226
+ def _pad(
227
+ self,
228
+ processed_features: Union[Dict[str, np.ndarray], BatchFeature],
229
+ max_length: Optional[int] = None,
230
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
231
+ pad_to_multiple_of: Optional[int] = None,
232
+ return_attention_mask: Optional[bool] = None,
233
+ ) -> dict:
234
+ """
235
+ Pad inputs (on left/right and up to predefined length or max length in the batch)
236
+
237
+ Args:
238
+ processed_features (`Union[Dict[str, np.ndarray], BatchFeature]`):
239
+ Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch
240
+ of input values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)
241
+ max_length (`int`, *optional*):
242
+ Maximum length of the returned list and optionally padding length (see below)
243
+ padding_strategy (`PaddingStrategy`, *optional*, default to `PaddingStrategy.DO_NOT_PAD`):
244
+ PaddingStrategy to use for padding.
245
+
246
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
247
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
248
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
249
+ The feature_extractor padding sides are defined in self.padding_side:
250
+
251
+ - 'left': pads on the left of the sequences
252
+ - 'right': pads on the right of the sequences
253
+ pad_to_multiple_of (`int`, *optional*):
254
+ Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
255
+ enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
256
+ which benefit from having sequence lengths be a multiple of 128.
257
+ return_attention_mask (`bool`, *optional*):
258
+ Set to False to avoid returning attention mask (default: set to model specifics)
259
+ """
260
+ required_input = processed_features[self.model_input_names[0]]
261
+
262
+ if padding_strategy == PaddingStrategy.LONGEST:
263
+ max_length = len(required_input)
264
+
265
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
266
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
267
+
268
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
269
+
270
+ if return_attention_mask and "attention_mask" not in processed_features:
271
+ processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)
272
+
273
+ if needs_to_be_padded:
274
+ difference = max_length - len(required_input)
275
+ if self.padding_side == "right":
276
+ if return_attention_mask:
277
+ processed_features["attention_mask"] = np.pad(
278
+ processed_features["attention_mask"], (0, difference)
279
+ )
280
+ padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
281
+ processed_features[self.model_input_names[0]] = np.pad(
282
+ required_input, padding_shape, "constant", constant_values=self.padding_value
283
+ )
284
+ elif self.padding_side == "left":
285
+ if return_attention_mask:
286
+ processed_features["attention_mask"] = np.pad(
287
+ processed_features["attention_mask"], (difference, 0)
288
+ )
289
+ padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
290
+ processed_features[self.model_input_names[0]] = np.pad(
291
+ required_input, padding_shape, "constant", constant_values=self.padding_value
292
+ )
293
+ else:
294
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
295
+
296
+ return processed_features
297
+
298
+ def _truncate(
299
+ self,
300
+ processed_features: Union[Dict[str, np.ndarray], BatchFeature],
301
+ max_length: Optional[int] = None,
302
+ pad_to_multiple_of: Optional[int] = None,
303
+ truncation: Optional[bool] = None,
304
+ ):
305
+ """
306
+ Truncate inputs to predefined length or max length in the batch
307
+
308
+ Args:
309
+ processed_features(`Union[Dict[str, np.ndarray], BatchFeature]`):
310
+ Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch
311
+ of input values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)
312
+ max_length (`int`, *optional*):
313
+ maximum length of the returned list and optionally padding length (see below)
314
+ pad_to_multiple_of (`int`, *optional*) :
315
+ Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
316
+ enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
317
+ which benefit from having sequence lengths be a multiple of 128.
318
+ truncation (`bool`, *optional*):
319
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
320
+ """
321
+ if not truncation:
322
+ return processed_features
323
+ elif truncation and max_length is None:
324
+ raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
325
+
326
+ required_input = processed_features[self.model_input_names[0]]
327
+
328
+ # find `max_length` that fits `pad_to_multiple_of`
329
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
330
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
331
+
332
+ needs_to_be_truncated = len(required_input) > max_length
333
+
334
+ if needs_to_be_truncated:
335
+ processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
336
+ if "attention_mask" in processed_features:
337
+ processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
338
+
339
+ return processed_features
340
+
341
+ def _get_padding_strategies(self, padding=False, max_length=None):
342
+ """
343
+ Find the correct padding strategy
344
+ """
345
+
346
+ # Get padding strategy
347
+ if padding is not False:
348
+ if padding is True:
349
+ padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
350
+ elif not isinstance(padding, PaddingStrategy):
351
+ padding_strategy = PaddingStrategy(padding)
352
+ elif isinstance(padding, PaddingStrategy):
353
+ padding_strategy = padding
354
+ else:
355
+ padding_strategy = PaddingStrategy.DO_NOT_PAD
356
+
357
+ # Set max length if needed
358
+ if max_length is None:
359
+ if padding_strategy == PaddingStrategy.MAX_LENGTH:
360
+ raise ValueError(
361
+ f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
362
+ )
363
+
364
+ # Test if we have a padding value
365
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
366
+ raise ValueError(
367
+ "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
368
+ " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
369
+ )
370
+
371
+ return padding_strategy
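To make the padding path above concrete, a small, hedged sketch using a concrete subclass (`Wav2Vec2FeatureExtractor`) on synthetic audio of unequal lengths:

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

# Two synthetic waveforms of different lengths.
speech = [np.random.randn(16000).astype(np.float32), np.random.randn(12345).astype(np.float32)]

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
features = extractor(speech, sampling_rate=16000)  # BatchFeature with ragged "input_values"

# `pad` aligns the batch to the longest example and builds the attention mask.
batch = extractor.pad(features, padding=True, return_attention_mask=True, return_tensors="np")
print(batch["input_values"].shape)      # (2, 16000)
print(batch["attention_mask"].sum(-1))  # [16000 12345]
```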
venv/lib/python3.10/site-packages/transformers/feature_extraction_utils.py ADDED
@@ -0,0 +1,684 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extraction saving/loading class for common feature extractors.
17
+ """
18
+
19
+ import copy
20
+ import json
21
+ import os
22
+ import warnings
23
+ from collections import UserDict
24
+ from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
25
+
26
+ import numpy as np
27
+
28
+ from .dynamic_module_utils import custom_object_save
29
+ from .utils import (
30
+ FEATURE_EXTRACTOR_NAME,
31
+ PushToHubMixin,
32
+ TensorType,
33
+ add_model_info_to_auto_map,
34
+ cached_file,
35
+ copy_func,
36
+ download_url,
37
+ is_flax_available,
38
+ is_jax_tensor,
39
+ is_numpy_array,
40
+ is_offline_mode,
41
+ is_remote_url,
42
+ is_tf_available,
43
+ is_torch_available,
44
+ is_torch_device,
45
+ is_torch_dtype,
46
+ logging,
47
+ requires_backends,
48
+ )
49
+
50
+
51
+ if TYPE_CHECKING:
52
+ if is_torch_available():
53
+ import torch # noqa
54
+
55
+
56
+ logger = logging.get_logger(__name__)
57
+
58
+ PreTrainedFeatureExtractor = Union["SequenceFeatureExtractor"] # noqa: F821
59
+
60
+
61
+ class BatchFeature(UserDict):
62
+ r"""
63
+ Holds the output of the [`~SequenceFeatureExtractor.pad`] and feature extractor specific `__call__` methods.
64
+
65
+ This class is derived from a python dictionary and can be used as a dictionary.
66
+
67
+ Args:
68
+ data (`dict`, *optional*):
69
+ Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask',
70
+ etc.).
71
+ tensor_type (`Union[None, str, TensorType]`, *optional*):
72
+ You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
73
+ initialization.
74
+ """
75
+
76
+ def __init__(self, data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None):
77
+ super().__init__(data)
78
+ self.convert_to_tensors(tensor_type=tensor_type)
79
+
80
+ def __getitem__(self, item: str) -> Union[Any]:
81
+ """
82
+ If the key is a string, returns the value of the dict associated to `key` ('input_values', 'attention_mask',
83
+ etc.).
84
+ """
85
+ if isinstance(item, str):
86
+ return self.data[item]
87
+ else:
88
+ raise KeyError("Indexing with integers is not available when using Python based feature extractors")
89
+
90
+ def __getattr__(self, item: str):
91
+ try:
92
+ return self.data[item]
93
+ except KeyError:
94
+ raise AttributeError
95
+
96
+ def __getstate__(self):
97
+ return {"data": self.data}
98
+
99
+ def __setstate__(self, state):
100
+ if "data" in state:
101
+ self.data = state["data"]
102
+
103
+ # Copied from transformers.tokenization_utils_base.BatchEncoding.keys
104
+ def keys(self):
105
+ return self.data.keys()
106
+
107
+ # Copied from transformers.tokenization_utils_base.BatchEncoding.values
108
+ def values(self):
109
+ return self.data.values()
110
+
111
+ # Copied from transformers.tokenization_utils_base.BatchEncoding.items
112
+ def items(self):
113
+ return self.data.items()
114
+
115
+ def _get_is_as_tensor_fns(self, tensor_type: Optional[Union[str, TensorType]] = None):
116
+ if tensor_type is None:
117
+ return None, None
118
+
119
+ # Convert to TensorType
120
+ if not isinstance(tensor_type, TensorType):
121
+ tensor_type = TensorType(tensor_type)
122
+
123
+ # Get a function reference for the correct framework
124
+ if tensor_type == TensorType.TENSORFLOW:
125
+ if not is_tf_available():
126
+ raise ImportError(
127
+ "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
128
+ )
129
+ import tensorflow as tf
130
+
131
+ as_tensor = tf.constant
132
+ is_tensor = tf.is_tensor
133
+ elif tensor_type == TensorType.PYTORCH:
134
+ if not is_torch_available():
135
+ raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
136
+ import torch # noqa
137
+
138
+ def as_tensor(value):
139
+ if isinstance(value, (list, tuple)) and len(value) > 0 and isinstance(value[0], np.ndarray):
140
+ value = np.array(value)
141
+ return torch.tensor(value)
142
+
143
+ is_tensor = torch.is_tensor
144
+ elif tensor_type == TensorType.JAX:
145
+ if not is_flax_available():
146
+ raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
147
+ import jax.numpy as jnp # noqa: F811
148
+
149
+ as_tensor = jnp.array
150
+ is_tensor = is_jax_tensor
151
+ else:
152
+
153
+ def as_tensor(value, dtype=None):
154
+ if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)):
155
+ value_lens = [len(val) for val in value]
156
+ if len(set(value_lens)) > 1 and dtype is None:
157
+ # we have a ragged list so handle explicitly
158
+ value = as_tensor([np.asarray(val) for val in value], dtype=object)
159
+ return np.asarray(value, dtype=dtype)
160
+
161
+ is_tensor = is_numpy_array
162
+ return is_tensor, as_tensor
163
+
164
+ def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
165
+ """
166
+ Convert the inner content to tensors.
167
+
168
+ Args:
169
+ tensor_type (`str` or [`~utils.TensorType`], *optional*):
170
+ The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
171
+ `None`, no modification is done.
172
+ """
173
+ if tensor_type is None:
174
+ return self
175
+
176
+ is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type)
177
+
178
+ # Do the tensor conversion in batch
179
+ for key, value in self.items():
180
+ try:
181
+ if not is_tensor(value):
182
+ tensor = as_tensor(value)
183
+
184
+ self[key] = tensor
185
+ except: # noqa E722
186
+ if key == "overflowing_values":
187
+ raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
188
+ raise ValueError(
189
+ "Unable to create tensor, you should probably activate padding "
190
+ "with 'padding=True' to have batched tensors with the same length."
191
+ )
192
+
193
+ return self
194
+
195
+ def to(self, *args, **kwargs) -> "BatchFeature":
196
+ """
197
+ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
198
+ different `dtypes` and sending the `BatchFeature` to a different `device`.
199
+
200
+ Args:
201
+ args (`Tuple`):
202
+ Will be passed to the `to(...)` function of the tensors.
203
+ kwargs (`Dict`, *optional*):
204
+ Will be passed to the `to(...)` function of the tensors.
205
+
206
+ Returns:
207
+ [`BatchFeature`]: The same instance after modification.
208
+ """
209
+ requires_backends(self, ["torch"])
210
+ import torch # noqa
211
+
212
+ new_data = {}
213
+ device = kwargs.get("device")
214
+ # Check if the args are a device or a dtype
215
+ if device is None and len(args) > 0:
216
+ # device should be always the first argument
217
+ arg = args[0]
218
+ if is_torch_dtype(arg):
219
+ # The first argument is a dtype
220
+ pass
221
+ elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
222
+ device = arg
223
+ else:
224
+ # it's something else
225
+ raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
226
+ # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
227
+ for k, v in self.items():
228
+ # check if v is a floating point
229
+ if torch.is_floating_point(v):
230
+ # cast and send to device
231
+ new_data[k] = v.to(*args, **kwargs)
232
+ elif device is not None:
233
+ new_data[k] = v.to(device=device)
234
+ else:
235
+ new_data[k] = v
236
+ self.data = new_data
237
+ return self
238
+
239
+
240
+ class FeatureExtractionMixin(PushToHubMixin):
241
+ """
242
+ This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature
243
+ extractors.
244
+ """
245
+
246
+ _auto_class = None
247
+
248
+ def __init__(self, **kwargs):
249
+ """Set elements of `kwargs` as attributes."""
250
+ # Pop "processor_class" as it should be saved as private attribute
251
+ self._processor_class = kwargs.pop("processor_class", None)
252
+ # Additional attributes without default values
253
+ for key, value in kwargs.items():
254
+ try:
255
+ setattr(self, key, value)
256
+ except AttributeError as err:
257
+ logger.error(f"Can't set {key} with value {value} for {self}")
258
+ raise err
259
+
260
+ def _set_processor_class(self, processor_class: str):
261
+ """Sets processor class as an attribute."""
262
+ self._processor_class = processor_class
263
+
264
+ @classmethod
265
+ def from_pretrained(
266
+ cls,
267
+ pretrained_model_name_or_path: Union[str, os.PathLike],
268
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
269
+ force_download: bool = False,
270
+ local_files_only: bool = False,
271
+ token: Optional[Union[str, bool]] = None,
272
+ revision: str = "main",
273
+ **kwargs,
274
+ ):
275
+ r"""
276
+ Instantiate a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a feature extractor, *e.g.* a
277
+ derived class of [`SequenceFeatureExtractor`].
278
+
279
+ Args:
280
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
281
+ This can be either:
282
+
283
+ - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
284
+ huggingface.co.
285
+ - a path to a *directory* containing a feature extractor file saved using the
286
+ [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
287
+ `./my_model_directory/`.
288
+ - a path or url to a saved feature extractor JSON *file*, e.g.,
289
+ `./my_model_directory/preprocessor_config.json`.
290
+ cache_dir (`str` or `os.PathLike`, *optional*):
291
+ Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
292
+ standard cache should not be used.
293
+ force_download (`bool`, *optional*, defaults to `False`):
294
+ Whether or not to force to (re-)download the feature extractor files and override the cached versions
295
+ if they exist.
296
+ resume_download (`bool`, *optional*, defaults to `False`):
297
+ Whether or not to delete incompletely received file. Attempts to resume the download if such a file
298
+ exists.
299
+ proxies (`Dict[str, str]`, *optional*):
300
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
301
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
302
+ token (`str` or `bool`, *optional*):
303
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
304
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
305
+ revision (`str`, *optional*, defaults to `"main"`):
306
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
307
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
308
+ identifier allowed by git.
309
+
310
+
311
+ <Tip>
312
+
313
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
314
+
315
+ </Tip>
316
+
317
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
318
+ If `False`, then this function returns just the final feature extractor object. If `True`, then this
319
+ functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
320
+ consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
321
+ `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
322
+ kwargs (`Dict[str, Any]`, *optional*):
323
+ The values in kwargs of any keys which are feature extractor attributes will be used to override the
324
+ loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
325
+ controlled by the `return_unused_kwargs` keyword parameter.
326
+
327
+ Returns:
328
+ A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`].
329
+
330
+ Examples:
331
+
332
+ ```python
333
+ # We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let's show the examples on a
334
+ # derived class: *Wav2Vec2FeatureExtractor*
335
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
336
+ "facebook/wav2vec2-base-960h"
337
+ ) # Download feature_extraction_config from huggingface.co and cache.
338
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
339
+ "./test/saved_model/"
340
+ ) # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')*
341
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json")
342
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
343
+ "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False
344
+ )
345
+ assert feature_extractor.return_attention_mask is False
346
+ feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained(
347
+ "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True
348
+ )
349
+ assert feature_extractor.return_attention_mask is False
350
+ assert unused_kwargs == {"foo": False}
351
+ ```"""
352
+ kwargs["cache_dir"] = cache_dir
353
+ kwargs["force_download"] = force_download
354
+ kwargs["local_files_only"] = local_files_only
355
+ kwargs["revision"] = revision
356
+
357
+ use_auth_token = kwargs.pop("use_auth_token", None)
358
+ if use_auth_token is not None:
359
+ warnings.warn(
360
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
361
+ FutureWarning,
362
+ )
363
+ if token is not None:
364
+ raise ValueError(
365
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
366
+ )
367
+ token = use_auth_token
368
+
369
+ if token is not None:
370
+ kwargs["token"] = token
371
+
372
+ feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
373
+
374
+ return cls.from_dict(feature_extractor_dict, **kwargs)
375
+
376
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
377
+ """
378
+ Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the
379
+ [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method.
380
+
381
+ Args:
382
+ save_directory (`str` or `os.PathLike`):
383
+ Directory where the feature extractor JSON file will be saved (will be created if it does not exist).
384
+ push_to_hub (`bool`, *optional*, defaults to `False`):
385
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
386
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
387
+ namespace).
388
+ kwargs (`Dict[str, Any]`, *optional*):
389
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
390
+ """
391
+ use_auth_token = kwargs.pop("use_auth_token", None)
392
+
393
+ if use_auth_token is not None:
394
+ warnings.warn(
395
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
396
+ FutureWarning,
397
+ )
398
+ if kwargs.get("token", None) is not None:
399
+ raise ValueError(
400
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
401
+ )
402
+ kwargs["token"] = use_auth_token
403
+
404
+ if os.path.isfile(save_directory):
405
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
406
+
407
+ os.makedirs(save_directory, exist_ok=True)
408
+
409
+ if push_to_hub:
410
+ commit_message = kwargs.pop("commit_message", None)
411
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
412
+ repo_id = self._create_repo(repo_id, **kwargs)
413
+ files_timestamps = self._get_files_timestamps(save_directory)
414
+
415
+ # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
416
+ # loaded from the Hub.
417
+ if self._auto_class is not None:
418
+ custom_object_save(self, save_directory, config=self)
419
+
420
+ # If we save using the predefined names, we can load using `from_pretrained`
421
+ output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME)
422
+
423
+ self.to_json_file(output_feature_extractor_file)
424
+ logger.info(f"Feature extractor saved in {output_feature_extractor_file}")
425
+
426
+ if push_to_hub:
427
+ self._upload_modified_files(
428
+ save_directory,
429
+ repo_id,
430
+ files_timestamps,
431
+ commit_message=commit_message,
432
+ token=kwargs.get("token"),
433
+ )
434
+
435
+ return [output_feature_extractor_file]
436
+
437
+ @classmethod
438
+ def get_feature_extractor_dict(
439
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
440
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
441
+ """
442
+ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
443
+ feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] using `from_dict`.
444
+
445
+ Parameters:
446
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
447
+ The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
448
+
449
+ Returns:
450
+ `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor object.
451
+ """
452
+ cache_dir = kwargs.pop("cache_dir", None)
453
+ force_download = kwargs.pop("force_download", False)
454
+ resume_download = kwargs.pop("resume_download", False)
455
+ proxies = kwargs.pop("proxies", None)
456
+ subfolder = kwargs.pop("subfolder", None)
457
+ token = kwargs.pop("token", None)
458
+ use_auth_token = kwargs.pop("use_auth_token", None)
459
+ local_files_only = kwargs.pop("local_files_only", False)
460
+ revision = kwargs.pop("revision", None)
461
+
462
+ if use_auth_token is not None:
463
+ warnings.warn(
464
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
465
+ FutureWarning,
466
+ )
467
+ if token is not None:
468
+ raise ValueError(
469
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
470
+ )
471
+ token = use_auth_token
472
+
473
+ from_pipeline = kwargs.pop("_from_pipeline", None)
474
+ from_auto_class = kwargs.pop("_from_auto", False)
475
+
476
+ user_agent = {"file_type": "feature extractor", "from_auto_class": from_auto_class}
477
+ if from_pipeline is not None:
478
+ user_agent["using_pipeline"] = from_pipeline
479
+
480
+ if is_offline_mode() and not local_files_only:
481
+ logger.info("Offline mode: forcing local_files_only=True")
482
+ local_files_only = True
483
+
484
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
485
+ is_local = os.path.isdir(pretrained_model_name_or_path)
486
+ if os.path.isdir(pretrained_model_name_or_path):
487
+ feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME)
488
+ if os.path.isfile(pretrained_model_name_or_path):
489
+ resolved_feature_extractor_file = pretrained_model_name_or_path
490
+ is_local = True
491
+ elif is_remote_url(pretrained_model_name_or_path):
492
+ feature_extractor_file = pretrained_model_name_or_path
493
+ resolved_feature_extractor_file = download_url(pretrained_model_name_or_path)
494
+ else:
495
+ feature_extractor_file = FEATURE_EXTRACTOR_NAME
496
+ try:
497
+ # Load from local folder or from cache or download from model Hub and cache
498
+ resolved_feature_extractor_file = cached_file(
499
+ pretrained_model_name_or_path,
500
+ feature_extractor_file,
501
+ cache_dir=cache_dir,
502
+ force_download=force_download,
503
+ proxies=proxies,
504
+ resume_download=resume_download,
505
+ local_files_only=local_files_only,
506
+ subfolder=subfolder,
507
+ token=token,
508
+ user_agent=user_agent,
509
+ revision=revision,
510
+ )
511
+ except EnvironmentError:
512
+ # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to
513
+ # the original exception.
514
+ raise
515
+ except Exception:
516
+ # For any other exception, we throw a generic error.
517
+ raise EnvironmentError(
518
+ f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load"
519
+ " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
520
+ f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
521
+ f" directory containing a {FEATURE_EXTRACTOR_NAME} file"
522
+ )
523
+
524
+ try:
525
+ # Load feature_extractor dict
526
+ with open(resolved_feature_extractor_file, "r", encoding="utf-8") as reader:
527
+ text = reader.read()
528
+ feature_extractor_dict = json.loads(text)
529
+
530
+ except json.JSONDecodeError:
531
+ raise EnvironmentError(
532
+ f"It looks like the config file at '{resolved_feature_extractor_file}' is not a valid JSON file."
533
+ )
534
+
535
+ if is_local:
536
+ logger.info(f"loading configuration file {resolved_feature_extractor_file}")
537
+ else:
538
+ logger.info(
539
+ f"loading configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}"
540
+ )
541
+
542
+ if "auto_map" in feature_extractor_dict and not is_local:
543
+ feature_extractor_dict["auto_map"] = add_model_info_to_auto_map(
544
+ feature_extractor_dict["auto_map"], pretrained_model_name_or_path
545
+ )
546
+
547
+ return feature_extractor_dict, kwargs
548
+
549
+ @classmethod
550
+ def from_dict(cls, feature_extractor_dict: Dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor:
551
+ """
552
+ Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of
553
+ parameters.
554
+
555
+ Args:
556
+ feature_extractor_dict (`Dict[str, Any]`):
557
+ Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be
558
+ retrieved from a pretrained checkpoint by leveraging the
559
+ [`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method.
560
+ kwargs (`Dict[str, Any]`):
561
+ Additional parameters from which to initialize the feature extractor object.
562
+
563
+ Returns:
564
+ [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those
565
+ parameters.
566
+ """
567
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
568
+
569
+ feature_extractor = cls(**feature_extractor_dict)
570
+
571
+ # Update feature_extractor with kwargs if needed
572
+ to_remove = []
573
+ for key, value in kwargs.items():
574
+ if hasattr(feature_extractor, key):
575
+ setattr(feature_extractor, key, value)
576
+ to_remove.append(key)
577
+ for key in to_remove:
578
+ kwargs.pop(key, None)
579
+
580
+ logger.info(f"Feature extractor {feature_extractor}")
581
+ if return_unused_kwargs:
582
+ return feature_extractor, kwargs
583
+ else:
584
+ return feature_extractor
585
+
586
+ def to_dict(self) -> Dict[str, Any]:
587
+ """
588
+ Serializes this instance to a Python dictionary. Returns:
589
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
590
+ """
591
+ output = copy.deepcopy(self.__dict__)
592
+ output["feature_extractor_type"] = self.__class__.__name__
593
+ if "mel_filters" in output:
594
+ del output["mel_filters"]
595
+ if "window" in output:
596
+ del output["window"]
597
+ return output
598
+
599
+ @classmethod
600
+ def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor:
601
+ """
602
+ Instantiates a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] from the path to
603
+ a JSON file of parameters.
604
+
605
+ Args:
606
+ json_file (`str` or `os.PathLike`):
607
+ Path to the JSON file containing the parameters.
608
+
609
+ Returns:
610
+ A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature_extractor
611
+ object instantiated from that JSON file.
612
+ """
613
+ with open(json_file, "r", encoding="utf-8") as reader:
614
+ text = reader.read()
615
+ feature_extractor_dict = json.loads(text)
616
+ return cls(**feature_extractor_dict)
617
+
618
+ def to_json_string(self) -> str:
619
+ """
620
+ Serializes this instance to a JSON string.
621
+
622
+ Returns:
623
+ `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
624
+ """
625
+ dictionary = self.to_dict()
626
+
627
+ for key, value in dictionary.items():
628
+ if isinstance(value, np.ndarray):
629
+ dictionary[key] = value.tolist()
630
+
631
+ # make sure private name "_processor_class" is correctly
632
+ # saved as "processor_class"
633
+ _processor_class = dictionary.pop("_processor_class", None)
634
+ if _processor_class is not None:
635
+ dictionary["processor_class"] = _processor_class
636
+
637
+ return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"
638
+
639
+ def to_json_file(self, json_file_path: Union[str, os.PathLike]):
640
+ """
641
+ Save this instance to a JSON file.
642
+
643
+ Args:
644
+ json_file_path (`str` or `os.PathLike`):
645
+ Path to the JSON file in which this feature_extractor instance's parameters will be saved.
646
+ """
647
+ with open(json_file_path, "w", encoding="utf-8") as writer:
648
+ writer.write(self.to_json_string())
649
+
650
+ def __repr__(self):
651
+ return f"{self.__class__.__name__} {self.to_json_string()}"
652
+
653
+ @classmethod
654
+ def register_for_auto_class(cls, auto_class="AutoFeatureExtractor"):
655
+ """
656
+ Register this class with a given auto class. This should only be used for custom feature extractors as the ones
657
+ in the library are already mapped with `AutoFeatureExtractor`.
658
+
659
+ <Tip warning={true}>
660
+
661
+ This API is experimental and may have some slight breaking changes in the next releases.
662
+
663
+ </Tip>
664
+
665
+ Args:
666
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoFeatureExtractor"`):
667
+ The auto class to register this new feature extractor with.
668
+ """
669
+ if not isinstance(auto_class, str):
670
+ auto_class = auto_class.__name__
671
+
672
+ import transformers.models.auto as auto_module
673
+
674
+ if not hasattr(auto_module, auto_class):
675
+ raise ValueError(f"{auto_class} is not a valid auto class.")
676
+
677
+ cls._auto_class = auto_class
678
+
679
+
680
+ FeatureExtractionMixin.push_to_hub = copy_func(FeatureExtractionMixin.push_to_hub)
681
+ if FeatureExtractionMixin.push_to_hub.__doc__ is not None:
682
+ FeatureExtractionMixin.push_to_hub.__doc__ = FeatureExtractionMixin.push_to_hub.__doc__.format(
683
+ object="feature extractor", object_class="AutoFeatureExtractor", object_files="feature extractor file"
684
+ )
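As a small, hedged illustration of the save/load round trip provided by the mixin (the temporary directory is arbitrary and `Wav2Vec2FeatureExtractor` stands in for any derived class):

```python
import os
import tempfile

from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)

with tempfile.TemporaryDirectory() as tmp:
    extractor.save_pretrained(tmp)            # writes preprocessor_config.json
    print(os.listdir(tmp))                    # ['preprocessor_config.json']

    reloaded = Wav2Vec2FeatureExtractor.from_pretrained(tmp)
    assert reloaded.sampling_rate == extractor.sampling_rate
    print(sorted(reloaded.to_dict().keys()))  # same parameters as before saving
```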
venv/lib/python3.10/site-packages/transformers/file_utils.py ADDED
@@ -0,0 +1,133 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ File utilities: utilities related to download and cache models
16
+
17
+ This module should not be updated anymore and is only left for backward compatibility.
18
+ """
19
+
20
+ from huggingface_hub import get_full_repo_name # for backward compatibility
21
+ from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY # for backward compatibility
22
+
23
+ from . import __version__
24
+
25
+ # Backward compatibility imports, to make sure all those objects can be found in file_utils
26
+ from .utils import (
27
+ CLOUDFRONT_DISTRIB_PREFIX,
28
+ CONFIG_NAME,
29
+ DUMMY_INPUTS,
30
+ DUMMY_MASK,
31
+ ENV_VARS_TRUE_AND_AUTO_VALUES,
32
+ ENV_VARS_TRUE_VALUES,
33
+ FEATURE_EXTRACTOR_NAME,
34
+ FLAX_WEIGHTS_NAME,
35
+ HF_MODULES_CACHE,
36
+ HUGGINGFACE_CO_PREFIX,
37
+ HUGGINGFACE_CO_RESOLVE_ENDPOINT,
38
+ MODEL_CARD_NAME,
39
+ MULTIPLE_CHOICE_DUMMY_INPUTS,
40
+ PYTORCH_PRETRAINED_BERT_CACHE,
41
+ PYTORCH_TRANSFORMERS_CACHE,
42
+ S3_BUCKET_PREFIX,
43
+ SENTENCEPIECE_UNDERLINE,
44
+ SPIECE_UNDERLINE,
45
+ TF2_WEIGHTS_NAME,
46
+ TF_WEIGHTS_NAME,
47
+ TORCH_FX_REQUIRED_VERSION,
48
+ TRANSFORMERS_CACHE,
49
+ TRANSFORMERS_DYNAMIC_MODULE_NAME,
50
+ USE_JAX,
51
+ USE_TF,
52
+ USE_TORCH,
53
+ WEIGHTS_INDEX_NAME,
54
+ WEIGHTS_NAME,
55
+ ContextManagers,
56
+ DummyObject,
57
+ EntryNotFoundError,
58
+ ExplicitEnum,
59
+ ModelOutput,
60
+ PaddingStrategy,
61
+ PushToHubMixin,
62
+ RepositoryNotFoundError,
63
+ RevisionNotFoundError,
64
+ TensorType,
65
+ _LazyModule,
66
+ add_code_sample_docstrings,
67
+ add_end_docstrings,
68
+ add_start_docstrings,
69
+ add_start_docstrings_to_model_forward,
70
+ cached_property,
71
+ copy_func,
72
+ default_cache_path,
73
+ define_sagemaker_information,
74
+ get_cached_models,
75
+ get_file_from_repo,
76
+ get_torch_version,
77
+ has_file,
78
+ http_user_agent,
79
+ is_apex_available,
80
+ is_bs4_available,
81
+ is_coloredlogs_available,
82
+ is_datasets_available,
83
+ is_detectron2_available,
84
+ is_faiss_available,
85
+ is_flax_available,
86
+ is_ftfy_available,
87
+ is_g2p_en_available,
88
+ is_in_notebook,
89
+ is_ipex_available,
90
+ is_librosa_available,
91
+ is_offline_mode,
92
+ is_onnx_available,
93
+ is_pandas_available,
94
+ is_phonemizer_available,
95
+ is_protobuf_available,
96
+ is_psutil_available,
97
+ is_py3nvml_available,
98
+ is_pyctcdecode_available,
99
+ is_pytesseract_available,
100
+ is_pytorch_quantization_available,
101
+ is_rjieba_available,
102
+ is_sagemaker_dp_enabled,
103
+ is_sagemaker_mp_enabled,
104
+ is_scipy_available,
105
+ is_sentencepiece_available,
106
+ is_seqio_available,
107
+ is_sklearn_available,
108
+ is_soundfile_availble,
109
+ is_spacy_available,
110
+ is_speech_available,
111
+ is_tensor,
112
+ is_tensorflow_probability_available,
113
+ is_tf2onnx_available,
114
+ is_tf_available,
115
+ is_timm_available,
116
+ is_tokenizers_available,
117
+ is_torch_available,
118
+ is_torch_bf16_available,
119
+ is_torch_cuda_available,
120
+ is_torch_fx_available,
121
+ is_torch_fx_proxy,
122
+ is_torch_mps_available,
123
+ is_torch_tf32_available,
124
+ is_torch_xla_available,
125
+ is_torchaudio_available,
126
+ is_training_run_on_sagemaker,
127
+ is_vision_available,
128
+ replace_return_docstrings,
129
+ requires_backends,
130
+ to_numpy,
131
+ to_py_obj,
132
+ torch_only_method,
133
+ )
venv/lib/python3.10/site-packages/transformers/hf_argparser.py ADDED
@@ -0,0 +1,424 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import dataclasses
16
+ import json
17
+ import os
18
+ import sys
19
+ import types
20
+ from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
21
+ from copy import copy
22
+ from enum import Enum
23
+ from inspect import isclass
24
+ from pathlib import Path
25
+ from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
26
+
27
+ import yaml
28
+
29
+
30
+ DataClass = NewType("DataClass", Any)
31
+ DataClassType = NewType("DataClassType", Any)
32
+
33
+
34
+ # From https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
35
+ def string_to_bool(v):
36
+ if isinstance(v, bool):
37
+ return v
38
+ if v.lower() in ("yes", "true", "t", "y", "1"):
39
+ return True
40
+ elif v.lower() in ("no", "false", "f", "n", "0"):
41
+ return False
42
+ else:
43
+ raise ArgumentTypeError(
44
+ f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
45
+ )
46
+
47
+
48
+ def make_choice_type_function(choices: list) -> Callable[[str], Any]:
49
+ """
50
+ Creates a mapping function from each choice's string representation to the actual value. Used to support multiple
51
+ value types for a single argument.
52
+
53
+ Args:
54
+ choices (list): List of choices.
55
+
56
+ Returns:
57
+ Callable[[str], Any]: Mapping function from string representation to actual value for each choice.
58
+ """
59
+ str_to_choice = {str(choice): choice for choice in choices}
60
+ return lambda arg: str_to_choice.get(arg, arg)
61
+
62
+
63
+ def HfArg(
64
+ *,
65
+ aliases: Union[str, List[str]] = None,
66
+ help: str = None,
67
+ default: Any = dataclasses.MISSING,
68
+ default_factory: Callable[[], Any] = dataclasses.MISSING,
69
+ metadata: dict = None,
70
+ **kwargs,
71
+ ) -> dataclasses.Field:
72
+ """Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`.
73
+
74
+ Example comparing the use of `HfArg` and `dataclasses.field`:
75
+ ```
76
+ @dataclass
77
+ class Args:
78
+ regular_arg: str = dataclasses.field(default="Huggingface", metadata={"aliases": ["--example", "-e"], "help": "This syntax could be better!"})
79
+ hf_arg: str = HfArg(default="Huggingface", aliases=["--example", "-e"], help="What a nice syntax!")
80
+ ```
81
+
82
+ Args:
83
+ aliases (Union[str, List[str]], optional):
84
+ Single string or list of strings of aliases to pass on to argparse, e.g. `aliases=["--example", "-e"]`.
85
+ Defaults to None.
86
+ help (str, optional): Help string to pass on to argparse that can be displayed with --help. Defaults to None.
87
+ default (Any, optional):
88
+ Default value for the argument. If neither default nor default_factory is specified, the argument is required.
89
+ Defaults to dataclasses.MISSING.
90
+ default_factory (Callable[[], Any], optional):
91
+ The default_factory is a 0-argument function called to initialize a field's value. It is useful to provide
92
+ default values for mutable types, e.g. lists: `default_factory=list`. Mutually exclusive with `default=`.
93
+ Defaults to dataclasses.MISSING.
94
+ metadata (dict, optional): Further metadata to pass on to `dataclasses.field`. Defaults to None.
95
+
96
+ Returns:
97
+ Field: A `dataclasses.Field` with the desired properties.
98
+ """
99
+ if metadata is None:
100
+ # Important, don't use as default param in function signature because dict is mutable and shared across function calls
101
+ metadata = {}
102
+ if aliases is not None:
103
+ metadata["aliases"] = aliases
104
+ if help is not None:
105
+ metadata["help"] = help
106
+
107
+ return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
108
+
109
+
110
+ class HfArgumentParser(ArgumentParser):
111
+ """
112
+ This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
113
+
114
+ The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed)
115
+ arguments to the parser after initialization and you'll get the output back after parsing as an additional
116
+ namespace. Optional: To create sub argument groups use the `_argument_group_name` attribute in the dataclass.
117
+ """
118
+
119
+ dataclass_types: Iterable[DataClassType]
120
+
121
+ def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
122
+ """
123
+ Args:
124
+ dataclass_types:
125
+ Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.
126
+ kwargs (`Dict[str, Any]`, *optional*):
127
+ Passed to `argparse.ArgumentParser()` in the regular way.
128
+ """
129
+ # To make the default appear when using --help
130
+ if "formatter_class" not in kwargs:
131
+ kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
132
+ super().__init__(**kwargs)
133
+ if dataclasses.is_dataclass(dataclass_types):
134
+ dataclass_types = [dataclass_types]
135
+ self.dataclass_types = list(dataclass_types)
136
+ for dtype in self.dataclass_types:
137
+ self._add_dataclass_arguments(dtype)
138
+
139
+ @staticmethod
140
+ def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
141
+ field_name = f"--{field.name}"
142
+ kwargs = field.metadata.copy()
143
+ # field.metadata is not used at all by Data Classes,
144
+ # it is provided as a third-party extension mechanism.
145
+ if isinstance(field.type, str):
146
+ raise RuntimeError(
147
+ "Unresolved type detected, which should have been done with the help of "
148
+ "`typing.get_type_hints` method by default"
149
+ )
150
+
151
+ aliases = kwargs.pop("aliases", [])
152
+ if isinstance(aliases, str):
153
+ aliases = [aliases]
154
+
155
+ origin_type = getattr(field.type, "__origin__", field.type)
156
+ if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
157
+ if str not in field.type.__args__ and (
158
+ len(field.type.__args__) != 2 or type(None) not in field.type.__args__
159
+ ):
160
+ raise ValueError(
161
+ "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
162
+ " the argument parser only supports one type per argument."
163
+ f" Problem encountered in field '{field.name}'."
164
+ )
165
+ if type(None) not in field.type.__args__:
166
+ # filter `str` in Union
167
+ field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
168
+ origin_type = getattr(field.type, "__origin__", field.type)
169
+ elif bool not in field.type.__args__:
170
+ # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
171
+ field.type = (
172
+ field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
173
+ )
174
+ origin_type = getattr(field.type, "__origin__", field.type)
175
+
176
+ # A variable to store kwargs for a boolean field, if needed
177
+ # so that we can init a `no_*` complement argument (see below)
178
+ bool_kwargs = {}
179
+ if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
180
+ if origin_type is Literal:
181
+ kwargs["choices"] = field.type.__args__
182
+ else:
183
+ kwargs["choices"] = [x.value for x in field.type]
184
+
185
+ kwargs["type"] = make_choice_type_function(kwargs["choices"])
186
+
187
+ if field.default is not dataclasses.MISSING:
188
+ kwargs["default"] = field.default
189
+ else:
190
+ kwargs["required"] = True
191
+ elif field.type is bool or field.type == Optional[bool]:
192
+ # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
193
+ # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
194
+ bool_kwargs = copy(kwargs)
195
+
196
+ # Hack because type=bool in argparse does not behave as we want.
197
+ kwargs["type"] = string_to_bool
198
+ if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
199
+ # Default value is False if there is no default and the field is of type bool.
200
+ default = False if field.default is dataclasses.MISSING else field.default
201
+ # This is the value that will get picked if we don't include --field_name in any way
202
+ kwargs["default"] = default
203
+ # This tells argparse we accept 0 or 1 value after --field_name
204
+ kwargs["nargs"] = "?"
205
+ # This is the value that will get picked if we do --field_name (without value)
206
+ kwargs["const"] = True
207
+ elif isclass(origin_type) and issubclass(origin_type, list):
208
+ kwargs["type"] = field.type.__args__[0]
209
+ kwargs["nargs"] = "+"
210
+ if field.default_factory is not dataclasses.MISSING:
211
+ kwargs["default"] = field.default_factory()
212
+ elif field.default is dataclasses.MISSING:
213
+ kwargs["required"] = True
214
+ else:
215
+ kwargs["type"] = field.type
216
+ if field.default is not dataclasses.MISSING:
217
+ kwargs["default"] = field.default
218
+ elif field.default_factory is not dataclasses.MISSING:
219
+ kwargs["default"] = field.default_factory()
220
+ else:
221
+ kwargs["required"] = True
222
+ parser.add_argument(field_name, *aliases, **kwargs)
223
+
224
+ # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
225
+ # Order is important for arguments with the same destination!
226
+ # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
227
+ # here and we do not need those changes/additional keys.
228
+ if field.default is True and (field.type is bool or field.type == Optional[bool]):
229
+ bool_kwargs["default"] = False
230
+ parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
231
+
232
+ def _add_dataclass_arguments(self, dtype: DataClassType):
233
+ if hasattr(dtype, "_argument_group_name"):
234
+ parser = self.add_argument_group(dtype._argument_group_name)
235
+ else:
236
+ parser = self
237
+
238
+ try:
239
+ type_hints: Dict[str, type] = get_type_hints(dtype)
240
+ except NameError:
241
+ raise RuntimeError(
242
+ f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
243
+ "removing line of `from __future__ import annotations` which opts in Postponed "
244
+ "Evaluation of Annotations (PEP 563)"
245
+ )
246
+ except TypeError as ex:
247
+ # Remove this block when we drop Python 3.9 support
248
+ if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
249
+ python_version = ".".join(map(str, sys.version_info[:3]))
250
+ raise RuntimeError(
251
+ f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
252
+ "line of `from __future__ import annotations` which opts in union types as "
253
+ "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
254
+ "support Python versions that lower than 3.10, you need to use "
255
+ "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
256
+ "`X | None`."
257
+ ) from ex
258
+ raise
259
+
260
+ for field in dataclasses.fields(dtype):
261
+ if not field.init:
262
+ continue
263
+ field.type = type_hints[field.name]
264
+ self._parse_dataclass_field(parser, field)
265
+
266
+ def parse_args_into_dataclasses(
267
+ self,
268
+ args=None,
269
+ return_remaining_strings=False,
270
+ look_for_args_file=True,
271
+ args_filename=None,
272
+ args_file_flag=None,
273
+ ) -> Tuple[DataClass, ...]:
274
+ """
275
+ Parse command-line args into instances of the specified dataclass types.
276
+
277
+ This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:
278
+ docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args
279
+
280
+ Args:
281
+ args:
282
+ List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)
283
+ return_remaining_strings:
284
+ If true, also return a list of remaining argument strings.
285
+ look_for_args_file:
286
+ If true, will look for a ".args" file with the same base name as the entry point script for this
287
+ process, and will append its potential content to the command line args.
288
+ args_filename:
289
+ If not None, will use this file instead of the ".args" file specified in the previous argument.
290
+ args_file_flag:
291
+ If not None, will look for a file in the command-line args specified with this flag. The flag can be
292
+ specified multiple times and precedence is determined by the order (last one wins).
293
+
294
+ Returns:
295
+ Tuple consisting of:
296
+
297
+ - the dataclass instances in the same order as they were passed to the initializer.
298
+ - if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser
299
+ after initialization.
300
+ - The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)
301
+ """
302
+
303
+ if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
304
+ args_files = []
305
+
306
+ if args_filename:
307
+ args_files.append(Path(args_filename))
308
+ elif look_for_args_file and len(sys.argv):
309
+ args_files.append(Path(sys.argv[0]).with_suffix(".args"))
310
+
311
+ # args files specified via command line flag should overwrite default args files so we add them last
312
+ if args_file_flag:
313
+ # Create special parser just to extract the args_file_flag values
314
+ args_file_parser = ArgumentParser()
315
+ args_file_parser.add_argument(args_file_flag, type=str, action="append")
316
+
317
+ # Use only remaining args for further parsing (remove the args_file_flag)
318
+ cfg, args = args_file_parser.parse_known_args(args=args)
319
+ cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
320
+
321
+ if cmd_args_file_paths:
322
+ args_files.extend([Path(p) for p in cmd_args_file_paths])
323
+
324
+ file_args = []
325
+ for args_file in args_files:
326
+ if args_file.exists():
327
+ file_args += args_file.read_text().split()
328
+
329
+ # in case of duplicate arguments the last one has precedence
330
+ # args specified via the command line should overwrite args from files, so we add them last
331
+ args = file_args + args if args is not None else file_args + sys.argv[1:]
332
+ namespace, remaining_args = self.parse_known_args(args=args)
333
+ outputs = []
334
+ for dtype in self.dataclass_types:
335
+ keys = {f.name for f in dataclasses.fields(dtype) if f.init}
336
+ inputs = {k: v for k, v in vars(namespace).items() if k in keys}
337
+ for k in keys:
338
+ delattr(namespace, k)
339
+ obj = dtype(**inputs)
340
+ outputs.append(obj)
341
+ if len(namespace.__dict__) > 0:
342
+ # additional namespace.
343
+ outputs.append(namespace)
344
+ if return_remaining_strings:
345
+ return (*outputs, remaining_args)
346
+ else:
347
+ if remaining_args:
348
+ raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
349
+
350
+ return (*outputs,)
351
+
352
+ def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
353
+ """
354
+ Alternative helper method that does not use `argparse` at all, instead using a dict to populate the dataclass
355
+ types.
356
+
357
+ Args:
358
+ args (`dict`):
359
+ dict containing config values
360
+ allow_extra_keys (`bool`, *optional*, defaults to `False`):
361
+ Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed.
362
+
363
+ Returns:
364
+ Tuple consisting of:
365
+
366
+ - the dataclass instances in the same order as they were passed to the initializer.
367
+ """
368
+ unused_keys = set(args.keys())
369
+ outputs = []
370
+ for dtype in self.dataclass_types:
371
+ keys = {f.name for f in dataclasses.fields(dtype) if f.init}
372
+ inputs = {k: v for k, v in args.items() if k in keys}
373
+ unused_keys.difference_update(inputs.keys())
374
+ obj = dtype(**inputs)
375
+ outputs.append(obj)
376
+ if not allow_extra_keys and unused_keys:
377
+ raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
378
+ return tuple(outputs)
379
+
380
+ def parse_json_file(
381
+ self, json_file: Union[str, os.PathLike], allow_extra_keys: bool = False
382
+ ) -> Tuple[DataClass, ...]:
383
+ """
384
+ Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the
385
+ dataclass types.
386
+
387
+ Args:
388
+ json_file (`str` or `os.PathLike`):
389
+ File name of the json file to parse
390
+ allow_extra_keys (`bool`, *optional*, defaults to `False`):
391
+ Defaults to False. If False, will raise an exception if the json file contains keys that are not
392
+ parsed.
393
+
394
+ Returns:
395
+ Tuple consisting of:
396
+
397
+ - the dataclass instances in the same order as they were passed to the initializer.
398
+ """
399
+ with open(Path(json_file), encoding="utf-8") as open_json_file:
400
+ data = json.loads(open_json_file.read())
401
+ outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
402
+ return tuple(outputs)
403
+
404
+ def parse_yaml_file(
405
+ self, yaml_file: Union[str, os.PathLike], allow_extra_keys: bool = False
406
+ ) -> Tuple[DataClass, ...]:
407
+ """
408
+ Alternative helper method that does not use `argparse` at all, instead loading a yaml file and populating the
409
+ dataclass types.
410
+
411
+ Args:
412
+ yaml_file (`str` or `os.PathLike`):
413
+ File name of the yaml file to parse
414
+ allow_extra_keys (`bool`, *optional*, defaults to `False`):
415
+ Defaults to False. If False, will raise an exception if the yaml file contains keys that are not
416
+ parsed.
417
+
418
+ Returns:
419
+ Tuple consisting of:
420
+
421
+ - the dataclass instances in the same order as they were passed to the initializer.
422
+ """
423
+ outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
424
+ return tuple(outputs)
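Taken together, a minimal usage sketch of the parser added above (the `ScriptArguments` dataclass, its fields, and the sample values are illustrative assumptions, not part of the library):

    from dataclasses import dataclass
    from transformers.hf_argparser import HfArg, HfArgumentParser

    @dataclass
    class ScriptArguments:
        model_name: str = HfArg(default="gpt2", aliases=["-m"], help="Model identifier.")
        learning_rate: float = HfArg(default=5e-5, help="Peak learning rate.")

    parser = HfArgumentParser(ScriptArguments)

    # Parse an explicit argv-style list (omit `args` to read sys.argv instead).
    (args,) = parser.parse_args_into_dataclasses(args=["-m", "bert-base-uncased"])

    # Or bypass argparse entirely and populate the dataclass from a dict / JSON / YAML config.
    (args_from_dict,) = parser.parse_dict({"model_name": "gpt2", "learning_rate": 3e-5})

Keys not consumed by any dataclass raise a ValueError unless `allow_extra_keys=True` is passed to `parse_dict`.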
venv/lib/python3.10/site-packages/transformers/hyperparameter_search.py ADDED
@@ -0,0 +1,141 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from .integrations import (
17
+ is_optuna_available,
18
+ is_ray_tune_available,
19
+ is_sigopt_available,
20
+ is_wandb_available,
21
+ run_hp_search_optuna,
22
+ run_hp_search_ray,
23
+ run_hp_search_sigopt,
24
+ run_hp_search_wandb,
25
+ )
26
+ from .trainer_utils import (
27
+ HPSearchBackend,
28
+ default_hp_space_optuna,
29
+ default_hp_space_ray,
30
+ default_hp_space_sigopt,
31
+ default_hp_space_wandb,
32
+ )
33
+ from .utils import logging
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+
39
+ class HyperParamSearchBackendBase:
40
+ name: str
41
+ pip_package: str = None
42
+
43
+ @staticmethod
44
+ def is_available():
45
+ raise NotImplementedError
46
+
47
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
48
+ raise NotImplementedError
49
+
50
+ def default_hp_space(self, trial):
51
+ raise NotImplementedError
52
+
53
+ def ensure_available(self):
54
+ if not self.is_available():
55
+ raise RuntimeError(
56
+ f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
57
+ )
58
+
59
+ @classmethod
60
+ def pip_install(cls):
61
+ return f"`pip install {cls.pip_package or cls.name}`"
62
+
63
+
64
+ class OptunaBackend(HyperParamSearchBackendBase):
65
+ name = "optuna"
66
+
67
+ @staticmethod
68
+ def is_available():
69
+ return is_optuna_available()
70
+
71
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
72
+ return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)
73
+
74
+ def default_hp_space(self, trial):
75
+ return default_hp_space_optuna(trial)
76
+
77
+
78
+ class RayTuneBackend(HyperParamSearchBackendBase):
79
+ name = "ray"
80
+ pip_package = "'ray[tune]'"
81
+
82
+ @staticmethod
83
+ def is_available():
84
+ return is_ray_tune_available()
85
+
86
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
87
+ return run_hp_search_ray(trainer, n_trials, direction, **kwargs)
88
+
89
+ def default_hp_space(self, trial):
90
+ return default_hp_space_ray(trial)
91
+
92
+
93
+ class SigOptBackend(HyperParamSearchBackendBase):
94
+ name = "sigopt"
95
+
96
+ @staticmethod
97
+ def is_available():
98
+ return is_sigopt_available()
99
+
100
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
101
+ return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)
102
+
103
+ def default_hp_space(self, trial):
104
+ return default_hp_space_sigopt(trial)
105
+
106
+
107
+ class WandbBackend(HyperParamSearchBackendBase):
108
+ name = "wandb"
109
+
110
+ @staticmethod
111
+ def is_available():
112
+ return is_wandb_available()
113
+
114
+ def run(self, trainer, n_trials: int, direction: str, **kwargs):
115
+ return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)
116
+
117
+ def default_hp_space(self, trial):
118
+ return default_hp_space_wandb(trial)
119
+
120
+
121
+ ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
122
+ HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
123
+ }
124
+
125
+
126
+ def default_hp_search_backend() -> str:
127
+ available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
128
+ if len(available_backends) > 0:
129
+ name = available_backends[0].name
130
+ if len(available_backends) > 1:
131
+ logger.info(
132
+ f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
133
+ )
134
+ return name
135
+ raise RuntimeError(
136
+ "No hyperparameter search backend available.\n"
137
+ + "\n".join(
138
+ f" - To install {backend.name} run {backend.pip_install()}"
139
+ for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
140
+ )
141
+ )
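A hedged sketch of how these backends are typically resolved (this mirrors the way `Trainer.hyperparameter_search` consumes the module; the trial count and direction are arbitrary examples, and at least one of optuna, ray[tune], sigopt, or wandb must be installed):

    from transformers.hyperparameter_search import (
        ALL_HYPERPARAMETER_SEARCH_BACKENDS,
        default_hp_search_backend,
    )
    from transformers.trainer_utils import HPSearchBackend

    name = default_hp_search_backend()                        # first available backend, e.g. "optuna"
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
    backend.ensure_available()                                # raises with a `pip install` hint if missing
    # best_run = backend.run(trainer, n_trials=10, direction="minimize")  # `trainer` is a transformers.Trainer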
venv/lib/python3.10/site-packages/transformers/image_processing_utils.py ADDED
@@ -0,0 +1,793 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import copy
17
+ import json
18
+ import os
19
+ import warnings
20
+ from io import BytesIO
21
+ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import requests
25
+
26
+ from .dynamic_module_utils import custom_object_save
27
+ from .feature_extraction_utils import BatchFeature as BaseBatchFeature
28
+ from .image_transforms import center_crop, normalize, rescale
29
+ from .image_utils import ChannelDimension
30
+ from .utils import (
31
+ IMAGE_PROCESSOR_NAME,
32
+ PushToHubMixin,
33
+ add_model_info_to_auto_map,
34
+ cached_file,
35
+ copy_func,
36
+ download_url,
37
+ is_offline_mode,
38
+ is_remote_url,
39
+ is_vision_available,
40
+ logging,
41
+ )
42
+
43
+
44
+ if is_vision_available():
45
+ from PIL import Image
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ # TODO: Move BatchFeature to be imported by both image_processing_utils and feature_extraction_utils
51
+ # We override the class string here, but logic is the same.
52
+ class BatchFeature(BaseBatchFeature):
53
+ r"""
54
+ Holds the output of the image processor specific `__call__` methods.
55
+
56
+ This class is derived from a python dictionary and can be used as a dictionary.
57
+
58
+ Args:
59
+ data (`dict`):
60
+ Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.).
61
+ tensor_type (`Union[None, str, TensorType]`, *optional*):
62
+ You can give a tensor_type here to convert the lists of integers to PyTorch/TensorFlow/Numpy Tensors at
63
+ initialization.
64
+ """
65
+
66
+
67
+ # TODO: (Amy) - factor out the common parts of this and the feature extractor
68
+ class ImageProcessingMixin(PushToHubMixin):
69
+ """
70
+ This is an image processor mixin used to provide saving/loading functionality for sequential and image feature
71
+ extractors.
72
+ """
73
+
74
+ _auto_class = None
75
+
76
+ def __init__(self, **kwargs):
77
+ """Set elements of `kwargs` as attributes."""
78
+ # This key was saved while we still used `XXXFeatureExtractor` for image processing. Now we use
79
+ # `XXXImageProcessor`, this attribute and its value are misleading.
80
+ kwargs.pop("feature_extractor_type", None)
81
+ # Pop "processor_class" as it should be saved as private attribute
82
+ self._processor_class = kwargs.pop("processor_class", None)
83
+ # Additional attributes without default values
84
+ for key, value in kwargs.items():
85
+ try:
86
+ setattr(self, key, value)
87
+ except AttributeError as err:
88
+ logger.error(f"Can't set {key} with value {value} for {self}")
89
+ raise err
90
+
91
+ def _set_processor_class(self, processor_class: str):
92
+ """Sets processor class as an attribute."""
93
+ self._processor_class = processor_class
94
+
95
+ @classmethod
96
+ def from_pretrained(
97
+ cls,
98
+ pretrained_model_name_or_path: Union[str, os.PathLike],
99
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
100
+ force_download: bool = False,
101
+ local_files_only: bool = False,
102
+ token: Optional[Union[str, bool]] = None,
103
+ revision: str = "main",
104
+ **kwargs,
105
+ ):
106
+ r"""
107
+ Instantiate a type of [`~image_processing_utils.ImageProcessingMixin`] from an image processor.
108
+
109
+ Args:
110
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
111
+ This can be either:
112
+
113
+ - a string, the *model id* of a pretrained image_processor hosted inside a model repo on
114
+ huggingface.co.
115
+ - a path to a *directory* containing an image processor file saved using the
116
+ [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g.,
117
+ `./my_model_directory/`.
118
+ - a path or url to a saved image processor JSON *file*, e.g.,
119
+ `./my_model_directory/preprocessor_config.json`.
120
+ cache_dir (`str` or `os.PathLike`, *optional*):
121
+ Path to a directory in which a downloaded pretrained model image processor should be cached if the
122
+ standard cache should not be used.
123
+ force_download (`bool`, *optional*, defaults to `False`):
124
+ Whether or not to force to (re-)download the image processor files and override the cached versions if
125
+ they exist.
126
+ resume_download (`bool`, *optional*, defaults to `False`):
127
+ Whether or not to delete incompletely received file. Attempts to resume the download if such a file
128
+ exists.
129
+ proxies (`Dict[str, str]`, *optional*):
130
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
131
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
132
+ token (`str` or `bool`, *optional*):
133
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
134
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
135
+ revision (`str`, *optional*, defaults to `"main"`):
136
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
137
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
138
+ identifier allowed by git.
139
+
140
+
141
+ <Tip>
142
+
143
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
144
+
145
+ </Tip>
146
+
147
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
148
+ If `False`, then this function returns just the final image processor object. If `True`, then this
149
+ function returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
150
+ consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of
151
+ `kwargs` which has not been used to update `image_processor` and is otherwise ignored.
152
+ subfolder (`str`, *optional*, defaults to `""`):
153
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
154
+ specify the folder name here.
155
+ kwargs (`Dict[str, Any]`, *optional*):
156
+ The values in kwargs of any keys which are image processor attributes will be used to override the
157
+ loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is
158
+ controlled by the `return_unused_kwargs` keyword parameter.
159
+
160
+ Returns:
161
+ An image processor of type [`~image_processing_utils.ImageProcessingMixin`].
162
+
163
+ Examples:
164
+
165
+ ```python
166
+ # We can't instantiate directly the base class *ImageProcessingMixin* so let's show the examples on a
167
+ # derived class: *CLIPImageProcessor*
168
+ image_processor = CLIPImageProcessor.from_pretrained(
169
+ "openai/clip-vit-base-patch32"
170
+ ) # Download image_processing_config from huggingface.co and cache.
171
+ image_processor = CLIPImageProcessor.from_pretrained(
172
+ "./test/saved_model/"
173
+ ) # E.g. image processor (or model) was saved using *save_pretrained('./test/saved_model/')*
174
+ image_processor = CLIPImageProcessor.from_pretrained("./test/saved_model/preprocessor_config.json")
175
+ image_processor = CLIPImageProcessor.from_pretrained(
176
+ "openai/clip-vit-base-patch32", do_normalize=False, foo=False
177
+ )
178
+ assert image_processor.do_normalize is False
179
+ image_processor, unused_kwargs = CLIPImageProcessor.from_pretrained(
180
+ "openai/clip-vit-base-patch32", do_normalize=False, foo=False, return_unused_kwargs=True
181
+ )
182
+ assert image_processor.do_normalize is False
183
+ assert unused_kwargs == {"foo": False}
184
+ ```"""
185
+ kwargs["cache_dir"] = cache_dir
186
+ kwargs["force_download"] = force_download
187
+ kwargs["local_files_only"] = local_files_only
188
+ kwargs["revision"] = revision
189
+
190
+ use_auth_token = kwargs.pop("use_auth_token", None)
191
+ if use_auth_token is not None:
192
+ warnings.warn(
193
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
194
+ FutureWarning,
195
+ )
196
+ if token is not None:
197
+ raise ValueError(
198
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
199
+ )
200
+ token = use_auth_token
201
+
202
+ if token is not None:
203
+ kwargs["token"] = token
204
+
205
+ image_processor_dict, kwargs = cls.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
206
+
207
+ return cls.from_dict(image_processor_dict, **kwargs)
208
+
209
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
210
+ """
211
+ Save an image processor object to the directory `save_directory`, so that it can be re-loaded using the
212
+ [`~image_processing_utils.ImageProcessingMixin.from_pretrained`] class method.
213
+
214
+ Args:
215
+ save_directory (`str` or `os.PathLike`):
216
+ Directory where the image processor JSON file will be saved (will be created if it does not exist).
217
+ push_to_hub (`bool`, *optional*, defaults to `False`):
218
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
219
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
220
+ namespace).
221
+ kwargs (`Dict[str, Any]`, *optional*):
222
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
223
+ """
224
+ use_auth_token = kwargs.pop("use_auth_token", None)
225
+
226
+ if use_auth_token is not None:
227
+ warnings.warn(
228
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
229
+ FutureWarning,
230
+ )
231
+ if kwargs.get("token", None) is not None:
232
+ raise ValueError(
233
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
234
+ )
235
+ kwargs["token"] = use_auth_token
236
+
237
+ if os.path.isfile(save_directory):
238
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
239
+
240
+ os.makedirs(save_directory, exist_ok=True)
241
+
242
+ if push_to_hub:
243
+ commit_message = kwargs.pop("commit_message", None)
244
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
245
+ repo_id = self._create_repo(repo_id, **kwargs)
246
+ files_timestamps = self._get_files_timestamps(save_directory)
247
+
248
+ # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
249
+ # loaded from the Hub.
250
+ if self._auto_class is not None:
251
+ custom_object_save(self, save_directory, config=self)
252
+
253
+ # If we save using the predefined names, we can load using `from_pretrained`
254
+ output_image_processor_file = os.path.join(save_directory, IMAGE_PROCESSOR_NAME)
255
+
256
+ self.to_json_file(output_image_processor_file)
257
+ logger.info(f"Image processor saved in {output_image_processor_file}")
258
+
259
+ if push_to_hub:
260
+ self._upload_modified_files(
261
+ save_directory,
262
+ repo_id,
263
+ files_timestamps,
264
+ commit_message=commit_message,
265
+ token=kwargs.get("token"),
266
+ )
267
+
268
+ return [output_image_processor_file]
269
+
270
+ @classmethod
271
+ def get_image_processor_dict(
272
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
273
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
274
+ """
275
+ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating an
276
+ image processor of type [`~image_processing_utils.ImageProcessingMixin`] using `from_dict`.
277
+
278
+ Parameters:
279
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
280
+ The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
281
+ subfolder (`str`, *optional*, defaults to `""`):
282
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
283
+ specify the folder name here.
284
+
285
+ Returns:
286
+ `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the image processor object.
287
+ """
288
+ cache_dir = kwargs.pop("cache_dir", None)
289
+ force_download = kwargs.pop("force_download", False)
290
+ resume_download = kwargs.pop("resume_download", False)
291
+ proxies = kwargs.pop("proxies", None)
292
+ token = kwargs.pop("token", None)
293
+ use_auth_token = kwargs.pop("use_auth_token", None)
294
+ local_files_only = kwargs.pop("local_files_only", False)
295
+ revision = kwargs.pop("revision", None)
296
+ subfolder = kwargs.pop("subfolder", "")
297
+
298
+ from_pipeline = kwargs.pop("_from_pipeline", None)
299
+ from_auto_class = kwargs.pop("_from_auto", False)
300
+
301
+ if use_auth_token is not None:
302
+ warnings.warn(
303
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
304
+ FutureWarning,
305
+ )
306
+ if token is not None:
307
+ raise ValueError(
308
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
309
+ )
310
+ token = use_auth_token
311
+
312
+ user_agent = {"file_type": "image processor", "from_auto_class": from_auto_class}
313
+ if from_pipeline is not None:
314
+ user_agent["using_pipeline"] = from_pipeline
315
+
316
+ if is_offline_mode() and not local_files_only:
317
+ logger.info("Offline mode: forcing local_files_only=True")
318
+ local_files_only = True
319
+
320
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
321
+ is_local = os.path.isdir(pretrained_model_name_or_path)
322
+ if os.path.isdir(pretrained_model_name_or_path):
323
+ image_processor_file = os.path.join(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME)
324
+ if os.path.isfile(pretrained_model_name_or_path):
325
+ resolved_image_processor_file = pretrained_model_name_or_path
326
+ is_local = True
327
+ elif is_remote_url(pretrained_model_name_or_path):
328
+ image_processor_file = pretrained_model_name_or_path
329
+ resolved_image_processor_file = download_url(pretrained_model_name_or_path)
330
+ else:
331
+ image_processor_file = IMAGE_PROCESSOR_NAME
332
+ try:
333
+ # Load from local folder or from cache or download from model Hub and cache
334
+ resolved_image_processor_file = cached_file(
335
+ pretrained_model_name_or_path,
336
+ image_processor_file,
337
+ cache_dir=cache_dir,
338
+ force_download=force_download,
339
+ proxies=proxies,
340
+ resume_download=resume_download,
341
+ local_files_only=local_files_only,
342
+ token=token,
343
+ user_agent=user_agent,
344
+ revision=revision,
345
+ subfolder=subfolder,
346
+ )
347
+ except EnvironmentError:
348
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
349
+ # the original exception.
350
+ raise
351
+ except Exception:
352
+ # For any other exception, we throw a generic error.
353
+ raise EnvironmentError(
354
+ f"Can't load image processor for '{pretrained_model_name_or_path}'. If you were trying to load"
355
+ " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
356
+ f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
357
+ f" directory containing a {IMAGE_PROCESSOR_NAME} file"
358
+ )
359
+
360
+ try:
361
+ # Load image_processor dict
362
+ with open(resolved_image_processor_file, "r", encoding="utf-8") as reader:
363
+ text = reader.read()
364
+ image_processor_dict = json.loads(text)
365
+
366
+ except json.JSONDecodeError:
367
+ raise EnvironmentError(
368
+ f"It looks like the config file at '{resolved_image_processor_file}' is not a valid JSON file."
369
+ )
370
+
371
+ if is_local:
372
+ logger.info(f"loading configuration file {resolved_image_processor_file}")
373
+ else:
374
+ logger.info(
375
+ f"loading configuration file {image_processor_file} from cache at {resolved_image_processor_file}"
376
+ )
377
+
378
+ if "auto_map" in image_processor_dict and not is_local:
379
+ image_processor_dict["auto_map"] = add_model_info_to_auto_map(
380
+ image_processor_dict["auto_map"], pretrained_model_name_or_path
381
+ )
382
+
383
+ return image_processor_dict, kwargs
384
+
385
+ @classmethod
386
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
387
+ """
388
+ Instantiates a type of [`~image_processing_utils.ImageProcessingMixin`] from a Python dictionary of parameters.
389
+
390
+ Args:
391
+ image_processor_dict (`Dict[str, Any]`):
392
+ Dictionary that will be used to instantiate the image processor object. Such a dictionary can be
393
+ retrieved from a pretrained checkpoint by leveraging the
394
+ [`~image_processing_utils.ImageProcessingMixin.to_dict`] method.
395
+ kwargs (`Dict[str, Any]`):
396
+ Additional parameters from which to initialize the image processor object.
397
+
398
+ Returns:
399
+ [`~image_processing_utils.ImageProcessingMixin`]: The image processor object instantiated from those
400
+ parameters.
401
+ """
402
+ image_processor_dict = image_processor_dict.copy()
403
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
404
+
405
+ # The `size` parameter is a dict and was previously an int or tuple in feature extractors.
406
+ # We set `size` here directly to the `image_processor_dict` so that it is converted to the appropriate
407
+ # dict within the image processor and isn't overwritten if `size` is passed in as a kwarg.
408
+ if "size" in kwargs and "size" in image_processor_dict:
409
+ image_processor_dict["size"] = kwargs.pop("size")
410
+ if "crop_size" in kwargs and "crop_size" in image_processor_dict:
411
+ image_processor_dict["crop_size"] = kwargs.pop("crop_size")
412
+
413
+ image_processor = cls(**image_processor_dict)
414
+
415
+ # Update image_processor with kwargs if needed
416
+ to_remove = []
417
+ for key, value in kwargs.items():
418
+ if hasattr(image_processor, key):
419
+ setattr(image_processor, key, value)
420
+ to_remove.append(key)
421
+ for key in to_remove:
422
+ kwargs.pop(key, None)
423
+
424
+ logger.info(f"Image processor {image_processor}")
425
+ if return_unused_kwargs:
426
+ return image_processor, kwargs
427
+ else:
428
+ return image_processor
429
+
430
+ def to_dict(self) -> Dict[str, Any]:
431
+ """
432
+ Serializes this instance to a Python dictionary.
433
+
434
+ Returns:
435
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this image processor instance.
436
+ """
437
+ output = copy.deepcopy(self.__dict__)
438
+ output["image_processor_type"] = self.__class__.__name__
439
+
440
+ return output
441
+
442
+ @classmethod
443
+ def from_json_file(cls, json_file: Union[str, os.PathLike]):
444
+ """
445
+ Instantiates an image processor of type [`~image_processing_utils.ImageProcessingMixin`] from the path to a JSON
446
+ file of parameters.
447
+
448
+ Args:
449
+ json_file (`str` or `os.PathLike`):
450
+ Path to the JSON file containing the parameters.
451
+
452
+ Returns:
453
+ An image processor of type [`~image_processing_utils.ImageProcessingMixin`]: The image_processor object
454
+ instantiated from that JSON file.
455
+ """
456
+ with open(json_file, "r", encoding="utf-8") as reader:
457
+ text = reader.read()
458
+ image_processor_dict = json.loads(text)
459
+ return cls(**image_processor_dict)
460
+
461
+ def to_json_string(self) -> str:
462
+ """
463
+ Serializes this instance to a JSON string.
464
+
465
+ Returns:
466
+ `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
467
+ """
468
+ dictionary = self.to_dict()
469
+
470
+ for key, value in dictionary.items():
471
+ if isinstance(value, np.ndarray):
472
+ dictionary[key] = value.tolist()
473
+
474
+ # make sure private name "_processor_class" is correctly
475
+ # saved as "processor_class"
476
+ _processor_class = dictionary.pop("_processor_class", None)
477
+ if _processor_class is not None:
478
+ dictionary["processor_class"] = _processor_class
479
+
480
+ return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"
481
+
482
+ def to_json_file(self, json_file_path: Union[str, os.PathLike]):
483
+ """
484
+ Save this instance to a JSON file.
485
+
486
+ Args:
487
+ json_file_path (`str` or `os.PathLike`):
488
+ Path to the JSON file in which this image_processor instance's parameters will be saved.
489
+ """
490
+ with open(json_file_path, "w", encoding="utf-8") as writer:
491
+ writer.write(self.to_json_string())
492
+
493
+ def __repr__(self):
494
+ return f"{self.__class__.__name__} {self.to_json_string()}"
495
+
496
+ @classmethod
497
+ def register_for_auto_class(cls, auto_class="AutoImageProcessor"):
498
+ """
499
+ Register this class with a given auto class. This should only be used for custom image processors as the ones
500
+ in the library are already mapped with `AutoImageProcessor`.
501
+
502
+ <Tip warning={true}>
503
+
504
+ This API is experimental and may have some slight breaking changes in the next releases.
505
+
506
+ </Tip>
507
+
508
+ Args:
509
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoImageProcessor"`):
510
+ The auto class to register this new image processor with.
511
+ """
512
+ if not isinstance(auto_class, str):
513
+ auto_class = auto_class.__name__
514
+
515
+ import transformers.models.auto as auto_module
516
+
517
+ if not hasattr(auto_module, auto_class):
518
+ raise ValueError(f"{auto_class} is not a valid auto class.")
519
+
520
+ cls._auto_class = auto_class
521
+
522
+ def fetch_images(self, image_url_or_urls: Union[str, List[str]]):
523
+ """
524
+ Convert a single or a list of urls into the corresponding `PIL.Image` objects.
525
+
526
+ If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
527
+ returned.
528
+ """
529
+ headers = {
530
+ "User-Agent": (
531
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0"
532
+ " Safari/537.36"
533
+ )
534
+ }
535
+ if isinstance(image_url_or_urls, list):
536
+ return [self.fetch_images(x) for x in image_url_or_urls]
537
+ elif isinstance(image_url_or_urls, str):
538
+ response = requests.get(image_url_or_urls, stream=True, headers=headers)
539
+ response.raise_for_status()
540
+ return Image.open(BytesIO(response.content))
541
+ else:
542
+ raise ValueError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}")
543
+
544
+
545
+ class BaseImageProcessor(ImageProcessingMixin):
546
+ def __init__(self, **kwargs):
547
+ super().__init__(**kwargs)
548
+
549
+ def __call__(self, images, **kwargs) -> BatchFeature:
550
+ """Preprocess an image or a batch of images."""
551
+ return self.preprocess(images, **kwargs)
552
+
553
+ def preprocess(self, images, **kwargs) -> BatchFeature:
554
+ raise NotImplementedError("Each image processor must implement its own preprocess method")
555
+
556
+ def rescale(
557
+ self,
558
+ image: np.ndarray,
559
+ scale: float,
560
+ data_format: Optional[Union[str, ChannelDimension]] = None,
561
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
562
+ **kwargs,
563
+ ) -> np.ndarray:
564
+ """
565
+ Rescale an image by a scale factor. image = image * scale.
566
+
567
+ Args:
568
+ image (`np.ndarray`):
569
+ Image to rescale.
570
+ scale (`float`):
571
+ The scaling factor to rescale pixel values by.
572
+ data_format (`str` or `ChannelDimension`, *optional*):
573
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
574
+ image is used. Can be one of:
575
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
576
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
577
+ input_data_format (`ChannelDimension` or `str`, *optional*):
578
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
579
+ from the input image. Can be one of:
580
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
581
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
582
+
583
+ Returns:
584
+ `np.ndarray`: The rescaled image.
585
+ """
586
+ return rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs)
587
+
588
+ def normalize(
589
+ self,
590
+ image: np.ndarray,
591
+ mean: Union[float, Iterable[float]],
592
+ std: Union[float, Iterable[float]],
593
+ data_format: Optional[Union[str, ChannelDimension]] = None,
594
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
595
+ **kwargs,
596
+ ) -> np.ndarray:
597
+ """
598
+ Normalize an image. image = (image - image_mean) / image_std.
599
+
600
+ Args:
601
+ image (`np.ndarray`):
602
+ Image to normalize.
603
+ mean (`float` or `Iterable[float]`):
604
+ Image mean to use for normalization.
605
+ std (`float` or `Iterable[float]`):
606
+ Image standard deviation to use for normalization.
607
+ data_format (`str` or `ChannelDimension`, *optional*):
608
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
609
+ image is used. Can be one of:
610
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
611
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
612
+ input_data_format (`ChannelDimension` or `str`, *optional*):
613
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
614
+ from the input image. Can be one of:
615
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
616
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
617
+
618
+ Returns:
619
+ `np.ndarray`: The normalized image.
620
+ """
621
+ return normalize(
622
+ image, mean=mean, std=std, data_format=data_format, input_data_format=input_data_format, **kwargs
623
+ )
624
+
625
+ def center_crop(
626
+ self,
627
+ image: np.ndarray,
628
+ size: Dict[str, int],
629
+ data_format: Optional[Union[str, ChannelDimension]] = None,
630
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
631
+ **kwargs,
632
+ ) -> np.ndarray:
633
+ """
634
+ Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than the requested `size` along
635
+ any edge, the image is padded with 0's and then center cropped.
636
+
637
+ Args:
638
+ image (`np.ndarray`):
639
+ Image to center crop.
640
+ size (`Dict[str, int]`):
641
+ Size of the output image.
642
+ data_format (`str` or `ChannelDimension`, *optional*):
643
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
644
+ image is used. Can be one of:
645
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
646
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
647
+ input_data_format (`ChannelDimension` or `str`, *optional*):
648
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
649
+ from the input image. Can be one of:
650
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
651
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
652
+ """
653
+ size = get_size_dict(size)
654
+ if "height" not in size or "width" not in size:
655
+ raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
656
+ return center_crop(
657
+ image,
658
+ size=(size["height"], size["width"]),
659
+ data_format=data_format,
660
+ input_data_format=input_data_format,
661
+ **kwargs,
662
+ )
663
+
664
+
665
+ VALID_SIZE_DICT_KEYS = ({"height", "width"}, {"shortest_edge"}, {"shortest_edge", "longest_edge"}, {"longest_edge"})
666
+
667
+
668
+ def is_valid_size_dict(size_dict):
669
+ if not isinstance(size_dict, dict):
670
+ return False
671
+
672
+ size_dict_keys = set(size_dict.keys())
673
+ for allowed_keys in VALID_SIZE_DICT_KEYS:
674
+ if size_dict_keys == allowed_keys:
675
+ return True
676
+ return False
677
+
678
+
679
+ def convert_to_size_dict(
680
+ size, max_size: Optional[int] = None, default_to_square: bool = True, height_width_order: bool = True
681
+ ):
682
+ # By default, if size is an int we assume it represents a tuple of (size, size).
683
+ if isinstance(size, int) and default_to_square:
684
+ if max_size is not None:
685
+ raise ValueError("Cannot specify both size as an int, with default_to_square=True and max_size")
686
+ return {"height": size, "width": size}
687
+ # In other configs, if size is an int and default_to_square is False, size represents the length of
688
+ # the shortest edge after resizing.
689
+ elif isinstance(size, int) and not default_to_square:
690
+ size_dict = {"shortest_edge": size}
691
+ if max_size is not None:
692
+ size_dict["longest_edge"] = max_size
693
+ return size_dict
694
+ # Otherwise, if size is a tuple it's either (height, width) or (width, height)
695
+ elif isinstance(size, (tuple, list)) and height_width_order:
696
+ return {"height": size[0], "width": size[1]}
697
+ elif isinstance(size, (tuple, list)) and not height_width_order:
698
+ return {"height": size[1], "width": size[0]}
699
+ elif size is None and max_size is not None:
700
+ if default_to_square:
701
+ raise ValueError("Cannot specify both default_to_square=True and max_size")
702
+ return {"longest_edge": max_size}
703
+
704
+ raise ValueError(f"Could not convert size input to size dict: {size}")
705
+
706
+
707
+ def get_size_dict(
708
+ size: Union[int, Iterable[int], Dict[str, int]] = None,
709
+ max_size: Optional[int] = None,
710
+ height_width_order: bool = True,
711
+ default_to_square: bool = True,
712
+ param_name="size",
713
+ ) -> dict:
714
+ """
715
+ Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards
716
+ compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,
717
+ width) or (width, height) format.
718
+
719
+ - If `size` is a tuple, it is converted to `{"height": size[0], "width": size[1]}` or `{"height": size[1], "width":
720
+ size[0]}` if `height_width_order` is `False`.
721
+ - If `size` is an int, and `default_to_square` is `True`, it is converted to `{"height": size, "width": size}`.
722
+ - If `size` is an int and `default_to_square` is False, it is converted to `{"shortest_edge": size}`. If `max_size`
723
+ is set, it is added to the dict as `{"longest_edge": max_size}`.
724
+
725
+ Args:
726
+ size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):
727
+ The `size` parameter to be cast into a size dictionary.
728
+ max_size (`Optional[int]`, *optional*):
729
+ The `max_size` parameter to be cast into a size dictionary.
730
+ height_width_order (`bool`, *optional*, defaults to `True`):
731
+ If `size` is a tuple, whether it's in (height, width) or (width, height) order.
732
+ default_to_square (`bool`, *optional*, defaults to `True`):
733
+ If `size` is an int, whether to default to a square image or not.
734
+ """
735
+ if not isinstance(size, dict):
736
+ size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)
737
+ logger.info(
738
+ f"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}."
739
+ f" Converted to {size_dict}.",
740
+ )
741
+ else:
742
+ size_dict = size
743
+
744
+ if not is_valid_size_dict(size_dict):
745
+ raise ValueError(
746
+ f"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}"
747
+ )
748
+ return size_dict
749
+
750
+
751
+ def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple:
752
+ """
753
+ Selects the best resolution from a list of possible resolutions based on the original size.
754
+
755
+ This is done by calculating the effective and wasted resolution for each possible resolution.
756
+
757
+ The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.
758
+
759
+ Args:
760
+ original_size (tuple):
761
+ The original size of the image in the format (height, width).
762
+ possible_resolutions (list):
763
+ A list of possible resolutions in the format [(height1, width1), (height2, width2), ...].
764
+
765
+ Returns:
766
+ tuple: The best fit resolution in the format (height, width).
767
+ """
768
+ original_height, original_width = original_size
769
+ best_fit = None
770
+ max_effective_resolution = 0
771
+ min_wasted_resolution = float("inf")
772
+
773
+ for height, width in possible_resolutions:
774
+ scale = min(width / original_width, height / original_height)
775
+ downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
776
+ effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
777
+ wasted_resolution = (width * height) - effective_resolution
778
+
779
+ if effective_resolution > max_effective_resolution or (
780
+ effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution
781
+ ):
782
+ max_effective_resolution = effective_resolution
783
+ min_wasted_resolution = wasted_resolution
784
+ best_fit = (height, width)
785
+
786
+ return best_fit
787
+
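+ # Editor's sketch (illustrative, not part of the upstream file): for a 300x400 (height, width)
+ # image, the candidate that keeps the most original pixels with the least padding is picked.
+ # >>> select_best_resolution((300, 400), [(336, 336), (672, 336), (336, 672)])
+ # (336, 672)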
788
+
789
+ ImageProcessingMixin.push_to_hub = copy_func(ImageProcessingMixin.push_to_hub)
790
+ if ImageProcessingMixin.push_to_hub.__doc__ is not None:
791
+ ImageProcessingMixin.push_to_hub.__doc__ = ImageProcessingMixin.push_to_hub.__doc__.format(
792
+ object="image processor", object_class="AutoImageProcessor", object_files="image processor file"
793
+ )
venv/lib/python3.10/site-packages/transformers/image_transforms.py ADDED
@@ -0,0 +1,803 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import warnings
17
+ from typing import Iterable, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+
21
+ from .image_utils import (
22
+ ChannelDimension,
23
+ ImageInput,
24
+ get_channel_dimension_axis,
25
+ get_image_size,
26
+ infer_channel_dimension_format,
27
+ )
28
+ from .utils import ExplicitEnum, TensorType, is_jax_tensor, is_tf_tensor, is_torch_tensor
29
+ from .utils.import_utils import (
30
+ is_flax_available,
31
+ is_tf_available,
32
+ is_torch_available,
33
+ is_vision_available,
34
+ requires_backends,
35
+ )
36
+
37
+
38
+ if is_vision_available():
39
+ import PIL
40
+
41
+ from .image_utils import PILImageResampling
42
+
43
+ if is_torch_available():
44
+ import torch
45
+
46
+ if is_tf_available():
47
+ import tensorflow as tf
48
+
49
+ if is_flax_available():
50
+ import jax.numpy as jnp
51
+
52
+
53
+ def to_channel_dimension_format(
54
+ image: np.ndarray,
55
+ channel_dim: Union[ChannelDimension, str],
56
+ input_channel_dim: Optional[Union[ChannelDimension, str]] = None,
57
+ ) -> np.ndarray:
58
+ """
59
+ Converts `image` to the channel dimension format specified by `channel_dim`.
60
+
61
+ Args:
62
+ image (`numpy.ndarray`):
63
+ The image to have its channel dimension set.
64
+ channel_dim (`ChannelDimension`):
65
+ The channel dimension format to use.
66
+ input_channel_dim (`ChannelDimension`, *optional*):
67
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
68
+
69
+ Returns:
70
+ `np.ndarray`: The image with the channel dimension set to `channel_dim`.
71
+ """
72
+ if not isinstance(image, np.ndarray):
73
+ raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
74
+
75
+ if input_channel_dim is None:
76
+ input_channel_dim = infer_channel_dimension_format(image)
77
+
78
+ target_channel_dim = ChannelDimension(channel_dim)
79
+ if input_channel_dim == target_channel_dim:
80
+ return image
81
+
82
+ if target_channel_dim == ChannelDimension.FIRST:
83
+ image = image.transpose((2, 0, 1))
84
+ elif target_channel_dim == ChannelDimension.LAST:
85
+ image = image.transpose((1, 2, 0))
86
+ else:
87
+ raise ValueError("Unsupported channel dimension format: {}".format(channel_dim))
88
+
89
+ return image
90
+
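+ # Editor's sketch (illustrative, not part of the upstream file): converting a channels-last
+ # array to channels-first is a pure transpose; requesting the input's own format is a no-op.
+ # >>> import numpy as np
+ # >>> to_channel_dimension_format(np.zeros((64, 64, 3)), ChannelDimension.FIRST).shape
+ # (3, 64, 64)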
91
+
92
+ def rescale(
93
+ image: np.ndarray,
94
+ scale: float,
95
+ data_format: Optional[ChannelDimension] = None,
96
+ dtype: np.dtype = np.float32,
97
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
98
+ ) -> np.ndarray:
99
+ """
100
+ Rescales `image` by `scale`.
101
+
102
+ Args:
103
+ image (`np.ndarray`):
104
+ The image to rescale.
105
+ scale (`float`):
106
+ The scale to use for rescaling the image.
107
+ data_format (`ChannelDimension`, *optional*):
108
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
109
+ dtype (`np.dtype`, *optional*, defaults to `np.float32`):
110
+ The dtype of the output image. Defaults to `np.float32`. Used for backwards compatibility with feature
111
+ extractors.
112
+ input_data_format (`ChannelDimension`, *optional*):
113
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
114
+
115
+ Returns:
116
+ `np.ndarray`: The rescaled image.
117
+ """
118
+ if not isinstance(image, np.ndarray):
119
+ raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
120
+
121
+ rescaled_image = image * scale
122
+ if data_format is not None:
123
+ rescaled_image = to_channel_dimension_format(rescaled_image, data_format, input_data_format)
124
+
125
+ rescaled_image = rescaled_image.astype(dtype)
126
+
127
+ return rescaled_image
128
+
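+ # Editor's sketch (illustrative, not part of the upstream file): the typical call uses
+ # scale=1/255 to map uint8 pixels into [0, 1]; shown here with an exact scale for clarity.
+ # >>> import numpy as np
+ # >>> rescale(np.array([0, 2, 4], dtype=np.uint8), 0.5)
+ # array([0., 1., 2.], dtype=float32)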
129
+
130
+ def _rescale_for_pil_conversion(image):
131
+ """
132
+ Detects whether or not the image needs to be rescaled before being converted to a PIL image.
133
+
134
+ The assumption is that if the image is of type `np.float` and all values are between 0 and 1, it needs to be
135
+ rescaled.
136
+ """
137
+ if image.dtype == np.uint8:
138
+ do_rescale = False
139
+ elif np.allclose(image, image.astype(int)):
140
+ if np.all(0 <= image) and np.all(image <= 255):
141
+ do_rescale = False
142
+ else:
143
+ raise ValueError(
144
+ "The image to be converted to a PIL image contains values outside the range [0, 255], "
145
+ f"got [{image.min()}, {image.max()}] which cannot be converted to uint8."
146
+ )
147
+ elif np.all(0 <= image) and np.all(image <= 1):
148
+ do_rescale = True
149
+ else:
150
+ raise ValueError(
151
+ "The image to be converted to a PIL image contains values outside the range [0, 1], "
152
+ f"got [{image.min()}, {image.max()}] which cannot be converted to uint8."
153
+ )
154
+ return do_rescale
155
+
156
+
157
+ def to_pil_image(
158
+ image: Union[np.ndarray, "PIL.Image.Image", "torch.Tensor", "tf.Tensor", "jnp.ndarray"],
159
+ do_rescale: Optional[bool] = None,
160
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
161
+ ) -> "PIL.Image.Image":
162
+ """
163
+ Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
164
+ needed.
165
+
166
+ Args:
167
+ image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`):
168
+ The image to convert to the `PIL.Image` format.
169
+ do_rescale (`bool`, *optional*):
170
+ Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default
171
+ to `True` if the image type is a floating type and casting to `int` would result in a loss of precision,
172
+ and `False` otherwise.
173
+ input_data_format (`ChannelDimension`, *optional*):
174
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
175
+
176
+ Returns:
177
+ `PIL.Image.Image`: The converted image.
178
+ """
179
+ requires_backends(to_pil_image, ["vision"])
180
+
181
+ if isinstance(image, PIL.Image.Image):
182
+ return image
183
+
184
+ # Convert all tensors to numpy arrays before converting to PIL image
185
+ if is_torch_tensor(image) or is_tf_tensor(image):
186
+ image = image.numpy()
187
+ elif is_jax_tensor(image):
188
+ image = np.array(image)
189
+ elif not isinstance(image, np.ndarray):
190
+ raise ValueError("Input image type not supported: {}".format(type(image)))
191
+
192
+ # If the channel has been moved to first dim, we put it back at the end.
193
+ image = to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)
194
+
195
+ # If there is a single channel, we squeeze it, as otherwise PIL can't handle it.
196
+ image = np.squeeze(image, axis=-1) if image.shape[-1] == 1 else image
197
+
198
+ # PIL.Image can only store uint8 values so we rescale the image to be between 0 and 255 if needed.
199
+ do_rescale = _rescale_for_pil_conversion(image) if do_rescale is None else do_rescale
200
+
201
+ if do_rescale:
202
+ image = rescale(image, 255)
203
+
204
+ image = image.astype(np.uint8)
205
+ return PIL.Image.fromarray(image)
206
+
207
+
208
+ # Logic adapted from torchvision resizing logic: https://github.com/pytorch/vision/blob/511924c1ced4ce0461197e5caa64ce5b9e558aab/torchvision/transforms/functional.py#L366
209
+ def get_resize_output_image_size(
210
+ input_image: np.ndarray,
211
+ size: Union[int, Tuple[int, int], List[int], Tuple[int]],
212
+ default_to_square: bool = True,
213
+ max_size: Optional[int] = None,
214
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
215
+ ) -> tuple:
216
+ """
217
+ Find the target (height, width) dimension of the output image after resizing given the input image and the desired
218
+ size.
219
+
220
+ Args:
221
+ input_image (`np.ndarray`):
222
+ The image to resize.
223
+ size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`):
224
+ The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to
225
+ this.
226
+
227
+ If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
228
+ `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this
229
+ number. i.e, if height > width, then image will be rescaled to (size * height / width, size).
230
+ default_to_square (`bool`, *optional*, defaults to `True`):
231
+ How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square
232
+ (`size`,`size`). If set to `False`, will replicate
233
+ [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
234
+ with support for resizing only the smallest edge and providing an optional `max_size`.
235
+ max_size (`int`, *optional*):
236
+ The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater
237
+ than `max_size` after being resized according to `size`, then the image is resized again so that the longer
238
+ edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter
239
+ than `size`. Only used if `default_to_square` is `False`.
240
+ input_data_format (`ChannelDimension`, *optional*):
241
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
242
+
243
+ Returns:
244
+ `tuple`: The target (height, width) dimension of the output image after resizing.
245
+ """
246
+ if isinstance(size, (tuple, list)):
247
+ if len(size) == 2:
248
+ return tuple(size)
249
+ elif len(size) == 1:
250
+ # Perform same logic as if size was an int
251
+ size = size[0]
252
+ else:
253
+ raise ValueError("size must have 1 or 2 elements if it is a list or tuple")
254
+
255
+ if default_to_square:
256
+ return (size, size)
257
+
258
+ height, width = get_image_size(input_image, input_data_format)
259
+ short, long = (width, height) if width <= height else (height, width)
260
+ requested_new_short = size
261
+
262
+ new_short, new_long = requested_new_short, int(requested_new_short * long / short)
263
+
264
+ if max_size is not None:
265
+ if max_size <= requested_new_short:
266
+ raise ValueError(
267
+ f"max_size = {max_size} must be strictly greater than the requested "
268
+ f"size for the smaller edge size = {size}"
269
+ )
270
+ if new_long > max_size:
271
+ new_short, new_long = int(max_size * new_short / new_long), max_size
272
+
273
+ return (new_long, new_short) if width <= height else (new_short, new_long)
274
+
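+ # Editor's sketch (illustrative, not part of the upstream file): with `default_to_square=False`,
+ # the shorter edge of a 480x640 (height, width) image is matched to `size` and the longer edge
+ # is scaled to preserve the aspect ratio.
+ # >>> import numpy as np
+ # >>> get_resize_output_image_size(np.zeros((480, 640, 3)), size=256, default_to_square=False)
+ # (256, 341)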
275
+
276
+ def resize(
277
+ image: np.ndarray,
278
+ size: Tuple[int, int],
279
+ resample: "PILImageResampling" = None,
280
+ reducing_gap: Optional[int] = None,
281
+ data_format: Optional[ChannelDimension] = None,
282
+ return_numpy: bool = True,
283
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
284
+ ) -> np.ndarray:
285
+ """
286
+ Resizes `image` to `(height, width)` specified by `size` using the PIL library.
287
+
288
+ Args:
289
+ image (`np.ndarray`):
290
+ The image to resize.
291
+ size (`Tuple[int, int]`):
292
+ The size to use for resizing the image.
293
+ resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
294
+ The filter to use for resampling.
295
+ reducing_gap (`int`, *optional*):
296
+ Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to
297
+ the fair resampling. See corresponding Pillow documentation for more details.
298
+ data_format (`ChannelDimension`, *optional*):
299
+ The channel dimension format of the output image. If unset, will use the inferred format from the input.
300
+ return_numpy (`bool`, *optional*, defaults to `True`):
301
+ Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is
302
+ returned.
303
+ input_data_format (`ChannelDimension`, *optional*):
304
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
305
+
306
+ Returns:
307
+ `np.ndarray`: The resized image.
308
+ """
309
+ requires_backends(resize, ["vision"])
310
+
311
+ resample = resample if resample is not None else PILImageResampling.BILINEAR
312
+
313
+ if not len(size) == 2:
314
+ raise ValueError("size must have 2 elements")
315
+
316
+ # For all transformations, we want to keep the same data format as the input image unless otherwise specified.
317
+ # The resized image from PIL will always have channels last, so find the input format first.
318
+ if input_data_format is None:
319
+ input_data_format = infer_channel_dimension_format(image)
320
+ data_format = input_data_format if data_format is None else data_format
321
+
322
+ # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use
323
+ # the pillow library to resize the image and then convert back to numpy
324
+ do_rescale = False
325
+ if not isinstance(image, PIL.Image.Image):
326
+ do_rescale = _rescale_for_pil_conversion(image)
327
+ image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)
328
+ height, width = size
329
+ # PIL images are in the format (width, height)
330
+ resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)
331
+
332
+ if return_numpy:
333
+ resized_image = np.array(resized_image)
334
+ # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image
335
+ # so we need to add it back if necessary.
336
+ resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image
337
+ # The image is always in channels last format after converting from a PIL image
338
+ resized_image = to_channel_dimension_format(
339
+ resized_image, data_format, input_channel_dim=ChannelDimension.LAST
340
+ )
341
+ # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to
342
+ # rescale it back to the original range.
343
+ resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image
344
+ return resized_image
345
+
346
+
347
+ def normalize(
348
+ image: np.ndarray,
349
+ mean: Union[float, Iterable[float]],
350
+ std: Union[float, Iterable[float]],
351
+ data_format: Optional[ChannelDimension] = None,
352
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
353
+ ) -> np.ndarray:
354
+ """
355
+ Normalizes `image` using the mean and standard deviation specified by `mean` and `std`.
356
+
357
+ image = (image - mean) / std
358
+
359
+ Args:
360
+ image (`np.ndarray`):
361
+ The image to normalize.
362
+ mean (`float` or `Iterable[float]`):
363
+ The mean to use for normalization.
364
+ std (`float` or `Iterable[float]`):
365
+ The standard deviation to use for normalization.
366
+ data_format (`ChannelDimension`, *optional*):
367
+ The channel dimension format of the output image. If unset, will use the inferred format from the input.
368
+ input_data_format (`ChannelDimension`, *optional*):
369
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
370
+ """
371
+ if not isinstance(image, np.ndarray):
372
+ raise ValueError("image must be a numpy array")
373
+
374
+ if input_data_format is None:
375
+ input_data_format = infer_channel_dimension_format(image)
376
+ channel_axis = get_channel_dimension_axis(image, input_data_format=input_data_format)
377
+ num_channels = image.shape[channel_axis]
378
+
379
+ # We cast to float32 to avoid errors that can occur when subtracting uint8 values.
380
+ # We preserve the original dtype if it is a float type to prevent upcasting float16.
381
+ if not np.issubdtype(image.dtype, np.floating):
382
+ image = image.astype(np.float32)
383
+
384
+ if isinstance(mean, Iterable):
385
+ if len(mean) != num_channels:
386
+ raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(mean)}")
387
+ else:
388
+ mean = [mean] * num_channels
389
+ mean = np.array(mean, dtype=image.dtype)
390
+
391
+ if isinstance(std, Iterable):
392
+ if len(std) != num_channels:
393
+ raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(std)}")
394
+ else:
395
+ std = [std] * num_channels
396
+ std = np.array(std, dtype=image.dtype)
397
+
398
+ if input_data_format == ChannelDimension.LAST:
399
+ image = (image - mean) / std
400
+ else:
401
+ image = ((image.T - mean) / std).T
402
+
403
+ image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
404
+ return image
405
+
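+ # Editor's sketch (illustrative, not part of the upstream file): scalar `mean`/`std` values are
+ # broadcast over all channels, so a constant 0.5 image normalized with mean=0.5, std=0.5 is all
+ # zeros.
+ # >>> import numpy as np
+ # >>> normalize(np.full((3, 2, 2), 0.5, dtype=np.float32), mean=0.5, std=0.5).max()
+ # 0.0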
406
+
407
+ def center_crop(
408
+ image: np.ndarray,
409
+ size: Tuple[int, int],
410
+ data_format: Optional[Union[str, ChannelDimension]] = None,
411
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
412
+ return_numpy: Optional[bool] = None,
413
+ ) -> np.ndarray:
414
+ """
415
+ Crops the `image` to the specified `size` using a center crop. Note that if the image is too small to be cropped to
416
+ the size given, it will be padded (so the returned result will always be of size `size`).
417
+
418
+ Args:
419
+ image (`np.ndarray`):
420
+ The image to crop.
421
+ size (`Tuple[int, int]`):
422
+ The target size for the cropped image.
423
+ data_format (`str` or `ChannelDimension`, *optional*):
424
+ The channel dimension format for the output image. Can be one of:
425
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
426
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
427
+ If unset, will use the inferred format of the input image.
428
+ input_data_format (`str` or `ChannelDimension`, *optional*):
429
+ The channel dimension format for the input image. Can be one of:
430
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
431
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
432
+ If unset, will use the inferred format of the input image.
433
+ return_numpy (`bool`, *optional*):
434
+ Whether or not to return the cropped image as a numpy array. Used for backwards compatibility with the
435
+ previous ImageFeatureExtractionMixin method.
436
+ - Unset: will return the same type as the input image.
437
+ - `True`: will return a numpy array.
438
+ - `False`: will return a `PIL.Image.Image` object.
439
+ Returns:
440
+ `np.ndarray`: The cropped image.
441
+ """
442
+ requires_backends(center_crop, ["vision"])
443
+
444
+ if return_numpy is not None:
445
+ warnings.warn("return_numpy is deprecated and will be removed in v.4.33", FutureWarning)
446
+
447
+ return_numpy = True if return_numpy is None else return_numpy
448
+
449
+ if not isinstance(image, np.ndarray):
450
+ raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
451
+
452
+ if not isinstance(size, Iterable) or len(size) != 2:
453
+ raise ValueError("size must have 2 elements representing the height and width of the output image")
454
+
455
+ if input_data_format is None:
456
+ input_data_format = infer_channel_dimension_format(image)
457
+ output_data_format = data_format if data_format is not None else input_data_format
458
+
459
+ # We perform the crop in (C, H, W) format and then convert to the output format
460
+ image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format)
461
+
462
+ orig_height, orig_width = get_image_size(image, ChannelDimension.FIRST)
463
+ crop_height, crop_width = size
464
+ crop_height, crop_width = int(crop_height), int(crop_width)
465
+
466
+ # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
467
+ top = (orig_height - crop_height) // 2
468
+ bottom = top + crop_height
469
+ # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.
470
+ left = (orig_width - crop_width) // 2
471
+ right = left + crop_width
472
+
473
+ # Check if cropped area is within image boundaries
474
+ if top >= 0 and bottom <= orig_height and left >= 0 and right <= orig_width:
475
+ image = image[..., top:bottom, left:right]
476
+ image = to_channel_dimension_format(image, output_data_format, ChannelDimension.FIRST)
477
+ return image
478
+
479
+ # Otherwise, we may need to pad if the image is too small. Oh joy...
480
+ new_height = max(crop_height, orig_height)
481
+ new_width = max(crop_width, orig_width)
482
+ new_shape = image.shape[:-2] + (new_height, new_width)
483
+ new_image = np.zeros_like(image, shape=new_shape)
484
+
485
+ # If the image is too small, pad it with zeros
486
+ top_pad = (new_height - orig_height) // 2
487
+ bottom_pad = top_pad + orig_height
488
+ left_pad = (new_width - orig_width) // 2
489
+ right_pad = left_pad + orig_width
490
+ new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image
491
+
492
+ top += top_pad
493
+ bottom += top_pad
494
+ left += left_pad
495
+ right += left_pad
496
+
497
+ new_image = new_image[..., max(0, top) : min(new_height, bottom), max(0, left) : min(new_width, right)]
498
+ new_image = to_channel_dimension_format(new_image, output_data_format, ChannelDimension.FIRST)
499
+
500
+ if not return_numpy:
501
+ new_image = to_pil_image(new_image)
502
+
503
+ return new_image
504
+
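+ # Editor's sketch (illustrative, not part of the upstream file): cropping a channels-first
+ # 3x8x8 image to (4, 4) keeps the central window; crops larger than the image are zero-padded.
+ # >>> import numpy as np
+ # >>> center_crop(np.ones((3, 8, 8)), size=(4, 4)).shape
+ # (3, 4, 4)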
505
+
506
+ def _center_to_corners_format_torch(bboxes_center: "torch.Tensor") -> "torch.Tensor":
507
+ center_x, center_y, width, height = bboxes_center.unbind(-1)
508
+ bbox_corners = torch.stack(
509
+ # top left x, top left y, bottom right x, bottom right y
510
+ [(center_x - 0.5 * width), (center_y - 0.5 * height), (center_x + 0.5 * width), (center_y + 0.5 * height)],
511
+ dim=-1,
512
+ )
513
+ return bbox_corners
514
+
515
+
516
+ def _center_to_corners_format_numpy(bboxes_center: np.ndarray) -> np.ndarray:
517
+ center_x, center_y, width, height = bboxes_center.T
518
+ bboxes_corners = np.stack(
519
+ # top left x, top left y, bottom right x, bottom right y
520
+ [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height],
521
+ axis=-1,
522
+ )
523
+ return bboxes_corners
524
+
525
+
526
+ def _center_to_corners_format_tf(bboxes_center: "tf.Tensor") -> "tf.Tensor":
527
+ center_x, center_y, width, height = tf.unstack(bboxes_center, axis=-1)
528
+ bboxes_corners = tf.stack(
529
+ # top left x, top left y, bottom right x, bottom right y
530
+ [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height],
531
+ axis=-1,
532
+ )
533
+ return bboxes_corners
534
+
535
+
536
+ # 2 functions below inspired by https://github.com/facebookresearch/detr/blob/master/util/box_ops.py
537
+ def center_to_corners_format(bboxes_center: TensorType) -> TensorType:
538
+ """
539
+ Converts bounding boxes from center format to corners format.
540
+
541
+ center format: contains the coordinate for the center of the box and its width, height dimensions
542
+ (center_x, center_y, width, height)
543
+ corners format: contains the coordinates for the top-left and bottom-right corners of the box
544
+ (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
545
+ """
546
+ # Function is used during model forward pass, so we use the input framework if possible, without
547
+ # converting to numpy
548
+ if is_torch_tensor(bboxes_center):
549
+ return _center_to_corners_format_torch(bboxes_center)
550
+ elif isinstance(bboxes_center, np.ndarray):
551
+ return _center_to_corners_format_numpy(bboxes_center)
552
+ elif is_tf_tensor(bboxes_center):
553
+ return _center_to_corners_format_tf(bboxes_center)
554
+
555
+ raise ValueError(f"Unsupported input type {type(bboxes_center)}")
556
+
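+ # Editor's sketch (illustrative, not part of the upstream file): a box centered at (0.5, 0.5)
+ # with width 0.5 and height 1.0 in (center_x, center_y, width, height) format maps to its
+ # (top_left_x, top_left_y, bottom_right_x, bottom_right_y) corners; `corners_to_center_format`
+ # below inverts the conversion.
+ # >>> import numpy as np
+ # >>> center_to_corners_format(np.array([[0.5, 0.5, 0.5, 1.0]]))
+ # array([[0.25, 0.  , 0.75, 1.  ]])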
557
+
558
+ def _corners_to_center_format_torch(bboxes_corners: "torch.Tensor") -> "torch.Tensor":
559
+ top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.unbind(-1)
560
+ b = [
561
+ (top_left_x + bottom_right_x) / 2, # center x
562
+ (top_left_y + bottom_right_y) / 2, # center y
563
+ (bottom_right_x - top_left_x), # width
564
+ (bottom_right_y - top_left_y), # height
565
+ ]
566
+ return torch.stack(b, dim=-1)
567
+
568
+
569
+ def _corners_to_center_format_numpy(bboxes_corners: np.ndarray) -> np.ndarray:
570
+ top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.T
571
+ bboxes_center = np.stack(
572
+ [
573
+ (top_left_x + bottom_right_x) / 2, # center x
574
+ (top_left_y + bottom_right_y) / 2, # center y
575
+ (bottom_right_x - top_left_x), # width
576
+ (bottom_right_y - top_left_y), # height
577
+ ],
578
+ axis=-1,
579
+ )
580
+ return bboxes_center
581
+
582
+
583
+ def _corners_to_center_format_tf(bboxes_corners: "tf.Tensor") -> "tf.Tensor":
584
+ top_left_x, top_left_y, bottom_right_x, bottom_right_y = tf.unstack(bboxes_corners, axis=-1)
585
+ bboxes_center = tf.stack(
586
+ [
587
+ (top_left_x + bottom_right_x) / 2, # center x
588
+ (top_left_y + bottom_right_y) / 2, # center y
589
+ (bottom_right_x - top_left_x), # width
590
+ (bottom_right_y - top_left_y), # height
591
+ ],
592
+ axis=-1,
593
+ )
594
+ return bboxes_center
595
+
596
+
597
+ def corners_to_center_format(bboxes_corners: TensorType) -> TensorType:
598
+ """
599
+ Converts bounding boxes from corners format to center format.
600
+
601
+ corners format: contains the coordinates for the top-left and bottom-right corners of the box
602
+ (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
603
+ center format: contains the coordinates for the center of the box and its width and height dimensions
604
+ (center_x, center_y, width, height)
605
+ """
606
+ # Inverse function accepts different input types so implemented here too
607
+ if is_torch_tensor(bboxes_corners):
608
+ return _corners_to_center_format_torch(bboxes_corners)
609
+ elif isinstance(bboxes_corners, np.ndarray):
610
+ return _corners_to_center_format_numpy(bboxes_corners)
611
+ elif is_tf_tensor(bboxes_corners):
612
+ return _corners_to_center_format_tf(bboxes_corners)
613
+
614
+ raise ValueError(f"Unsupported input type {type(bboxes_corners)}")
615
+
616
+
617
+ # 2 functions below copied from https://github.com/cocodataset/panopticapi/blob/master/panopticapi/utils.py
618
+ # Copyright (c) 2018, Alexander Kirillov
619
+ # All rights reserved.
620
+ def rgb_to_id(color):
621
+ """
622
+ Converts RGB color to unique ID.
623
+ """
624
+ if isinstance(color, np.ndarray) and len(color.shape) == 3:
625
+ if color.dtype == np.uint8:
626
+ color = color.astype(np.int32)
627
+ return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
628
+ return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
629
+
630
+
631
+ def id_to_rgb(id_map):
632
+ """
633
+ Converts unique ID to RGB color.
634
+ """
635
+ if isinstance(id_map, np.ndarray):
636
+ id_map_copy = id_map.copy()
637
+ rgb_shape = tuple(list(id_map.shape) + [3])
638
+ rgb_map = np.zeros(rgb_shape, dtype=np.uint8)
639
+ for i in range(3):
640
+ rgb_map[..., i] = id_map_copy % 256
641
+ id_map_copy //= 256
642
+ return rgb_map
643
+ color = []
644
+ for _ in range(3):
645
+ color.append(id_map % 256)
646
+ id_map //= 256
647
+ return color
648
+
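+ # Editor's sketch (illustrative, not part of the upstream file): a panoptic segment id is the
+ # RGB triplet read as a base-256 integer, so the two helpers above are inverses of each other.
+ # >>> rgb_to_id([10, 1, 0])
+ # 266
+ # >>> id_to_rgb(266)
+ # [10, 1, 0]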
649
+
650
+ class PaddingMode(ExplicitEnum):
651
+ """
652
+ Enum class for the different padding modes to use when padding images.
653
+ """
654
+
655
+ CONSTANT = "constant"
656
+ REFLECT = "reflect"
657
+ REPLICATE = "replicate"
658
+ SYMMETRIC = "symmetric"
659
+
660
+
661
+ def pad(
662
+ image: np.ndarray,
663
+ padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],
664
+ mode: PaddingMode = PaddingMode.CONSTANT,
665
+ constant_values: Union[float, Iterable[float]] = 0.0,
666
+ data_format: Optional[Union[str, ChannelDimension]] = None,
667
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
668
+ ) -> np.ndarray:
669
+ """
670
+ Pads the `image` with the specified (height, width) `padding` and `mode`.
671
+
672
+ Args:
673
+ image (`np.ndarray`):
674
+ The image to pad.
675
+ padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):
676
+ Padding to apply to the edges of the height, width axes. Can be one of three formats:
677
+ - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
678
+ - `((before, after),)` yields same before and after pad for height and width.
679
+ - `(pad,)` or int is a shortcut for before = after = pad width for all axes.
680
+ mode (`PaddingMode`):
681
+ The padding mode to use. Can be one of:
682
+ - `"constant"`: pads with a constant value.
683
+ - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
684
+ vector along each axis.
685
+ - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
686
+ - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
687
+ constant_values (`float` or `Iterable[float]`, *optional*):
688
+ The value to use for the padding if `mode` is `"constant"`.
689
+ data_format (`str` or `ChannelDimension`, *optional*):
690
+ The channel dimension format for the output image. Can be one of:
691
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
692
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
693
+ If unset, will use the same format as the input image.
694
+ input_data_format (`str` or `ChannelDimension`, *optional*):
695
+ The channel dimension format for the input image. Can be one of:
696
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
697
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
698
+ If unset, will use the inferred format of the input image.
699
+
700
+ Returns:
701
+ `np.ndarray`: The padded image.
702
+
703
+ """
704
+ if input_data_format is None:
705
+ input_data_format = infer_channel_dimension_format(image)
706
+
707
+ def _expand_for_data_format(values):
708
+ """
709
+ Convert values to be in the format expected by np.pad based on the data format.
710
+ """
711
+ if isinstance(values, (int, float)):
712
+ values = ((values, values), (values, values))
713
+ elif isinstance(values, tuple) and len(values) == 1:
714
+ values = ((values[0], values[0]), (values[0], values[0]))
715
+ elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):
716
+ values = (values, values)
717
+ elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):
718
+ values = values
719
+ else:
720
+ raise ValueError(f"Unsupported format: {values}")
721
+
722
+ # add 0 for channel dimension
723
+ values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))
724
+
725
+ # Add additional padding if there's a batch dimension
726
+ values = (0, *values) if image.ndim == 4 else values
727
+ return values
728
+
729
+ padding = _expand_for_data_format(padding)
730
+
731
+ if mode == PaddingMode.CONSTANT:
732
+ constant_values = _expand_for_data_format(constant_values)
733
+ image = np.pad(image, padding, mode="constant", constant_values=constant_values)
734
+ elif mode == PaddingMode.REFLECT:
735
+ image = np.pad(image, padding, mode="reflect")
736
+ elif mode == PaddingMode.REPLICATE:
737
+ image = np.pad(image, padding, mode="edge")
738
+ elif mode == PaddingMode.SYMMETRIC:
739
+ image = np.pad(image, padding, mode="symmetric")
740
+ else:
741
+ raise ValueError(f"Invalid padding mode: {mode}")
742
+
743
+ image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
744
+ return image
745
+
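+ # Editor's sketch (illustrative, not part of the upstream file): an integer `padding` pads both
+ # sides of the height and width axes while leaving the channel axis untouched.
+ # >>> import numpy as np
+ # >>> pad(np.zeros((3, 4, 4)), padding=2).shape
+ # (3, 8, 8)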
746
+
747
+ # TODO (Amy): Accept 1/3/4 channel numpy array as input and return np.array as default
748
+ def convert_to_rgb(image: ImageInput) -> ImageInput:
749
+ """
750
+ Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
751
+ as is.
752
+ Args:
753
+ image (Image):
754
+ The image to convert.
755
+ """
756
+ requires_backends(convert_to_rgb, ["vision"])
757
+
758
+ if not isinstance(image, PIL.Image.Image):
759
+ return image
760
+
761
+ if image.mode == "RGB":
762
+ return image
763
+
764
+ image = image.convert("RGB")
765
+ return image
766
+
767
+
768
+ def flip_channel_order(
769
+ image: np.ndarray,
770
+ data_format: Optional[ChannelDimension] = None,
771
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
772
+ ) -> np.ndarray:
773
+ """
774
+ Flips the channel order of the image.
775
+
776
+ If the image is in RGB format, it will be converted to BGR and vice versa.
777
+
778
+ Args:
779
+ image (`np.ndarray`):
780
+ The image to flip.
781
+ data_format (`ChannelDimension`, *optional*):
782
+ The channel dimension format for the output image. Can be one of:
783
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
784
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
785
+ If unset, will use the same format as the input image.
786
+ input_data_format (`ChannelDimension`, *optional*):
787
+ The channel dimension format for the input image. Can be one of:
788
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
789
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
790
+ If unset, will use the inferred format of the input image.
791
+ """
792
+ input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format
793
+
794
+ if input_data_format == ChannelDimension.LAST:
795
+ image = image[..., ::-1]
796
+ elif input_data_format == ChannelDimension.FIRST:
797
+ image = image[::-1, ...]
798
+ else:
799
+ raise ValueError(f"Unsupported channel dimension: {input_data_format}")
800
+
801
+ if data_format is not None:
802
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
803
+ return image
venv/lib/python3.10/site-packages/transformers/image_utils.py ADDED
@@ -0,0 +1,769 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import base64
17
+ import os
18
+ from io import BytesIO
19
+ from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import requests
23
+ from packaging import version
24
+
25
+ from .utils import (
26
+ ExplicitEnum,
27
+ is_jax_tensor,
28
+ is_tf_tensor,
29
+ is_torch_available,
30
+ is_torch_tensor,
31
+ is_vision_available,
32
+ logging,
33
+ requires_backends,
34
+ to_numpy,
35
+ )
36
+ from .utils.constants import ( # noqa: F401
37
+ IMAGENET_DEFAULT_MEAN,
38
+ IMAGENET_DEFAULT_STD,
39
+ IMAGENET_STANDARD_MEAN,
40
+ IMAGENET_STANDARD_STD,
41
+ OPENAI_CLIP_MEAN,
42
+ OPENAI_CLIP_STD,
43
+ )
44
+
45
+
46
+ if is_vision_available():
47
+ import PIL.Image
48
+ import PIL.ImageOps
49
+
50
+ if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
51
+ PILImageResampling = PIL.Image.Resampling
52
+ else:
53
+ PILImageResampling = PIL.Image
54
+
55
+ if TYPE_CHECKING:
56
+ if is_torch_available():
57
+ import torch
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+
63
+ ImageInput = Union[
64
+ "PIL.Image.Image", np.ndarray, "torch.Tensor", List["PIL.Image.Image"], List[np.ndarray], List["torch.Tensor"]
65
+ ] # noqa
66
+
67
+
68
+ class ChannelDimension(ExplicitEnum):
69
+ FIRST = "channels_first"
70
+ LAST = "channels_last"
71
+
72
+
73
+ class AnnotationFormat(ExplicitEnum):
74
+ COCO_DETECTION = "coco_detection"
75
+ COCO_PANOPTIC = "coco_panoptic"
76
+
77
+
78
+ class AnnotionFormat(ExplicitEnum):
79
+ COCO_DETECTION = AnnotationFormat.COCO_DETECTION.value
80
+ COCO_PANOPTIC = AnnotationFormat.COCO_PANOPTIC.value
81
+
82
+
83
+ AnnotationType = Dict[str, Union[int, str, List[Dict]]]
84
+
85
+
86
+ def is_pil_image(img):
87
+ return is_vision_available() and isinstance(img, PIL.Image.Image)
88
+
89
+
90
+ def is_valid_image(img):
91
+ return (
92
+ (is_vision_available() and isinstance(img, PIL.Image.Image))
93
+ or isinstance(img, np.ndarray)
94
+ or is_torch_tensor(img)
95
+ or is_tf_tensor(img)
96
+ or is_jax_tensor(img)
97
+ )
98
+
99
+
100
+ def valid_images(imgs):
101
+ # If we have a list of images, make sure every image is valid
102
+ if isinstance(imgs, (list, tuple)):
103
+ for img in imgs:
104
+ if not valid_images(img):
105
+ return False
106
+ # If not a list or tuple, we have been given a single image or a batched tensor of images
107
+ elif not is_valid_image(imgs):
108
+ return False
109
+ return True
110
+
111
+
112
+ def is_batched(img):
113
+ if isinstance(img, (list, tuple)):
114
+ return is_valid_image(img[0])
115
+ return False
116
+
117
+
118
+ def is_scaled_image(image: np.ndarray) -> bool:
119
+ """
120
+ Checks to see whether the pixel values have already been rescaled to [0, 1].
121
+ """
122
+ if image.dtype == np.uint8:
123
+ return False
124
+
125
+ # It's possible the image has pixel values in [0, 255] but is of floating type
126
+ return np.min(image) >= 0 and np.max(image) <= 1
127
+
128
+
129
+ def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:
130
+ """
131
+ Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.
132
+ If the input is a batch of images, it is converted to a list of images.
133
+
134
+ Args:
135
+ images (`ImageInput`):
136
+ Image or images to turn into a list of images.
137
+ expected_ndims (`int`, *optional*, defaults to 3):
138
+ Expected number of dimensions for a single input image. If the input image has a different number of
139
+ dimensions, an error is raised.
140
+ """
141
+ if is_batched(images):
142
+ return images
143
+
144
+ # Either the input is a single image, in which case we create a list of length 1
145
+ if isinstance(images, PIL.Image.Image):
146
+ # PIL images are never batched
147
+ return [images]
148
+
149
+ if is_valid_image(images):
150
+ if images.ndim == expected_ndims + 1:
151
+ # Batch of images
152
+ images = list(images)
153
+ elif images.ndim == expected_ndims:
154
+ # Single image
155
+ images = [images]
156
+ else:
157
+ raise ValueError(
158
+ f"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got"
159
+ f" {images.ndim} dimensions."
160
+ )
161
+ return images
162
+ raise ValueError(
163
+ "Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or "
164
+ f"jax.ndarray, but got {type(images)}."
165
+ )
166
+
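+ # Editor's sketch (illustrative, not part of the upstream file): a single 3D array becomes a
+ # one-element list, while a 4D batch is split into a list of its 3D images.
+ # >>> import numpy as np
+ # >>> len(make_list_of_images(np.zeros((3, 32, 32))))
+ # 1
+ # >>> len(make_list_of_images(np.zeros((8, 3, 32, 32))))
+ # 8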
167
+
168
+ def to_numpy_array(img) -> np.ndarray:
169
+ if not is_valid_image(img):
170
+ raise ValueError(f"Invalid image type: {type(img)}")
171
+
172
+ if is_vision_available() and isinstance(img, PIL.Image.Image):
173
+ return np.array(img)
174
+ return to_numpy(img)
175
+
176
+
177
+ def infer_channel_dimension_format(
178
+ image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None
179
+ ) -> ChannelDimension:
180
+ """
181
+ Infers the channel dimension format of `image`.
182
+
183
+ Args:
184
+ image (`np.ndarray`):
185
+ The image to infer the channel dimension of.
186
+ num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):
187
+ The number of channels of the image.
188
+
189
+ Returns:
190
+ The channel dimension of the image.
191
+ """
192
+ num_channels = num_channels if num_channels is not None else (1, 3)
193
+ num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels
194
+
195
+ if image.ndim == 3:
196
+ first_dim, last_dim = 0, 2
197
+ elif image.ndim == 4:
198
+ first_dim, last_dim = 1, 3
199
+ else:
200
+ raise ValueError(f"Unsupported number of image dimensions: {image.ndim}")
201
+
202
+ if image.shape[first_dim] in num_channels:
203
+ return ChannelDimension.FIRST
204
+ elif image.shape[last_dim] in num_channels:
205
+ return ChannelDimension.LAST
206
+ raise ValueError("Unable to infer channel dimension format")
207
+
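+ # Editor's sketch (illustrative, not part of the upstream file): the format is inferred from
+ # whichever axis has a plausible channel count (1 or 3 by default).
+ # >>> import numpy as np
+ # >>> infer_channel_dimension_format(np.zeros((3, 224, 224))) == ChannelDimension.FIRST
+ # True
+ # >>> infer_channel_dimension_format(np.zeros((224, 224, 3))) == ChannelDimension.LAST
+ # True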
208
+
209
+ def get_channel_dimension_axis(
210
+ image: np.ndarray, input_data_format: Optional[Union[ChannelDimension, str]] = None
211
+ ) -> int:
212
+ """
213
+ Returns the channel dimension axis of the image.
214
+
215
+ Args:
216
+ image (`np.ndarray`):
217
+ The image to get the channel dimension axis of.
218
+ input_data_format (`ChannelDimension` or `str`, *optional*):
219
+ The channel dimension format of the image. If `None`, will infer the channel dimension from the image.
220
+
221
+ Returns:
222
+ The channel dimension axis of the image.
223
+ """
224
+ if input_data_format is None:
225
+ input_data_format = infer_channel_dimension_format(image)
226
+ if input_data_format == ChannelDimension.FIRST:
227
+ return image.ndim - 3
228
+ elif input_data_format == ChannelDimension.LAST:
229
+ return image.ndim - 1
230
+ raise ValueError(f"Unsupported data format: {input_data_format}")
231
+
232
+
233
+ def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:
234
+ """
235
+ Returns the (height, width) dimensions of the image.
236
+
237
+ Args:
238
+ image (`np.ndarray`):
239
+ The image to get the dimensions of.
240
+ channel_dim (`ChannelDimension`, *optional*):
241
+ Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.
242
+
243
+ Returns:
244
+ A tuple of the image's height and width.
245
+ """
246
+ if channel_dim is None:
247
+ channel_dim = infer_channel_dimension_format(image)
248
+
249
+ if channel_dim == ChannelDimension.FIRST:
250
+ return image.shape[-2], image.shape[-1]
251
+ elif channel_dim == ChannelDimension.LAST:
252
+ return image.shape[-3], image.shape[-2]
253
+ else:
254
+ raise ValueError(f"Unsupported data format: {channel_dim}")
255
+
256
+
257
+ def is_valid_annotation_coco_detection(annotation: Dict[str, Union[List, Tuple]]) -> bool:
258
+ if (
259
+ isinstance(annotation, dict)
260
+ and "image_id" in annotation
261
+ and "annotations" in annotation
262
+ and isinstance(annotation["annotations"], (list, tuple))
263
+ and (
264
+ # an image can have no annotations
265
+ len(annotation["annotations"]) == 0 or isinstance(annotation["annotations"][0], dict)
266
+ )
267
+ ):
268
+ return True
269
+ return False
270
+
271
+
272
+ def is_valid_annotation_coco_panoptic(annotation: Dict[str, Union[List, Tuple]]) -> bool:
273
+ if (
274
+ isinstance(annotation, dict)
275
+ and "image_id" in annotation
276
+ and "segments_info" in annotation
277
+ and "file_name" in annotation
278
+ and isinstance(annotation["segments_info"], (list, tuple))
279
+ and (
280
+ # an image can have no segments
281
+ len(annotation["segments_info"]) == 0 or isinstance(annotation["segments_info"][0], dict)
282
+ )
283
+ ):
284
+ return True
285
+ return False
286
+
287
+
288
+ def valid_coco_detection_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool:
289
+ return all(is_valid_annotation_coco_detection(ann) for ann in annotations)
290
+
291
+
292
+ def valid_coco_panoptic_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool:
293
+ return all(is_valid_annotation_coco_panoptic(ann) for ann in annotations)
294
+
295
+
296
+ def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] = None) -> "PIL.Image.Image":
297
+ """
298
+ Loads `image` to a PIL Image.
299
+
300
+ Args:
301
+ image (`str` or `PIL.Image.Image`):
302
+ The image to convert to the PIL Image format.
303
+ timeout (`float`, *optional*):
304
+ The timeout value in seconds for the URL request.
305
+
306
+ Returns:
307
+ `PIL.Image.Image`: A PIL Image.
308
+ """
309
+ requires_backends(load_image, ["vision"])
310
+ if isinstance(image, str):
311
+ if image.startswith("http://") or image.startswith("https://"):
312
+ # We need to actually check for a real protocol, otherwise it's impossible to use a local file
313
+ # like http_huggingface_co.png
314
+ image = PIL.Image.open(BytesIO(requests.get(image, timeout=timeout).content))
315
+ elif os.path.isfile(image):
316
+ image = PIL.Image.open(image)
317
+ else:
318
+ if image.startswith("data:image/"):
319
+ image = image.split(",")[1]
320
+
321
+ # Try to load as base64
322
+ try:
323
+ b64 = base64.b64decode(image, validate=True)
324
+ image = PIL.Image.open(BytesIO(b64))
325
+ except Exception as e:
326
+ raise ValueError(
327
+ f"Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path to an image file, or a base64 encoded string. Got {image}. Failed with {e}"
328
+ )
329
+ elif isinstance(image, PIL.Image.Image):
330
+ image = image
331
+ else:
332
+ raise ValueError(
333
+ "Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image."
334
+ )
335
+ image = PIL.ImageOps.exif_transpose(image)
336
+ image = image.convert("RGB")
337
+ return image
338
+
339
+
340
+ def validate_preprocess_arguments(
341
+ do_rescale: Optional[bool] = None,
342
+ rescale_factor: Optional[float] = None,
343
+ do_normalize: Optional[bool] = None,
344
+ image_mean: Optional[Union[float, List[float]]] = None,
345
+ image_std: Optional[Union[float, List[float]]] = None,
346
+ do_pad: Optional[bool] = None,
347
+ size_divisibility: Optional[int] = None,
348
+ do_center_crop: Optional[bool] = None,
349
+ crop_size: Optional[Dict[str, int]] = None,
350
+ do_resize: Optional[bool] = None,
351
+ size: Optional[Dict[str, int]] = None,
352
+ resample: Optional["PILImageResampling"] = None,
353
+ ):
354
+ """
355
+ Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.
356
+ Raises `ValueError` if any argument incompatibility is caught.
357
+ Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,
358
+ sometimes `size_divisibility`, and sometimes `size`. Newly added models and processors should follow
359
+ the existing argument names when possible.
360
+
361
+ """
362
+ if do_rescale and rescale_factor is None:
363
+ raise ValueError("rescale_factor must be specified if do_rescale is True.")
364
+
365
+ if do_pad and size_divisibility is None:
366
+ # Here, size_divisor might be passed as the value of size
367
+ raise ValueError(
368
+ "Depending on moel, size_divisibility, size_divisor, pad_size or size must be specified if do_pad is True."
369
+ )
370
+
371
+ if do_normalize and (image_mean is None or image_std is None):
372
+ raise ValueError("image_mean and image_std must both be specified if do_normalize is True.")
373
+
374
+ if do_center_crop and crop_size is None:
375
+ raise ValueError("crop_size must be specified if do_center_crop is True.")
376
+
377
+ if do_resize and (size is None or resample is None):
378
+ raise ValueError("size and resample must be specified if do_resize is True.")
379
+
380
+
381
+ # In the future we can add a TF implementation here when we have TF models.
382
+ class ImageFeatureExtractionMixin:
383
+ """
384
+ Mixin that contains utilities for preparing image features.
385
+ """
386
+
387
+ def _ensure_format_supported(self, image):
388
+ if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image):
389
+ raise ValueError(
390
+ f"Got type {type(image)} which is not supported, only `PIL.Image.Image`, `np.array` and "
391
+ "`torch.Tensor` are."
392
+ )
393
+
394
+ def to_pil_image(self, image, rescale=None):
395
+ """
396
+ Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
397
+ needed.
398
+
399
+ Args:
400
+ image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
401
+ The image to convert to the PIL Image format.
402
+ rescale (`bool`, *optional*):
403
+ Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
404
+ default to `True` if the image type is a floating type, `False` otherwise.
405
+ """
406
+ self._ensure_format_supported(image)
407
+
408
+ if is_torch_tensor(image):
409
+ image = image.numpy()
410
+
411
+ if isinstance(image, np.ndarray):
412
+ if rescale is None:
413
+ # rescale default to the array being of floating type.
414
+ rescale = isinstance(image.flat[0], np.floating)
415
+ # If the channel has been moved to the first dim, we put it back at the end.
416
+ if image.ndim == 3 and image.shape[0] in [1, 3]:
417
+ image = image.transpose(1, 2, 0)
418
+ if rescale:
419
+ image = image * 255
420
+ image = image.astype(np.uint8)
421
+ return PIL.Image.fromarray(image)
422
+ return image
423
+
424
+ def convert_rgb(self, image):
425
+ """
426
+ Converts `PIL.Image.Image` to RGB format.
427
+
428
+ Args:
429
+ image (`PIL.Image.Image`):
430
+ The image to convert.
431
+ """
432
+ self._ensure_format_supported(image)
433
+ if not isinstance(image, PIL.Image.Image):
434
+ return image
435
+
436
+ return image.convert("RGB")
437
+
438
+ def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray:
439
+ """
440
+ Rescale a numpy image by scale amount
441
+ """
442
+ self._ensure_format_supported(image)
443
+ return image * scale
444
+
445
+ def to_numpy_array(self, image, rescale=None, channel_first=True):
446
+ """
447
+ Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first
448
+ dimension.
449
+
450
+ Args:
451
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
452
+ The image to convert to a NumPy array.
453
+ rescale (`bool`, *optional*):
454
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will
455
+ default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise.
456
+ channel_first (`bool`, *optional*, defaults to `True`):
457
+ Whether or not to permute the dimensions of the image to put the channel dimension first.
458
+ """
459
+ self._ensure_format_supported(image)
460
+
461
+ if isinstance(image, PIL.Image.Image):
462
+ image = np.array(image)
463
+
464
+ if is_torch_tensor(image):
465
+ image = image.numpy()
466
+
467
+ rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale
468
+
469
+ if rescale:
470
+ image = self.rescale(image.astype(np.float32), 1 / 255.0)
471
+
472
+ if channel_first and image.ndim == 3:
473
+ image = image.transpose(2, 0, 1)
474
+
475
+ return image
476
+
477
+ def expand_dims(self, image):
478
+ """
479
+ Expands 2-dimensional `image` to 3 dimensions.
480
+
481
+ Args:
482
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
483
+ The image to expand.
484
+ """
485
+ self._ensure_format_supported(image)
486
+
487
+ # Do nothing if PIL image
488
+ if isinstance(image, PIL.Image.Image):
489
+ return image
490
+
491
+ if is_torch_tensor(image):
492
+ image = image.unsqueeze(0)
493
+ else:
494
+ image = np.expand_dims(image, axis=0)
495
+ return image
496
+
497
+ def normalize(self, image, mean, std, rescale=False):
498
+ """
499
+ Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array
500
+ if it's a PIL Image.
501
+
502
+ Args:
503
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
504
+ The image to normalize.
505
+ mean (`List[float]` or `np.ndarray` or `torch.Tensor`):
506
+ The mean (per channel) to use for normalization.
507
+ std (`List[float]` or `np.ndarray` or `torch.Tensor`):
508
+ The standard deviation (per channel) to use for normalization.
509
+ rescale (`bool`, *optional*, defaults to `False`):
510
+ Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will
511
+ happen automatically.
512
+ """
513
+ self._ensure_format_supported(image)
514
+
515
+ if isinstance(image, PIL.Image.Image):
516
+ image = self.to_numpy_array(image, rescale=True)
517
+ # If the input image is a PIL image, it automatically gets rescaled. If it's another
518
+ # type it may need rescaling.
519
+ elif rescale:
520
+ if isinstance(image, np.ndarray):
521
+ image = self.rescale(image.astype(np.float32), 1 / 255.0)
522
+ elif is_torch_tensor(image):
523
+ image = self.rescale(image.float(), 1 / 255.0)
524
+
525
+ if isinstance(image, np.ndarray):
526
+ if not isinstance(mean, np.ndarray):
527
+ mean = np.array(mean).astype(image.dtype)
528
+ if not isinstance(std, np.ndarray):
529
+ std = np.array(std).astype(image.dtype)
530
+ elif is_torch_tensor(image):
531
+ import torch
532
+
533
+ if not isinstance(mean, torch.Tensor):
534
+ mean = torch.tensor(mean)
535
+ if not isinstance(std, torch.Tensor):
536
+ std = torch.tensor(std)
537
+
538
+ if image.ndim == 3 and image.shape[0] in [1, 3]:
539
+ return (image - mean[:, None, None]) / std[:, None, None]
540
+ else:
541
+ return (image - mean) / std
542
+
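The per-channel broadcasting in `normalize` is the only subtle part: for a channel-first array, `mean` and `std` are reshaped to `(C, 1, 1)` so they apply to each channel plane. A plain NumPy sketch of the same arithmetic (the mean/std values are the common ImageNet statistics, used here purely as an example):

```py
import numpy as np

image = np.random.rand(3, 4, 4).astype(np.float32)          # channel-first, values in [0, 1]
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)

# Same expression as the channel-first branch above.
normalized = (image - mean[:, None, None]) / std[:, None, None]

# Each channel is shifted and scaled independently.
print(normalized.shape)                                        # (3, 4, 4)
print(np.allclose(normalized[0], (image[0] - 0.485) / 0.229))  # True
```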
543
+ def resize(self, image, size, resample=None, default_to_square=True, max_size=None):
544
+ """
545
+ Resizes `image`. Enforces conversion of input to PIL.Image.
546
+
547
+ Args:
548
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
549
+ The image to resize.
550
+ size (`int` or `Tuple[int, int]`):
551
+ The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be
552
+ matched to this.
553
+
554
+ If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
555
+ `size` is an int and `default_to_square` is `False`, then the smaller edge of the image will be matched to
+ this number, i.e., if height > width, then the image will be rescaled to (size * height / width, size).
557
+ resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
558
+ The filter to use for resampling.
559
+ default_to_square (`bool`, *optional*, defaults to `True`):
560
+ How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a
561
+ square (`size`,`size`). If set to `False`, will replicate
562
+ [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
563
+ with support for resizing only the smallest edge and providing an optional `max_size`.
564
+ max_size (`int`, *optional*, defaults to `None`):
565
+ The maximum allowed for the longer edge of the resized image: if the longer edge of the image is
566
+ greater than `max_size` after being resized according to `size`, then the image is resized again so
567
+ that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller
568
+ edge may be shorter than `size`. Only used if `default_to_square` is `False`.
569
+
570
+ Returns:
571
+ image: A resized `PIL.Image.Image`.
572
+ """
573
+ resample = resample if resample is not None else PILImageResampling.BILINEAR
574
+
575
+ self._ensure_format_supported(image)
576
+
577
+ if not isinstance(image, PIL.Image.Image):
578
+ image = self.to_pil_image(image)
579
+
580
+ if isinstance(size, list):
581
+ size = tuple(size)
582
+
583
+ if isinstance(size, int) or len(size) == 1:
584
+ if default_to_square:
585
+ size = (size, size) if isinstance(size, int) else (size[0], size[0])
586
+ else:
587
+ width, height = image.size
588
+ # specified size only for the smallest edge
589
+ short, long = (width, height) if width <= height else (height, width)
590
+ requested_new_short = size if isinstance(size, int) else size[0]
591
+
592
+ if short == requested_new_short:
593
+ return image
594
+
595
+ new_short, new_long = requested_new_short, int(requested_new_short * long / short)
596
+
597
+ if max_size is not None:
598
+ if max_size <= requested_new_short:
599
+ raise ValueError(
600
+ f"max_size = {max_size} must be strictly greater than the requested "
601
+ f"size for the smaller edge size = {size}"
602
+ )
603
+ if new_long > max_size:
604
+ new_short, new_long = int(max_size * new_short / new_long), max_size
605
+
606
+ size = (new_short, new_long) if width <= height else (new_long, new_short)
607
+
608
+ return image.resize(size, resample=resample)
609
+
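The smallest-edge branch (`default_to_square=False`) is easiest to follow numerically. The sketch below reproduces the arithmetic above for a 640x480 image resized with `size=256`; it is an illustration of the formula, not a call into the library.

```py
# 640x480 landscape image, PIL convention: size = (width, height)
width, height = 640, 480
requested_new_short = 256

short, long = (width, height) if width <= height else (height, width)  # short=480, long=640
new_short = requested_new_short
new_long = int(requested_new_short * long / short)                     # int(256 * 640 / 480) = 341

# Reassemble in (width, height) order for PIL.Image.resize
size = (new_short, new_long) if width <= height else (new_long, new_short)
print(size)  # (341, 256): the shorter edge (the height) now equals 256
```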
610
+ def center_crop(self, image, size):
611
+ """
612
+ Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the
613
+ size given, it will be padded (so the returned result has the requested size).
614
+
615
+ Args:
616
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)):
617
+ The image to resize.
618
+ size (`int` or `Tuple[int, int]`):
619
+ The size to which crop the image.
620
+
621
+ Returns:
622
+ new_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels,
623
+ height, width).
624
+ """
625
+ self._ensure_format_supported(image)
626
+
627
+ if not isinstance(size, tuple):
628
+ size = (size, size)
629
+
630
+ # PIL Image.size is (width, height) but NumPy array and torch Tensors have (height, width)
631
+ if is_torch_tensor(image) or isinstance(image, np.ndarray):
632
+ if image.ndim == 2:
633
+ image = self.expand_dims(image)
634
+ image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2]
635
+ else:
636
+ image_shape = (image.size[1], image.size[0])
637
+
638
+ top = (image_shape[0] - size[0]) // 2
639
+ bottom = top + size[0] # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
640
+ left = (image_shape[1] - size[1]) // 2
641
+ right = left + size[1] # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.
642
+
643
+ # For PIL Images we have a method to crop directly.
644
+ if isinstance(image, PIL.Image.Image):
645
+ return image.crop((left, top, right, bottom))
646
+
647
+ # Check if image is in (n_channels, height, width) or (height, width, n_channels) format
648
+ channel_first = True if image.shape[0] in [1, 3] else False
649
+
650
+ # Transpose (height, width, n_channels) format images
651
+ if not channel_first:
652
+ if isinstance(image, np.ndarray):
653
+ image = image.transpose(2, 0, 1)
654
+ if is_torch_tensor(image):
655
+ image = image.permute(2, 0, 1)
656
+
657
+ # Check if cropped area is within image boundaries
658
+ if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:
659
+ return image[..., top:bottom, left:right]
660
+
661
+ # Otherwise, we may need to pad if the image is too small. Oh joy...
662
+ new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))
663
+ if isinstance(image, np.ndarray):
664
+ new_image = np.zeros_like(image, shape=new_shape)
665
+ elif is_torch_tensor(image):
666
+ new_image = image.new_zeros(new_shape)
667
+
668
+ top_pad = (new_shape[-2] - image_shape[0]) // 2
669
+ bottom_pad = top_pad + image_shape[0]
670
+ left_pad = (new_shape[-1] - image_shape[1]) // 2
671
+ right_pad = left_pad + image_shape[1]
672
+ new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image
673
+
674
+ top += top_pad
675
+ bottom += top_pad
676
+ left += left_pad
677
+ right += left_pad
678
+
679
+ new_image = new_image[
680
+ ..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)
681
+ ]
682
+
683
+ return new_image
684
+
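The crop box is symmetric around the image center, and the padding branch only runs when the requested crop is larger than the image. A short sketch of the plain (no padding) case, again assuming an `ImageFeatureExtractionMixin` instance:

```py
import numpy as np
from transformers.image_utils import ImageFeatureExtractionMixin  # assumed host class

mixin = ImageFeatureExtractionMixin()

image = np.arange(3 * 8 * 8, dtype=np.float32).reshape(3, 8, 8)  # channel-first (C, H, W)
cropped = mixin.center_crop(image, (4, 4))

# top = (8 - 4) // 2 = 2, bottom = 6, and the same for left/right,
# so the crop keeps rows/columns 2..5 of each channel.
print(cropped.shape)                                # (3, 4, 4)
print(np.array_equal(cropped, image[:, 2:6, 2:6]))  # True
```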
685
+ def flip_channel_order(self, image):
686
+ """
687
+ Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of
688
+ `image` to a NumPy array if it's a PIL Image.
689
+
690
+ Args:
691
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
692
+ The image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimension should
693
+ be first.
694
+ """
695
+ self._ensure_format_supported(image)
696
+
697
+ if isinstance(image, PIL.Image.Image):
698
+ image = self.to_numpy_array(image)
699
+
700
+ return image[::-1, :, :]
701
+
702
+ def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None):
703
+ """
704
+ Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees
705
+ counter clockwise around its centre.
706
+
707
+ Args:
708
+ image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
709
+ The image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before
710
+ rotating.
711
+
712
+ Returns:
713
+ image: A rotated `PIL.Image.Image`.
714
+ """
715
+ resample = resample if resample is not None else PIL.Image.NEAREST
716
+
717
+ self._ensure_format_supported(image)
718
+
719
+ if not isinstance(image, PIL.Image.Image):
720
+ image = self.to_pil_image(image)
721
+
722
+ return image.rotate(
723
+ angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor
724
+ )
725
+
726
+
727
+ def promote_annotation_format(annotation_format: Union[AnnotionFormat, AnnotationFormat]) -> AnnotationFormat:
728
+ # can be removed when `AnnotionFormat` is fully deprecated
729
+ return AnnotationFormat(annotation_format.value)
730
+
731
+
732
+ def validate_annotations(
733
+ annotation_format: AnnotationFormat,
734
+ supported_annotation_formats: Tuple[AnnotationFormat, ...],
735
+ annotations: List[Dict],
736
+ ) -> None:
737
+ if isinstance(annotation_format, AnnotionFormat):
738
+ logger.warning_once(
739
+ f"`{annotation_format.__class__.__name__}` is deprecated and will be removed in v4.38. "
740
+ f"Please use `{AnnotationFormat.__name__}` instead."
741
+ )
742
+ annotation_format = promote_annotation_format(annotation_format)
743
+
744
+ if annotation_format not in supported_annotation_formats:
745
+ raise ValueError(f"Unsupported annotation format: {format} must be one of {supported_annotation_formats}")
746
+
747
+ if annotation_format is AnnotationFormat.COCO_DETECTION:
748
+ if not valid_coco_detection_annotations(annotations):
749
+ raise ValueError(
750
+ "Invalid COCO detection annotations. Annotations must a dict (single image) or list of dicts "
751
+ "(batch of images) with the following keys: `image_id` and `annotations`, with the latter "
752
+ "being a list of annotations in the COCO format."
753
+ )
754
+
755
+ if annotation_format is AnnotationFormat.COCO_PANOPTIC:
756
+ if not valid_coco_panoptic_annotations(annotations):
757
+ raise ValueError(
758
+ "Invalid COCO panoptic annotations. Annotations must a dict (single image) or list of dicts "
759
+ "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with "
760
+ "the latter being a list of annotations in the COCO format."
761
+ )
762
+
763
+
764
+ def validate_kwargs(valid_processor_keys: List[str], captured_kwargs: List[str]):
765
+ unused_keys = set(captured_kwargs).difference(set(valid_processor_keys))
766
+ if unused_keys:
767
+ unused_key_str = ", ".join(unused_keys)
768
+ # TODO raise a warning here instead of simply logging?
769
+ logger.warning(f"Unused or unrecognized kwargs: {unused_key_str}.")
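`validate_kwargs` is just a set difference plus a warning. A minimal sketch, assuming the function is importable from `transformers.image_utils` as defined here (the key names are made up for the example):

```py
from transformers.image_utils import validate_kwargs  # assumed import path for the function above

# "images" and "size" are accepted; "sise" is a typo and gets reported in the warning.
validate_kwargs(
    valid_processor_keys=["images", "do_resize", "size", "return_tensors"],
    captured_kwargs=["images", "size", "sise"],
)
# Logs: "Unused or unrecognized kwargs: sise."
```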
venv/lib/python3.10/site-packages/transformers/keras_callbacks.py ADDED
@@ -0,0 +1,413 @@
1
+ import logging
2
+ import os
3
+ from pathlib import Path
4
+ from time import sleep
5
+ from typing import Callable, List, Optional, Union
6
+
7
+ import numpy as np
8
+ import tensorflow as tf
9
+ from huggingface_hub import Repository, create_repo
10
+ from packaging.version import parse
11
+
12
+ from . import IntervalStrategy, PreTrainedTokenizerBase
13
+ from .modelcard import TrainingSummary
14
+ from .modeling_tf_utils import keras
15
+
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ class KerasMetricCallback(keras.callbacks.Callback):
21
+ """
22
+ Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be
23
+ compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string
24
+ operations or generation loops that cannot be compiled. Predictions (or generations) will be computed on the
25
+ `eval_dataset` before being passed to the `metric_fn` in `np.ndarray` format. The `metric_fn` should compute
26
+ metrics and return a dict mapping metric names to metric values.
27
+
28
+ We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. Note that
29
+ this example skips some post-processing for readability and simplicity, and should probably not be used as-is!
30
+
31
+ ```py
32
+ from datasets import load_metric
33
+
34
+ rouge_metric = load_metric("rouge")
35
+
36
+
37
+ def rouge_fn(predictions, labels):
38
+ decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=True)
39
+ decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
40
+ result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels)
41
+ return {key: value.mid.fmeasure * 100 for key, value in result.items()}
42
+ ```
43
+
44
+ The above function will return a dict containing values which will be logged like any other Keras metric:
45
+
46
+ ```
47
+ {'rouge1': 37.4199, 'rouge2': 13.9768, 'rougeL': 34.361, 'rougeLsum': 35.0781}
48
+ ```
49
+
50
+ Args:
51
+ metric_fn (`Callable`):
52
+ Metric function provided by the user. It will be called with two arguments - `predictions` and `labels`.
53
+ These contain the model's outputs and matching labels from the dataset. It should return a dict mapping
54
+ metric names to numerical values.
55
+ eval_dataset (`tf.data.Dataset` or `dict` or `tuple` or `np.ndarray` or `tf.Tensor`):
56
+ Validation data to be used to generate predictions for the `metric_fn`.
57
+ output_cols (`List[str]`, *optional*):
58
+ A list of columns to be retained from the model output as the predictions. Defaults to all.
59
+ label_cols (`List[str]`, *optional*):
60
+ A list of columns to be retained from the input dataset as the labels. Will be autodetected if this is not
61
+ supplied.
62
+ batch_size (`int`, *optional*):
63
+ Batch size. Only used when the data is not a pre-batched `tf.data.Dataset`.
64
+ predict_with_generate (`bool`, *optional*, defaults to `False`):
65
+ Whether we should use `model.generate()` to get outputs for the model.
66
+ use_xla_generation (`bool`, *optional*, defaults to `False`):
67
+ If we're generating, whether to compile model generation with XLA. This can massively increase the speed of
68
+ generation (up to 100X speedup) but will require a new XLA compilation for each input shape. When using XLA
69
+ generation, it's a good idea to pad your inputs to the same size, or to use the `pad_to_multiple_of`
70
+ argument in your `tokenizer` or `DataCollator`, which will reduce the number of unique input shapes and
71
+ save a lot of compilation time. This option has no effect if `predict_with_generate` is `False`.
72
+ generate_kwargs (`dict`, *optional*):
73
+ Keyword arguments to pass to `model.generate()` when generating. Has no effect if `predict_with_generate`
74
+ is `False`.
75
+
76
+ """
77
+
78
+ def __init__(
79
+ self,
80
+ metric_fn: Callable,
81
+ eval_dataset: Union[tf.data.Dataset, np.ndarray, tf.Tensor, tuple, dict],
82
+ output_cols: Optional[List[str]] = None,
83
+ label_cols: Optional[List[str]] = None,
84
+ batch_size: Optional[int] = None,
85
+ predict_with_generate: bool = False,
86
+ use_xla_generation: bool = False,
87
+ generate_kwargs: Optional[dict] = None,
88
+ ):
89
+ super().__init__()
90
+ self.metric_fn = metric_fn
91
+ self.batch_size = batch_size
92
+ if not isinstance(eval_dataset, tf.data.Dataset):
93
+ if batch_size is None:
94
+ raise ValueError(
95
+ "When passing data to KerasMetricCallback that is not a pre-batched tf.data.Dataset "
96
+ "the batch_size argument must be set."
97
+ )
98
+ # Wrap a tf.data.Dataset around it
99
+ eval_dataset = tf.data.Dataset.from_tensor_slices(eval_dataset).batch(batch_size, drop_remainder=False)
100
+ self.eval_dataset = eval_dataset
101
+ self.predict_with_generate = predict_with_generate
102
+ self.output_cols = output_cols
103
+
104
+ # This next block attempts to parse out which elements of the dataset should be appended to the labels list
105
+ # that is passed to the metric_fn
106
+ if isinstance(eval_dataset.element_spec, tuple) and len(eval_dataset.element_spec) == 2:
107
+ input_spec, label_spec = eval_dataset.element_spec
108
+ else:
109
+ input_spec = eval_dataset.element_spec
110
+ label_spec = None
111
+ if label_cols is not None:
112
+ for label in label_cols:
113
+ if label not in input_spec:
114
+ raise ValueError(f"Label {label} is in label_cols but could not be found in the dataset inputs!")
115
+ self.label_cols = label_cols
116
+ self.use_keras_label = False
117
+ elif label_spec is not None:
118
+ # If the dataset inputs are split into a 2-tuple of inputs and labels,
119
+ # assume the second element is the labels
120
+ self.label_cols = None
121
+ self.use_keras_label = True
122
+ elif "labels" in input_spec:
123
+ self.label_cols = ["labels"]
124
+ self.use_keras_label = False
125
+ logging.warning("No label_cols specified for KerasMetricCallback, assuming you want the 'labels' key.")
126
+ elif "start_positions" in input_spec and "end_positions" in input_spec:
127
+ self.label_cols = ["start_positions", "end_positions"]
128
+ self.use_keras_label = False
129
+ logging.warning(
130
+ "No label_cols specified for KerasMetricCallback, assuming you want the "
131
+ "start_positions and end_positions keys."
132
+ )
133
+ else:
134
+ raise ValueError("Could not autodetect label_cols for KerasMetricCallback, please specify them!")
135
+ if parse(tf.__version__) < parse("2.7"):
136
+ logging.warning("TF versions less than 2.7 may encounter issues with KerasMetricCallback!")
137
+
138
+ self.use_xla_generation = use_xla_generation
139
+ self.generate_kwargs = {} if generate_kwargs is None else generate_kwargs
140
+
141
+ self.generation_function = None
142
+
143
+ @staticmethod
144
+ def _concatenate_batches(batches, padding_index=-100):
145
+ # If all batches are unidimensional or same length, do a simple concatenation
146
+ if batches[0].ndim == 1 or all(batch.shape[1] == batches[0].shape[1] for batch in batches):
147
+ return np.concatenate(batches, axis=0)
148
+
149
+ # Welp, they're not the same length. Let's do some padding
150
+ max_len = max([batch.shape[1] for batch in batches])
151
+ num_samples = sum([batch.shape[0] for batch in batches])
152
+ output = np.full_like(
153
+ batches[0], fill_value=padding_index, shape=[num_samples, max_len] + list(batches[0].shape[2:])
154
+ )
155
+ # i keeps track of which part of the concatenated array we're writing the next batch to
156
+ i = 0
157
+ for batch in batches:
158
+ output[i : i + len(batch), : batch.shape[1]] = batch
159
+ i += len(batch)
160
+ return output
161
+
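The ragged-batch branch pads every batch out to the longest sequence length with `padding_index` before stacking. A standalone NumPy sketch of the same idea, using `-100` as above:

```py
import numpy as np

batches = [
    np.array([[1, 2, 3], [4, 5, 6]]),  # batch of 2 samples, length 3
    np.array([[7, 8, 9, 10]]),         # batch of 1 sample, length 4
]

max_len = max(batch.shape[1] for batch in batches)
num_samples = sum(batch.shape[0] for batch in batches)
output = np.full((num_samples, max_len), fill_value=-100, dtype=batches[0].dtype)

i = 0  # next free row in the output array
for batch in batches:
    output[i : i + len(batch), : batch.shape[1]] = batch
    i += len(batch)

print(output)
# [[   1    2    3 -100]
#  [   4    5    6 -100]
#  [   7    8    9   10]]
```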
162
+ def _postprocess_predictions_or_labels(self, inputs):
163
+ if isinstance(inputs[0], dict):
164
+ outputs = {}
165
+ for key in inputs[0].keys():
166
+ outputs[key] = self._concatenate_batches([batch[key] for batch in inputs])
167
+ # If it's a dict with only one key, just return the array
168
+ if len(outputs) == 1:
169
+ outputs = list(outputs.values())[0]
170
+ elif isinstance(inputs[0], list) or isinstance(inputs[0], tuple):
171
+ outputs = []
172
+ for input_list in zip(*inputs):
173
+ outputs.append(self._concatenate_batches(input_list))
174
+ if len(outputs) == 1:
175
+ outputs = outputs[0] # If it's a list with only one element, just return the array
176
+ elif isinstance(inputs[0], np.ndarray):
177
+ outputs = self._concatenate_batches(inputs)
178
+ elif isinstance(inputs[0], tf.Tensor):
179
+ outputs = self._concatenate_batches([tensor.numpy() for tensor in inputs])
180
+ else:
181
+ raise TypeError(f"Couldn't handle batch of type {type(inputs[0])}!")
182
+ return outputs
183
+
184
+ def on_epoch_end(self, epoch, logs=None):
185
+ if hasattr(self.model, "config"):
186
+ ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
187
+ else:
188
+ ignore_keys = []
189
+
190
+ main_input_name = None
191
+ if self.predict_with_generate:
192
+ # This dense conditional recognizes the case where we have an encoder-decoder model, but
193
+ # avoids getting tangled up when we just have a model with a layer called 'encoder'
194
+ if hasattr(self.model, "encoder") and hasattr(self.model.encoder, "main_input_name"):
195
+ main_input_name = self.model.encoder.main_input_name
196
+ else:
197
+ main_input_name = getattr(self.model, "main_input_name", "input_ids")
198
+
199
+ if self.use_xla_generation and self.generation_function is None:
200
+
201
+ def generation_function(inputs, attention_mask):
202
+ return self.model.generate(inputs, attention_mask=attention_mask, **self.generate_kwargs)
203
+
204
+ self.generation_function = tf.function(generation_function, jit_compile=True)
205
+
206
+ prediction_list = []
207
+ label_list = []
208
+
209
+ # The whole predict/generate loop is handled inside this method
210
+ for batch in self.eval_dataset:
211
+ if isinstance(batch, tuple):
212
+ batch, labels = batch
213
+ else:
214
+ labels = None
215
+ if self.predict_with_generate:
216
+ if isinstance(batch, dict):
217
+ generation_inputs = batch[main_input_name]
218
+ attention_mask = batch.get("attention_mask", None)
219
+ else:
220
+ generation_inputs = batch
221
+ attention_mask = None
222
+ if self.use_xla_generation:
223
+ predictions = self.generation_function(generation_inputs, attention_mask=attention_mask)
224
+ else:
225
+ predictions = self.model.generate(
226
+ generation_inputs, attention_mask=attention_mask, **self.generate_kwargs
227
+ )
228
+ else:
229
+ predictions = self.model.predict_on_batch(batch)
230
+ if isinstance(predictions, dict):
231
+ # This converts any dict-subclass to a regular dict
232
+ # Keras REALLY doesn't like it when we pass around a BatchEncoding or other derived class
233
+ predictions = dict(predictions)
234
+ if self.output_cols is not None:
235
+ predictions = {key: predictions[key] for key in self.output_cols}
236
+ else:
237
+ predictions = {
238
+ key: val for key, val in predictions.items() if key not in ignore_keys + ["loss"]
239
+ }
240
+ prediction_list.append(predictions)
241
+ if not self.use_keras_label:
242
+ labels = {key: batch[key].numpy() for key in self.label_cols}
243
+ elif isinstance(labels, dict):
244
+ labels = {key: array.numpy() for key, array in labels.items()}
245
+ elif isinstance(labels, list) or isinstance(labels, tuple):
246
+ labels = [array.numpy() for array in labels]
247
+ elif isinstance(labels, tf.Tensor):
248
+ labels = labels.numpy()
249
+ else:
250
+ raise TypeError(f"Confused by labels of type {type(labels)}")
251
+ label_list.append(labels)
252
+
253
+ all_preds = self._postprocess_predictions_or_labels(prediction_list)
254
+ all_labels = self._postprocess_predictions_or_labels(label_list)
255
+
256
+ metric_output = self.metric_fn((all_preds, all_labels))
257
+ if not isinstance(metric_output, dict):
258
+ raise TypeError(
259
+ f"metric_fn should return a dict mapping metric names to values but instead returned {metric_output}"
260
+ )
261
+ # This is the critical bit - Keras passes a dict containing the loss and standard metric values for this epoch
262
+ # in the logs argument. Ordinarily, this is so the callback can read them, but in this case we write a bunch of
263
+ # new keys in there, which will then get read by the History callback and treated like any other metric value.
264
+ # I promise that I have it in writing from Chollet that this is okay.
265
+ logs.update(metric_output)
266
+
267
+
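Putting the pieces together, the callback only needs a `metric_fn` and an evaluation set. The sketch below wires it into a toy Keras model; the model, data and metric are invented purely for illustration and are not taken from the library docs.

```py
import numpy as np
import tensorflow as tf
from transformers.keras_callbacks import KerasMetricCallback

# Toy regression model and validation set, only to show how the callback is wired up.
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="adam", loss="mse")

x = np.random.rand(32, 4).astype(np.float32)
y = np.random.rand(32, 1).astype(np.float32)
val_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(8)


def metric_fn(eval_predictions):
    predictions, labels = eval_predictions
    return {"mae": float(np.mean(np.abs(predictions - labels)))}


metric_callback = KerasMetricCallback(metric_fn=metric_fn, eval_dataset=val_ds)
model.fit(x, y, epochs=1, callbacks=[metric_callback], verbose=0)
```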
268
+ class PushToHubCallback(keras.callbacks.Callback):
269
+ """
270
+ Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can
271
+ be changed with the `save_strategy` argument. Pushed models can be accessed like any other model on the hub, such
272
+ as with the `from_pretrained` method.
273
+
274
+ ```py
275
+ from transformers.keras_callbacks import PushToHubCallback
276
+
277
+ push_to_hub_callback = PushToHubCallback(
278
+ output_dir="./model_save",
279
+ tokenizer=tokenizer,
280
+ hub_model_id="gpt5-7xlarge",
281
+ )
282
+
283
+ model.fit(train_dataset, callbacks=[push_to_hub_callback])
284
+ ```
285
+
286
+ Args:
287
+ output_dir (`str`):
288
+ The output directory where the model predictions and checkpoints will be written and synced with the
289
+ repository on the Hub.
290
+ save_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"epoch"`):
291
+ The checkpoint save strategy to adopt during training. Possible values are:
292
+
293
+ - `"no"`: Save is done at the end of training.
294
+ - `"epoch"`: Save is done at the end of each epoch.
295
+ - `"steps"`: Save is done every `save_steps`
296
+ save_steps (`int`, *optional*):
297
+ The number of steps between saves when using the "steps" `save_strategy`.
298
+ tokenizer (`PreTrainedTokenizerBase`, *optional*):
299
+ The tokenizer used by the model. If supplied, will be uploaded to the repo alongside the weights.
300
+ hub_model_id (`str`, *optional*):
301
+ The name of the repository to keep in sync with the local `output_dir`. It can be a simple model ID in
302
+ which case the model will be pushed in your namespace. Otherwise it should be the whole repository name,
303
+ for instance `"user_name/model"`, which allows you to push to an organization you are a member of with
304
+ `"organization_name/model"`.
305
+
306
+ Will default to the name of `output_dir`.
307
+ hub_token (`str`, *optional*):
308
+ The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with
309
+ `huggingface-cli login`.
310
+ checkpoint (`bool`, *optional*, defaults to `False`):
311
+ Whether to save full training checkpoints (including epoch and optimizer state) to allow training to be
312
+ resumed. Only usable when `save_strategy` is `"epoch"`.
313
+ """
314
+
315
+ def __init__(
316
+ self,
317
+ output_dir: Union[str, Path],
318
+ save_strategy: Union[str, IntervalStrategy] = "epoch",
319
+ save_steps: Optional[int] = None,
320
+ tokenizer: Optional[PreTrainedTokenizerBase] = None,
321
+ hub_model_id: Optional[str] = None,
322
+ hub_token: Optional[str] = None,
323
+ checkpoint: bool = False,
324
+ **model_card_args,
325
+ ):
326
+ super().__init__()
327
+ if checkpoint and save_strategy != "epoch":
328
+ raise ValueError("Cannot save checkpoints when save_strategy is not 'epoch'!")
329
+ if isinstance(save_strategy, str):
330
+ save_strategy = IntervalStrategy(save_strategy.lower())
331
+ self.save_strategy = save_strategy
332
+ if self.save_strategy == IntervalStrategy.STEPS and (not isinstance(save_steps, int) or save_steps <= 0):
333
+ raise ValueError("Please supply a positive integer argument for save_steps when save_strategy == 'steps'!")
334
+ self.save_steps = save_steps
335
+ output_dir = Path(output_dir)
336
+
337
+ # Create repo and retrieve repo_id
338
+ if hub_model_id is None:
339
+ hub_model_id = output_dir.absolute().name
340
+ self.hub_model_id = create_repo(repo_id=hub_model_id, exist_ok=True, token=hub_token).repo_id
341
+
342
+ self.output_dir = output_dir
343
+ self.repo = Repository(str(self.output_dir), clone_from=self.hub_model_id, token=hub_token)
344
+
345
+ self.tokenizer = tokenizer
346
+ self.last_job = None
347
+ self.checkpoint = checkpoint
348
+ self.training_history = None
349
+ self.model_card_args = model_card_args
350
+
351
+ def on_train_begin(self, logs=None):
352
+ # Although we can access model.history, we have no guarantees that the History callback will fire before this
353
+ # one, so we keep track of it here too
354
+ self.training_history = []
355
+
356
+ def on_train_batch_end(self, batch, logs=None):
357
+ if self.save_strategy == IntervalStrategy.STEPS and (batch + 1) % self.save_steps == 0:
358
+ if self.last_job is not None and not self.last_job.is_done:
359
+ return # The last upload is still running, don't start another
360
+ self.model.save_pretrained(self.output_dir)
361
+ if self.tokenizer is not None:
362
+ self.tokenizer.save_pretrained(self.output_dir)
363
+ _, self.last_job = self.repo.push_to_hub(
364
+ commit_message=f"Training in progress steps {batch}", blocking=False
365
+ )
366
+
367
+ def on_epoch_end(self, epoch, logs=None):
368
+ logs = logs.copy() # Don't accidentally write things that Keras will read later
369
+ if "epoch" not in logs:
370
+ logs["epoch"] = epoch
371
+ self.training_history.append(logs)
372
+ if self.save_strategy == IntervalStrategy.EPOCH:
373
+ if self.last_job is not None and not self.last_job.is_done:
374
+ return # The last upload is still running, don't start another
375
+ self.model.save_pretrained(self.output_dir)
376
+ if self.tokenizer is not None:
377
+ self.tokenizer.save_pretrained(self.output_dir)
378
+ if self.checkpoint:
379
+ checkpoint_dir = os.path.join(self.output_dir, "checkpoint")
380
+ self.model._save_checkpoint(checkpoint_dir, epoch)
381
+ train_summary = TrainingSummary.from_keras(
382
+ model=self.model,
383
+ model_name=self.hub_model_id,
384
+ keras_history=self.training_history,
385
+ **self.model_card_args,
386
+ )
387
+ model_card = train_summary.to_model_card()
388
+ with (self.output_dir / "README.md").open("w") as f:
389
+ f.write(model_card)
390
+ _, self.last_job = self.repo.push_to_hub(
391
+ commit_message=f"Training in progress epoch {epoch}", blocking=False
392
+ )
393
+
394
+ def on_train_end(self, logs=None):
395
+ # Makes sure the latest version of the model is uploaded
396
+ if self.last_job is not None and not self.last_job.is_done:
397
+ logging.info("Pushing the last epoch to the Hub, this may take a while...")
398
+ while not self.last_job.is_done:
399
+ sleep(1)
400
+ else:
401
+ self.model.save_pretrained(self.output_dir)
402
+ if self.tokenizer is not None:
403
+ self.tokenizer.save_pretrained(self.output_dir)
404
+ train_summary = TrainingSummary.from_keras(
405
+ model=self.model,
406
+ model_name=self.hub_model_id,
407
+ keras_history=self.training_history,
408
+ **self.model_card_args,
409
+ )
410
+ model_card = train_summary.to_model_card()
411
+ with (self.output_dir / "README.md").open("w") as f:
412
+ f.write(model_card)
413
+ self.repo.push_to_hub(commit_message="End of training", blocking=True)
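For step-based pushing, the same callback takes `save_strategy="steps"` together with `save_steps`. A minimal constructor sketch (the repo id is a placeholder and a valid Hub login is assumed; instantiating the callback creates or clones the repo):

```py
from transformers.keras_callbacks import PushToHubCallback

# Push a checkpoint every 500 training steps instead of once per epoch.
push_callback = PushToHubCallback(
    output_dir="./model_save",
    save_strategy="steps",
    save_steps=500,
    hub_model_id="my-user/my-model",  # placeholder repo id
)
# model.fit(train_dataset, callbacks=[push_callback])
```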
venv/lib/python3.10/site-packages/transformers/kernels/deformable_detr/ms_deform_attn.h ADDED
@@ -0,0 +1,61 @@
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #pragma once
12
+
13
+ #include "cpu/ms_deform_attn_cpu.h"
14
+
15
+ #ifdef WITH_CUDA
16
+ #include "cuda/ms_deform_attn_cuda.h"
17
+ #endif
18
+
19
+
20
+ at::Tensor
21
+ ms_deform_attn_forward(
22
+ const at::Tensor &value,
23
+ const at::Tensor &spatial_shapes,
24
+ const at::Tensor &level_start_index,
25
+ const at::Tensor &sampling_loc,
26
+ const at::Tensor &attn_weight,
27
+ const int im2col_step)
28
+ {
29
+ if (value.type().is_cuda())
30
+ {
31
+ #ifdef WITH_CUDA
32
+ return ms_deform_attn_cuda_forward(
33
+ value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
34
+ #else
35
+ AT_ERROR("Not compiled with GPU support");
36
+ #endif
37
+ }
38
+ AT_ERROR("Not implemented on the CPU");
39
+ }
40
+
41
+ std::vector<at::Tensor>
42
+ ms_deform_attn_backward(
43
+ const at::Tensor &value,
44
+ const at::Tensor &spatial_shapes,
45
+ const at::Tensor &level_start_index,
46
+ const at::Tensor &sampling_loc,
47
+ const at::Tensor &attn_weight,
48
+ const at::Tensor &grad_output,
49
+ const int im2col_step)
50
+ {
51
+ if (value.type().is_cuda())
52
+ {
53
+ #ifdef WITH_CUDA
54
+ return ms_deform_attn_cuda_backward(
55
+ value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
56
+ #else
57
+ AT_ERROR("Not compiled with GPU support");
58
+ #endif
59
+ }
60
+ AT_ERROR("Not implemented on the CPU");
61
+ }
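This header only declares and dispatches; in the library, the binding and compilation of these kernels happen elsewhere. Purely as an illustration of how such a kernel could be built ad hoc, the sketch below uses `torch.utils.cpp_extension.load` with a hypothetical pybind11 binding file (`vision.cpp`, not shown) and illustrative source paths:

```py
# Illustrative only: assumes a small pybind11 binding file (here called vision.cpp, not shown)
# that exposes ms_deform_attn_forward / ms_deform_attn_backward, and that the paths below exist.
from torch.utils.cpp_extension import load

ms_deform_attn = load(
    name="ms_deform_attn",
    sources=["vision.cpp", "cuda/ms_deform_attn_cuda.cu"],
    extra_cflags=["-DWITH_CUDA"],
    verbose=True,
)
# ms_deform_attn.ms_deform_attn_forward(value, spatial_shapes, level_start_index,
#                                       sampling_loc, attn_weight, im2col_step)
```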
venv/lib/python3.10/site-packages/transformers/kernels/deta/cpu/ms_deform_attn_cpu.cpp ADDED
@@ -0,0 +1,40 @@
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #include <vector>
12
+
13
+ #include <ATen/ATen.h>
14
+ #include <ATen/cuda/CUDAContext.h>
15
+
16
+
17
+ at::Tensor
18
+ ms_deform_attn_cpu_forward(
19
+ const at::Tensor &value,
20
+ const at::Tensor &spatial_shapes,
21
+ const at::Tensor &level_start_index,
22
+ const at::Tensor &sampling_loc,
23
+ const at::Tensor &attn_weight,
24
+ const int im2col_step)
25
+ {
26
+ AT_ERROR("Not implement on cpu");
27
+ }
28
+
29
+ std::vector<at::Tensor>
30
+ ms_deform_attn_cpu_backward(
31
+ const at::Tensor &value,
32
+ const at::Tensor &spatial_shapes,
33
+ const at::Tensor &level_start_index,
34
+ const at::Tensor &sampling_loc,
35
+ const at::Tensor &attn_weight,
36
+ const at::Tensor &grad_output,
37
+ const int im2col_step)
38
+ {
39
+ AT_ERROR("Not implement on cpu");
40
+ }
venv/lib/python3.10/site-packages/transformers/kernels/deta/cpu/ms_deform_attn_cpu.h ADDED
@@ -0,0 +1,32 @@
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #pragma once
12
+ #include <torch/extension.h>
13
+
14
+ at::Tensor
15
+ ms_deform_attn_cpu_forward(
16
+ const at::Tensor &value,
17
+ const at::Tensor &spatial_shapes,
18
+ const at::Tensor &level_start_index,
19
+ const at::Tensor &sampling_loc,
20
+ const at::Tensor &attn_weight,
21
+ const int im2col_step);
22
+
23
+ std::vector<at::Tensor>
24
+ ms_deform_attn_cpu_backward(
25
+ const at::Tensor &value,
26
+ const at::Tensor &spatial_shapes,
27
+ const at::Tensor &level_start_index,
28
+ const at::Tensor &sampling_loc,
29
+ const at::Tensor &attn_weight,
30
+ const at::Tensor &grad_output,
31
+ const int im2col_step);
32
+
venv/lib/python3.10/site-packages/transformers/kernels/deta/cuda/ms_deform_attn_cuda.cu ADDED
@@ -0,0 +1,156 @@
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #include <vector>
12
+ #include "cuda/ms_deform_im2col_cuda.cuh"
13
+
14
+ #include <ATen/ATen.h>
15
+ #include <ATen/cuda/CUDAContext.h>
16
+ #include <cuda.h>
17
+ #include <cuda_runtime.h>
18
+
19
+ #pragma once
20
+ #include <torch/extension.h>
21
+
22
+
23
+ at::Tensor ms_deform_attn_cuda_forward(
24
+ const at::Tensor &value,
25
+ const at::Tensor &spatial_shapes,
26
+ const at::Tensor &level_start_index,
27
+ const at::Tensor &sampling_loc,
28
+ const at::Tensor &attn_weight,
29
+ const int im2col_step)
30
+ {
31
+ AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
32
+ AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
33
+ AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
34
+ AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
35
+ AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
36
+
37
+ AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
38
+ AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
39
+ AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
40
+ AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
41
+ AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
42
+
43
+ const int batch = value.size(0);
44
+ const int spatial_size = value.size(1);
45
+ const int num_heads = value.size(2);
46
+ const int channels = value.size(3);
47
+
48
+ const int num_levels = spatial_shapes.size(0);
49
+
50
+ const int num_query = sampling_loc.size(1);
51
+ const int num_point = sampling_loc.size(4);
52
+
53
+ const int im2col_step_ = std::min(batch, im2col_step);
54
+
55
+ AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
56
+
57
+ auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());
58
+
59
+ const int batch_n = im2col_step_;
60
+ auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
61
+ auto per_value_size = spatial_size * num_heads * channels;
62
+ auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
63
+ auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
64
+ for (int n = 0; n < batch/im2col_step_; ++n)
65
+ {
66
+ auto columns = output_n.select(0, n);
67
+ AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] {
68
+ ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
69
+ value.data<scalar_t>() + n * im2col_step_ * per_value_size,
70
+ spatial_shapes.data<int64_t>(),
71
+ level_start_index.data<int64_t>(),
72
+ sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
73
+ attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
74
+ batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
75
+ columns.data<scalar_t>());
76
+
77
+ }));
78
+ }
79
+
80
+ output = output.view({batch, num_query, num_heads*channels});
81
+
82
+ return output;
83
+ }
84
+
85
+
86
+ std::vector<at::Tensor> ms_deform_attn_cuda_backward(
87
+ const at::Tensor &value,
88
+ const at::Tensor &spatial_shapes,
89
+ const at::Tensor &level_start_index,
90
+ const at::Tensor &sampling_loc,
91
+ const at::Tensor &attn_weight,
92
+ const at::Tensor &grad_output,
93
+ const int im2col_step)
94
+ {
95
+
96
+ AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
97
+ AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
98
+ AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
99
+ AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
100
+ AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
101
+ AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");
102
+
103
+ AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
104
+ AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
105
+ AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
106
+ AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
107
+ AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
108
+ AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor");
109
+
110
+ const int batch = value.size(0);
111
+ const int spatial_size = value.size(1);
112
+ const int num_heads = value.size(2);
113
+ const int channels = value.size(3);
114
+
115
+ const int num_levels = spatial_shapes.size(0);
116
+
117
+ const int num_query = sampling_loc.size(1);
118
+ const int num_point = sampling_loc.size(4);
119
+
120
+ const int im2col_step_ = std::min(batch, im2col_step);
121
+
122
+ AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
123
+
124
+ auto grad_value = at::zeros_like(value);
125
+ auto grad_sampling_loc = at::zeros_like(sampling_loc);
126
+ auto grad_attn_weight = at::zeros_like(attn_weight);
127
+
128
+ const int batch_n = im2col_step_;
129
+ auto per_value_size = spatial_size * num_heads * channels;
130
+ auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
131
+ auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
132
+ auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
133
+
134
+ for (int n = 0; n < batch/im2col_step_; ++n)
135
+ {
136
+ auto grad_output_g = grad_output_n.select(0, n);
137
+ AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] {
138
+ ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
139
+ grad_output_g.data<scalar_t>(),
140
+ value.data<scalar_t>() + n * im2col_step_ * per_value_size,
141
+ spatial_shapes.data<int64_t>(),
142
+ level_start_index.data<int64_t>(),
143
+ sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
144
+ attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
145
+ batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
146
+ grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size,
147
+ grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
148
+ grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);
149
+
150
+ }));
151
+ }
152
+
153
+ return {
154
+ grad_value, grad_sampling_loc, grad_attn_weight
155
+ };
156
+ }
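The `size()` reads at the top of the forward kernel pin down the expected layouts: `value` is `(batch, spatial_size, num_heads, channels)`, `sampling_loc` is `(batch, num_query, num_heads, num_levels, num_point, 2)`, `attn_weight` drops the trailing 2, and the output is viewed as `(batch, num_query, num_heads * channels)`. A shape-only PyTorch sketch (the concrete numbers are arbitrary; `spatial_shapes` holds one `(height, width)` pair per level and `level_start_index` is its usual cumulative-sum companion, as in Deformable DETR):

```py
import torch

batch, num_heads, channels = 2, 8, 32
num_levels, num_point, num_query = 4, 4, 300

# spatial_size is the total number of feature locations summed over all levels.
spatial_shapes = torch.as_tensor([[64, 64], [32, 32], [16, 16], [8, 8]], dtype=torch.long)
level_sizes = spatial_shapes[:, 0] * spatial_shapes[:, 1]
spatial_size = int(level_sizes.sum())
level_start_index = torch.cat((level_sizes.new_zeros(1), level_sizes.cumsum(0)[:-1]))

value = torch.rand(batch, spatial_size, num_heads, channels)
sampling_loc = torch.rand(batch, num_query, num_heads, num_levels, num_point, 2)
attn_weight = torch.rand(batch, num_query, num_heads, num_levels, num_point)
# All tensors must be contiguous CUDA tensors before calling the kernel;
# the result has shape (batch, num_query, num_heads * channels).
```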
venv/lib/python3.10/site-packages/transformers/kernels/deta/cuda/ms_deform_attn_cuda.cuh ADDED
@@ -0,0 +1,1467 @@
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #include <vector>
12
+
13
+ #include <cuda.h>
14
+ #include <cuda_runtime.h>
15
+
16
+ #include <cstdio>
17
+ #include <algorithm>
18
+ #include <cstring>
19
+
20
+ #include <ATen/ATen.h>
21
+ #include <ATen/cuda/CUDAContext.h>
22
+
23
+ #include <THC/THCAtomics.cuh>
24
+
25
+ #define CUDA_KERNEL_LOOP(i, n) \
26
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
27
+ i < (n); \
28
+ i += blockDim.x * gridDim.x)
29
+
30
+
31
+ at::Tensor ms_deform_attn_cuda_forward(
32
+ const at::Tensor &value,
33
+ const at::Tensor &spatial_shapes,
34
+ const at::Tensor &level_start_index,
35
+ const at::Tensor &sampling_loc,
36
+ const at::Tensor &attn_weight,
37
+ const int im2col_step)
38
+ {
39
+ AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
40
+ AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
41
+ AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
42
+ AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
43
+ AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
44
+
45
+ AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
46
+ AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
47
+ AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
48
+ AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
49
+ AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
50
+
51
+ const int batch = value.size(0);
52
+ const int spatial_size = value.size(1);
53
+ const int num_heads = value.size(2);
54
+ const int channels = value.size(3);
55
+
56
+ const int num_levels = spatial_shapes.size(0);
57
+
58
+ const int num_query = sampling_loc.size(1);
59
+ const int num_point = sampling_loc.size(4);
60
+
61
+ const int im2col_step_ = std::min(batch, im2col_step);
62
+
63
+ AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
64
+
65
+ auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());
66
+
67
+ const int batch_n = im2col_step_;
68
+ auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
69
+ auto per_value_size = spatial_size * num_heads * channels;
70
+ auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
71
+ auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
72
+ for (int n = 0; n < batch/im2col_step_; ++n)
73
+ {
74
+ auto columns = output_n.select(0, n);
75
+ AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] {
76
+ ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
77
+ value.data<scalar_t>() + n * im2col_step_ * per_value_size,
78
+ spatial_shapes.data<int64_t>(),
79
+ level_start_index.data<int64_t>(),
80
+ sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
81
+ attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
82
+ batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
83
+ columns.data<scalar_t>());
84
+
85
+ }));
86
+ }
87
+
88
+ output = output.view({batch, num_query, num_heads*channels});
89
+
90
+ return output;
91
+ }
92
+
93
+
94
+ std::vector<at::Tensor> ms_deform_attn_cuda_backward(
95
+ const at::Tensor &value,
96
+ const at::Tensor &spatial_shapes,
97
+ const at::Tensor &level_start_index,
98
+ const at::Tensor &sampling_loc,
99
+ const at::Tensor &attn_weight,
100
+ const at::Tensor &grad_output,
101
+ const int im2col_step)
102
+ {
103
+
104
+ AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
105
+ AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
106
+ AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
107
+ AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
108
+ AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
109
+ AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");
110
+
111
+ AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
112
+ AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
113
+ AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
114
+ AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
115
+ AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
116
+ AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor");
117
+
118
+ const int batch = value.size(0);
119
+ const int spatial_size = value.size(1);
120
+ const int num_heads = value.size(2);
121
+ const int channels = value.size(3);
122
+
123
+ const int num_levels = spatial_shapes.size(0);
124
+
125
+ const int num_query = sampling_loc.size(1);
126
+ const int num_point = sampling_loc.size(4);
127
+
128
+ const int im2col_step_ = std::min(batch, im2col_step);
129
+
130
+ AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
131
+
132
+ auto grad_value = at::zeros_like(value);
133
+ auto grad_sampling_loc = at::zeros_like(sampling_loc);
134
+ auto grad_attn_weight = at::zeros_like(attn_weight);
135
+
136
+ const int batch_n = im2col_step_;
137
+ auto per_value_size = spatial_size * num_heads * channels;
138
+ auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
139
+ auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
140
+ auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
141
+
142
+ for (int n = 0; n < batch/im2col_step_; ++n)
143
+ {
144
+ auto grad_output_g = grad_output_n.select(0, n);
145
+ AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] {
146
+ ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
147
+ grad_output_g.data<scalar_t>(),
148
+ value.data<scalar_t>() + n * im2col_step_ * per_value_size,
149
+ spatial_shapes.data<int64_t>(),
150
+ level_start_index.data<int64_t>(),
151
+ sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
152
+ attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
153
+ batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
154
+ grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size,
155
+ grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
156
+ grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);
157
+
158
+ }));
159
+ }
160
+
161
+ return {
162
+ grad_value, grad_sampling_loc, grad_attn_weight
163
+ };
164
+ }
165
+
166
+ const int CUDA_NUM_THREADS = 1024;
167
+ inline int GET_BLOCKS(const int N, const int num_threads)
168
+ {
169
+ return (N + num_threads - 1) / num_threads;
170
+ }
171
+
172
+
173
+ template <typename scalar_t>
174
+ __device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data,
175
+ const int &height, const int &width, const int &nheads, const int &channels,
176
+ const scalar_t &h, const scalar_t &w, const int &m, const int &c)
177
+ {
178
+ const int h_low = floor(h);
179
+ const int w_low = floor(w);
180
+ const int h_high = h_low + 1;
181
+ const int w_high = w_low + 1;
182
+
183
+ const scalar_t lh = h - h_low;
184
+ const scalar_t lw = w - w_low;
185
+ const scalar_t hh = 1 - lh, hw = 1 - lw;
186
+
187
+ const int w_stride = nheads * channels;
188
+ const int h_stride = width * w_stride;
189
+ const int h_low_ptr_offset = h_low * h_stride;
190
+ const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
191
+ const int w_low_ptr_offset = w_low * w_stride;
192
+ const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
193
+ const int base_ptr = m * channels + c;
194
+
195
+ scalar_t v1 = 0;
196
+ if (h_low >= 0 && w_low >= 0)
197
+ {
198
+ const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
199
+ v1 = bottom_data[ptr1];
200
+ }
201
+ scalar_t v2 = 0;
202
+ if (h_low >= 0 && w_high <= width - 1)
203
+ {
204
+ const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
205
+ v2 = bottom_data[ptr2];
206
+ }
207
+ scalar_t v3 = 0;
208
+ if (h_high <= height - 1 && w_low >= 0)
209
+ {
210
+ const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
211
+ v3 = bottom_data[ptr3];
212
+ }
213
+ scalar_t v4 = 0;
214
+ if (h_high <= height - 1 && w_high <= width - 1)
215
+ {
216
+ const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
217
+ v4 = bottom_data[ptr4];
218
+ }
219
+
220
+ const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
221
+
222
+ const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
223
+ return val;
224
+ }
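
The four weights above are plain bilinear interpolation with zero contribution from corners that fall outside the feature map. A small host-side sketch of the same formula on a single-channel H x W image (bilinear_sample and the 2 x 2 test image are illustrative, not part of the kernel file):

// Sketch only: bilinear sampling at fractional (h, w), mirroring the four-corner
// weighting in ms_deform_attn_im2col_bilinear; out-of-range corners contribute zero.
#include <cmath>
#include <cstdio>
#include <vector>

float bilinear_sample(const std::vector<float>& img, int H, int W, float h, float w)
{
    const int h_low = static_cast<int>(std::floor(h)), w_low = static_cast<int>(std::floor(w));
    const float lh = h - h_low, lw = w - w_low, hh = 1 - lh, hw = 1 - lw;
    auto at = [&](int y, int x) -> float {
        return (y >= 0 && y < H && x >= 0 && x < W) ? img[y * W + x] : 0.f;
    };
    return hh * hw * at(h_low, w_low)     + hh * lw * at(h_low, w_low + 1)
         + lh * hw * at(h_low + 1, w_low) + lh * lw * at(h_low + 1, w_low + 1);
}

int main()
{
    std::vector<float> img = {0.f, 1.f, 2.f, 3.f};              // 2 x 2 image
    printf("%.3f\n", bilinear_sample(img, 2, 2, 0.5f, 0.5f));   // prints 1.500 (average of the corners)
    return 0;
}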
225
+
226
+
227
+ template <typename scalar_t>
228
+ __device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data,
229
+ const int &height, const int &width, const int &nheads, const int &channels,
230
+ const scalar_t &h, const scalar_t &w, const int &m, const int &c,
231
+ const scalar_t &top_grad,
232
+ const scalar_t &attn_weight,
233
+ scalar_t* &grad_value,
234
+ scalar_t* grad_sampling_loc,
235
+ scalar_t* grad_attn_weight)
236
+ {
237
+ const int h_low = floor(h);
238
+ const int w_low = floor(w);
239
+ const int h_high = h_low + 1;
240
+ const int w_high = w_low + 1;
241
+
242
+ const scalar_t lh = h - h_low;
243
+ const scalar_t lw = w - w_low;
244
+ const scalar_t hh = 1 - lh, hw = 1 - lw;
245
+
246
+ const int w_stride = nheads * channels;
247
+ const int h_stride = width * w_stride;
248
+ const int h_low_ptr_offset = h_low * h_stride;
249
+ const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
250
+ const int w_low_ptr_offset = w_low * w_stride;
251
+ const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
252
+ const int base_ptr = m * channels + c;
253
+
254
+ const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
255
+ const scalar_t top_grad_value = top_grad * attn_weight;
256
+ scalar_t grad_h_weight = 0, grad_w_weight = 0;
257
+
258
+ scalar_t v1 = 0;
259
+ if (h_low >= 0 && w_low >= 0)
260
+ {
261
+ const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
262
+ v1 = bottom_data[ptr1];
263
+ grad_h_weight -= hw * v1;
264
+ grad_w_weight -= hh * v1;
265
+ atomicAdd(grad_value+ptr1, w1*top_grad_value);
266
+ }
267
+ scalar_t v2 = 0;
268
+ if (h_low >= 0 && w_high <= width - 1)
269
+ {
270
+ const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
271
+ v2 = bottom_data[ptr2];
272
+ grad_h_weight -= lw * v2;
273
+ grad_w_weight += hh * v2;
274
+ atomicAdd(grad_value+ptr2, w2*top_grad_value);
275
+ }
276
+ scalar_t v3 = 0;
277
+ if (h_high <= height - 1 && w_low >= 0)
278
+ {
279
+ const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
280
+ v3 = bottom_data[ptr3];
281
+ grad_h_weight += hw * v3;
282
+ grad_w_weight -= lh * v3;
283
+ atomicAdd(grad_value+ptr3, w3*top_grad_value);
284
+ }
285
+ scalar_t v4 = 0;
286
+ if (h_high <= height - 1 && w_high <= width - 1)
287
+ {
288
+ const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
289
+ v4 = bottom_data[ptr4];
290
+ grad_h_weight += lw * v4;
291
+ grad_w_weight += lh * v4;
292
+ atomicAdd(grad_value+ptr4, w4*top_grad_value);
293
+ }
294
+
295
+ const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
296
+ *grad_attn_weight = top_grad * val;
297
+ *grad_sampling_loc = width * grad_w_weight * top_grad_value;
298
+ *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value;
299
+ }
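
Spelled out for readability, the accumulation above is just the derivative of the bilinear sample val = w1*v1 + w2*v2 + w3*v3 + w4*v4 with w1 = hh*hw, hh = 1 - lh, and so on:

\[
\frac{\partial\,\mathrm{val}}{\partial h} = -hw\,v_1 - lw\,v_2 + hw\,v_3 + lw\,v_4,
\qquad
\frac{\partial\,\mathrm{val}}{\partial w} = -hh\,v_1 + hh\,v_2 - lh\,v_3 + lh\,v_4 .
\]

Because the caller maps normalized locations as \(h = \mathrm{loc}_h \cdot H - 0.5\) and \(w = \mathrm{loc}_w \cdot W - 0.5\), the chain rule multiplies these spatial gradients by height/width and by top_grad * attn_weight, which is exactly what is written to grad_sampling_loc above; grad_attn_weight receives top_grad * val.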
300
+
301
+
302
+ template <typename scalar_t>
303
+ __device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data,
304
+ const int &height, const int &width, const int &nheads, const int &channels,
305
+ const scalar_t &h, const scalar_t &w, const int &m, const int &c,
306
+ const scalar_t &top_grad,
307
+ const scalar_t &attn_weight,
308
+ scalar_t* &grad_value,
309
+ scalar_t* grad_sampling_loc,
310
+ scalar_t* grad_attn_weight)
311
+ {
312
+ const int h_low = floor(h);
313
+ const int w_low = floor(w);
314
+ const int h_high = h_low + 1;
315
+ const int w_high = w_low + 1;
316
+
317
+ const scalar_t lh = h - h_low;
318
+ const scalar_t lw = w - w_low;
319
+ const scalar_t hh = 1 - lh, hw = 1 - lw;
320
+
321
+ const int w_stride = nheads * channels;
322
+ const int h_stride = width * w_stride;
323
+ const int h_low_ptr_offset = h_low * h_stride;
324
+ const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
325
+ const int w_low_ptr_offset = w_low * w_stride;
326
+ const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
327
+ const int base_ptr = m * channels + c;
328
+
329
+ const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
330
+ const scalar_t top_grad_value = top_grad * attn_weight;
331
+ scalar_t grad_h_weight = 0, grad_w_weight = 0;
332
+
333
+ scalar_t v1 = 0;
334
+ if (h_low >= 0 && w_low >= 0)
335
+ {
336
+ const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
337
+ v1 = bottom_data[ptr1];
338
+ grad_h_weight -= hw * v1;
339
+ grad_w_weight -= hh * v1;
340
+ atomicAdd(grad_value+ptr1, w1*top_grad_value);
341
+ }
342
+ scalar_t v2 = 0;
343
+ if (h_low >= 0 && w_high <= width - 1)
344
+ {
345
+ const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
346
+ v2 = bottom_data[ptr2];
347
+ grad_h_weight -= lw * v2;
348
+ grad_w_weight += hh * v2;
349
+ atomicAdd(grad_value+ptr2, w2*top_grad_value);
350
+ }
351
+ scalar_t v3 = 0;
352
+ if (h_high <= height - 1 && w_low >= 0)
353
+ {
354
+ const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
355
+ v3 = bottom_data[ptr3];
356
+ grad_h_weight += hw * v3;
357
+ grad_w_weight -= lh * v3;
358
+ atomicAdd(grad_value+ptr3, w3*top_grad_value);
359
+ }
360
+ scalar_t v4 = 0;
361
+ if (h_high <= height - 1 && w_high <= width - 1)
362
+ {
363
+ const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
364
+ v4 = bottom_data[ptr4];
365
+ grad_h_weight += lw * v4;
366
+ grad_w_weight += lh * v4;
367
+ atomicAdd(grad_value+ptr4, w4*top_grad_value);
368
+ }
369
+
370
+ const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
371
+ atomicAdd(grad_attn_weight, top_grad * val);
372
+ atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value);
373
+ atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value);
374
+ }
375
+
376
+
377
+ template <typename scalar_t>
378
+ __global__ void ms_deformable_im2col_gpu_kernel(const int n,
379
+ const scalar_t *data_value,
380
+ const int64_t *data_spatial_shapes,
381
+ const int64_t *data_level_start_index,
382
+ const scalar_t *data_sampling_loc,
383
+ const scalar_t *data_attn_weight,
384
+ const int batch_size,
385
+ const int spatial_size,
386
+ const int num_heads,
387
+ const int channels,
388
+ const int num_levels,
389
+ const int num_query,
390
+ const int num_point,
391
+ scalar_t *data_col)
392
+ {
393
+ CUDA_KERNEL_LOOP(index, n)
394
+ {
395
+ int _temp = index;
396
+ const int c_col = _temp % channels;
397
+ _temp /= channels;
398
+ const int sampling_index = _temp;
399
+ const int m_col = _temp % num_heads;
400
+ _temp /= num_heads;
401
+ const int q_col = _temp % num_query;
402
+ _temp /= num_query;
403
+ const int b_col = _temp;
404
+
405
+ scalar_t *data_col_ptr = data_col + index;
406
+ int data_weight_ptr = sampling_index * num_levels * num_point;
407
+ int data_loc_w_ptr = data_weight_ptr << 1;
408
+ const int qid_stride = num_heads * channels;
409
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
410
+ scalar_t col = 0;
411
+
412
+ for (int l_col=0; l_col < num_levels; ++l_col)
413
+ {
414
+ const int level_start_id = data_level_start_index[l_col];
415
+ const int spatial_h_ptr = l_col << 1;
416
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
417
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
418
+ const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride);
419
+ for (int p_col=0; p_col < num_point; ++p_col)
420
+ {
421
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
422
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
423
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
424
+
425
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
426
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
427
+
428
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
429
+ {
430
+ col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight;
431
+ }
432
+
433
+ data_weight_ptr += 1;
434
+ data_loc_w_ptr += 2;
435
+ }
436
+ }
437
+ *data_col_ptr = col;
438
+ }
439
+ }
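
Each thread of the im2col kernel handles one output element, so the flat index encodes (batch, query, head, channel) in that order; the modulo/divide chain at the top unflattens it, and the (batch, query, head) prefix is kept as sampling_index to address the sampling locations and attention weights. A tiny round-trip sketch of that decomposition (the constants are illustrative):

// Sketch only: the flat index <-> (b, q, m, c) decomposition used by the kernel above.
#include <cassert>

int main()
{
    const int num_query = 300, num_heads = 8, channels = 32;
    const int b = 2, q = 17, m = 3, c = 5;
    const int index = ((b * num_query + q) * num_heads + m) * channels + c;  // flat (b, q, m, c) layout

    int t = index;
    const int c_col = t % channels;  t /= channels;   // t now equals sampling_index in the kernel
    const int m_col = t % num_heads; t /= num_heads;
    const int q_col = t % num_query; t /= num_query;
    const int b_col = t;
    assert(b_col == b && q_col == q && m_col == m && c_col == c);
    return 0;
}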
440
+
441
+ template <typename scalar_t, unsigned int blockSize>
442
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n,
443
+ const scalar_t *grad_col,
444
+ const scalar_t *data_value,
445
+ const int64_t *data_spatial_shapes,
446
+ const int64_t *data_level_start_index,
447
+ const scalar_t *data_sampling_loc,
448
+ const scalar_t *data_attn_weight,
449
+ const int batch_size,
450
+ const int spatial_size,
451
+ const int num_heads,
452
+ const int channels,
453
+ const int num_levels,
454
+ const int num_query,
455
+ const int num_point,
456
+ scalar_t *grad_value,
457
+ scalar_t *grad_sampling_loc,
458
+ scalar_t *grad_attn_weight)
459
+ {
460
+ CUDA_KERNEL_LOOP(index, n)
461
+ {
462
+ __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
463
+ __shared__ scalar_t cache_grad_attn_weight[blockSize];
464
+ unsigned int tid = threadIdx.x;
465
+ int _temp = index;
466
+ const int c_col = _temp % channels;
467
+ _temp /= channels;
468
+ const int sampling_index = _temp;
469
+ const int m_col = _temp % num_heads;
470
+ _temp /= num_heads;
471
+ const int q_col = _temp % num_query;
472
+ _temp /= num_query;
473
+ const int b_col = _temp;
474
+
475
+ const scalar_t top_grad = grad_col[index];
476
+
477
+ int data_weight_ptr = sampling_index * num_levels * num_point;
478
+ int data_loc_w_ptr = data_weight_ptr << 1;
479
+ const int grad_sampling_ptr = data_weight_ptr;
480
+ grad_sampling_loc += grad_sampling_ptr << 1;
481
+ grad_attn_weight += grad_sampling_ptr;
482
+ const int grad_weight_stride = 1;
483
+ const int grad_loc_stride = 2;
484
+ const int qid_stride = num_heads * channels;
485
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
486
+
487
+ for (int l_col=0; l_col < num_levels; ++l_col)
488
+ {
489
+ const int level_start_id = data_level_start_index[l_col];
490
+ const int spatial_h_ptr = l_col << 1;
491
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
492
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
493
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
494
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
495
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
496
+
497
+ for (int p_col=0; p_col < num_point; ++p_col)
498
+ {
499
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
500
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
501
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
502
+
503
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
504
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
505
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
506
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
507
+ *(cache_grad_attn_weight+threadIdx.x)=0;
508
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
509
+ {
510
+ ms_deform_attn_col2im_bilinear(
511
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
512
+ top_grad, weight, grad_value_ptr,
513
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
514
+ }
515
+
516
+ __syncthreads();
517
+ if (tid == 0)
518
+ {
519
+ scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
520
+ int sid=2;
521
+ for (unsigned int tid = 1; tid < blockSize; ++tid)
522
+ {
523
+ _grad_w += cache_grad_sampling_loc[sid];
524
+ _grad_h += cache_grad_sampling_loc[sid + 1];
525
+ _grad_a += cache_grad_attn_weight[tid];
526
+ sid += 2;
527
+ }
528
+
529
+
530
+ *grad_sampling_loc = _grad_w;
531
+ *(grad_sampling_loc + 1) = _grad_h;
532
+ *grad_attn_weight = _grad_a;
533
+ }
534
+ __syncthreads();
535
+
536
+ data_weight_ptr += 1;
537
+ data_loc_w_ptr += 2;
538
+ grad_attn_weight += grad_weight_stride;
539
+ grad_sampling_loc += grad_loc_stride;
540
+ }
541
+ }
542
+ }
543
+ }
544
+
545
+
546
+ template <typename scalar_t, unsigned int blockSize>
547
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n,
548
+ const scalar_t *grad_col,
549
+ const scalar_t *data_value,
550
+ const int64_t *data_spatial_shapes,
551
+ const int64_t *data_level_start_index,
552
+ const scalar_t *data_sampling_loc,
553
+ const scalar_t *data_attn_weight,
554
+ const int batch_size,
555
+ const int spatial_size,
556
+ const int num_heads,
557
+ const int channels,
558
+ const int num_levels,
559
+ const int num_query,
560
+ const int num_point,
561
+ scalar_t *grad_value,
562
+ scalar_t *grad_sampling_loc,
563
+ scalar_t *grad_attn_weight)
564
+ {
565
+ CUDA_KERNEL_LOOP(index, n)
566
+ {
567
+ __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
568
+ __shared__ scalar_t cache_grad_attn_weight[blockSize];
569
+ unsigned int tid = threadIdx.x;
570
+ int _temp = index;
571
+ const int c_col = _temp % channels;
572
+ _temp /= channels;
573
+ const int sampling_index = _temp;
574
+ const int m_col = _temp % num_heads;
575
+ _temp /= num_heads;
576
+ const int q_col = _temp % num_query;
577
+ _temp /= num_query;
578
+ const int b_col = _temp;
579
+
580
+ const scalar_t top_grad = grad_col[index];
581
+
582
+ int data_weight_ptr = sampling_index * num_levels * num_point;
583
+ int data_loc_w_ptr = data_weight_ptr << 1;
584
+ const int grad_sampling_ptr = data_weight_ptr;
585
+ grad_sampling_loc += grad_sampling_ptr << 1;
586
+ grad_attn_weight += grad_sampling_ptr;
587
+ const int grad_weight_stride = 1;
588
+ const int grad_loc_stride = 2;
589
+ const int qid_stride = num_heads * channels;
590
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
591
+
592
+ for (int l_col=0; l_col < num_levels; ++l_col)
593
+ {
594
+ const int level_start_id = data_level_start_index[l_col];
595
+ const int spatial_h_ptr = l_col << 1;
596
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
597
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
598
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
599
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
600
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
601
+
602
+ for (int p_col=0; p_col < num_point; ++p_col)
603
+ {
604
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
605
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
606
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
607
+
608
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
609
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
610
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
611
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
612
+ *(cache_grad_attn_weight+threadIdx.x)=0;
613
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
614
+ {
615
+ ms_deform_attn_col2im_bilinear(
616
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
617
+ top_grad, weight, grad_value_ptr,
618
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
619
+ }
620
+
621
+ __syncthreads();
622
+
623
+ for (unsigned int s=blockSize/2; s>0; s>>=1)
624
+ {
625
+ if (tid < s) {
626
+ const unsigned int xid1 = tid << 1;
627
+ const unsigned int xid2 = (tid + s) << 1;
628
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
629
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
630
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
631
+ }
632
+ __syncthreads();
633
+ }
634
+
635
+ if (tid == 0)
636
+ {
637
+ *grad_sampling_loc = cache_grad_sampling_loc[0];
638
+ *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
639
+ *grad_attn_weight = cache_grad_attn_weight[0];
640
+ }
641
+ __syncthreads();
642
+
643
+ data_weight_ptr += 1;
644
+ data_loc_w_ptr += 2;
645
+ grad_attn_weight += grad_weight_stride;
646
+ grad_sampling_loc += grad_loc_stride;
647
+ }
648
+ }
649
+ }
650
+ }
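
The v2 kernels above rely on the block size being a compile-time power of two, so the per-point partial gradients held in shared memory can be summed with a tree reduction: each step halves the number of active threads until thread 0 holds the block-wide sum (the v1 variants instead have thread 0 walk the cache serially). A minimal, self-contained sketch of that reduction pattern (block_sum and the zero-filled input are illustrative, not part of this file):

// Sketch only: power-of-two shared-memory tree reduction, as used by the *_reduce_v2 kernels.
#include <cstdio>
#include <cuda_runtime.h>

template <unsigned int blockSize>
__global__ void block_sum(const float* in, float* out)
{
    __shared__ float cache[blockSize];
    const unsigned int tid = threadIdx.x;
    cache[tid] = in[blockIdx.x * blockSize + tid];
    __syncthreads();
    for (unsigned int s = blockSize / 2; s > 0; s >>= 1) {   // halve the active threads each step
        if (tid < s) cache[tid] += cache[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = cache[0];                // one result per block
}

int main()
{
    const unsigned int threads = 128, blocks = 4;
    float *d_in, *d_out, h_out[blocks];
    cudaMalloc((void**)&d_in, threads * blocks * sizeof(float));
    cudaMalloc((void**)&d_out, blocks * sizeof(float));
    cudaMemset(d_in, 0, threads * blocks * sizeof(float));   // zero input -> each block sums to 0
    block_sum<threads><<<blocks, threads>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, blocks * sizeof(float), cudaMemcpyDeviceToHost);
    printf("block 0 sum = %f\n", h_out[0]);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}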
651
+
652
+
653
+ template <typename scalar_t>
654
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n,
655
+ const scalar_t *grad_col,
656
+ const scalar_t *data_value,
657
+ const int64_t *data_spatial_shapes,
658
+ const int64_t *data_level_start_index,
659
+ const scalar_t *data_sampling_loc,
660
+ const scalar_t *data_attn_weight,
661
+ const int batch_size,
662
+ const int spatial_size,
663
+ const int num_heads,
664
+ const int channels,
665
+ const int num_levels,
666
+ const int num_query,
667
+ const int num_point,
668
+ scalar_t *grad_value,
669
+ scalar_t *grad_sampling_loc,
670
+ scalar_t *grad_attn_weight)
671
+ {
672
+ CUDA_KERNEL_LOOP(index, n)
673
+ {
674
+ extern __shared__ int _s[];
675
+ scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
676
+ scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
677
+ unsigned int tid = threadIdx.x;
678
+ int _temp = index;
679
+ const int c_col = _temp % channels;
680
+ _temp /= channels;
681
+ const int sampling_index = _temp;
682
+ const int m_col = _temp % num_heads;
683
+ _temp /= num_heads;
684
+ const int q_col = _temp % num_query;
685
+ _temp /= num_query;
686
+ const int b_col = _temp;
687
+
688
+ const scalar_t top_grad = grad_col[index];
689
+
690
+ int data_weight_ptr = sampling_index * num_levels * num_point;
691
+ int data_loc_w_ptr = data_weight_ptr << 1;
692
+ const int grad_sampling_ptr = data_weight_ptr;
693
+ grad_sampling_loc += grad_sampling_ptr << 1;
694
+ grad_attn_weight += grad_sampling_ptr;
695
+ const int grad_weight_stride = 1;
696
+ const int grad_loc_stride = 2;
697
+ const int qid_stride = num_heads * channels;
698
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
699
+
700
+ for (int l_col=0; l_col < num_levels; ++l_col)
701
+ {
702
+ const int level_start_id = data_level_start_index[l_col];
703
+ const int spatial_h_ptr = l_col << 1;
704
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
705
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
706
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
707
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
708
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
709
+
710
+ for (int p_col=0; p_col < num_point; ++p_col)
711
+ {
712
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
713
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
714
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
715
+
716
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
717
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
718
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
719
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
720
+ *(cache_grad_attn_weight+threadIdx.x)=0;
721
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
722
+ {
723
+ ms_deform_attn_col2im_bilinear(
724
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
725
+ top_grad, weight, grad_value_ptr,
726
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
727
+ }
728
+
729
+ __syncthreads();
730
+ if (tid == 0)
731
+ {
732
+ scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
733
+ int sid=2;
734
+ for (unsigned int tid = 1; tid < blockDim.x; ++tid)
735
+ {
736
+ _grad_w += cache_grad_sampling_loc[sid];
737
+ _grad_h += cache_grad_sampling_loc[sid + 1];
738
+ _grad_a += cache_grad_attn_weight[tid];
739
+ sid += 2;
740
+ }
741
+
742
+
743
+ *grad_sampling_loc = _grad_w;
744
+ *(grad_sampling_loc + 1) = _grad_h;
745
+ *grad_attn_weight = _grad_a;
746
+ }
747
+ __syncthreads();
748
+
749
+ data_weight_ptr += 1;
750
+ data_loc_w_ptr += 2;
751
+ grad_attn_weight += grad_weight_stride;
752
+ grad_sampling_loc += grad_loc_stride;
753
+ }
754
+ }
755
+ }
756
+ }
757
+
758
+ template <typename scalar_t>
759
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n,
760
+ const scalar_t *grad_col,
761
+ const scalar_t *data_value,
762
+ const int64_t *data_spatial_shapes,
763
+ const int64_t *data_level_start_index,
764
+ const scalar_t *data_sampling_loc,
765
+ const scalar_t *data_attn_weight,
766
+ const int batch_size,
767
+ const int spatial_size,
768
+ const int num_heads,
769
+ const int channels,
770
+ const int num_levels,
771
+ const int num_query,
772
+ const int num_point,
773
+ scalar_t *grad_value,
774
+ scalar_t *grad_sampling_loc,
775
+ scalar_t *grad_attn_weight)
776
+ {
777
+ CUDA_KERNEL_LOOP(index, n)
778
+ {
779
+ extern __shared__ int _s[];
780
+ scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
781
+ scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
782
+ unsigned int tid = threadIdx.x;
783
+ int _temp = index;
784
+ const int c_col = _temp % channels;
785
+ _temp /= channels;
786
+ const int sampling_index = _temp;
787
+ const int m_col = _temp % num_heads;
788
+ _temp /= num_heads;
789
+ const int q_col = _temp % num_query;
790
+ _temp /= num_query;
791
+ const int b_col = _temp;
792
+
793
+ const scalar_t top_grad = grad_col[index];
794
+
795
+ int data_weight_ptr = sampling_index * num_levels * num_point;
796
+ int data_loc_w_ptr = data_weight_ptr << 1;
797
+ const int grad_sampling_ptr = data_weight_ptr;
798
+ grad_sampling_loc += grad_sampling_ptr << 1;
799
+ grad_attn_weight += grad_sampling_ptr;
800
+ const int grad_weight_stride = 1;
801
+ const int grad_loc_stride = 2;
802
+ const int qid_stride = num_heads * channels;
803
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
804
+
805
+ for (int l_col=0; l_col < num_levels; ++l_col)
806
+ {
807
+ const int level_start_id = data_level_start_index[l_col];
808
+ const int spatial_h_ptr = l_col << 1;
809
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
810
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
811
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
812
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
813
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
814
+
815
+ for (int p_col=0; p_col < num_point; ++p_col)
816
+ {
817
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
818
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
819
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
820
+
821
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
822
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
823
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
824
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
825
+ *(cache_grad_attn_weight+threadIdx.x)=0;
826
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
827
+ {
828
+ ms_deform_attn_col2im_bilinear(
829
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
830
+ top_grad, weight, grad_value_ptr,
831
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
832
+ }
833
+
834
+ __syncthreads();
835
+
836
+ for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
837
+ {
838
+ if (tid < s) {
839
+ const unsigned int xid1 = tid << 1;
840
+ const unsigned int xid2 = (tid + s) << 1;
841
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
842
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
843
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
844
+ if (tid + (s << 1) < spre)
845
+ {
846
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
847
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
848
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
849
+ }
850
+ }
851
+ __syncthreads();
852
+ }
853
+
854
+ if (tid == 0)
855
+ {
856
+ *grad_sampling_loc = cache_grad_sampling_loc[0];
857
+ *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
858
+ *grad_attn_weight = cache_grad_attn_weight[0];
859
+ }
860
+ __syncthreads();
861
+
862
+ data_weight_ptr += 1;
863
+ data_loc_w_ptr += 2;
864
+ grad_attn_weight += grad_weight_stride;
865
+ grad_sampling_loc += grad_loc_stride;
866
+ }
867
+ }
868
+ }
869
+ }
870
+
871
+ template <typename scalar_t>
872
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n,
873
+ const scalar_t *grad_col,
874
+ const scalar_t *data_value,
875
+ const int64_t *data_spatial_shapes,
876
+ const int64_t *data_level_start_index,
877
+ const scalar_t *data_sampling_loc,
878
+ const scalar_t *data_attn_weight,
879
+ const int batch_size,
880
+ const int spatial_size,
881
+ const int num_heads,
882
+ const int channels,
883
+ const int num_levels,
884
+ const int num_query,
885
+ const int num_point,
886
+ scalar_t *grad_value,
887
+ scalar_t *grad_sampling_loc,
888
+ scalar_t *grad_attn_weight)
889
+ {
890
+ CUDA_KERNEL_LOOP(index, n)
891
+ {
892
+ extern __shared__ int _s[];
893
+ scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
894
+ scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
895
+ unsigned int tid = threadIdx.x;
896
+ int _temp = index;
897
+ const int c_col = _temp % channels;
898
+ _temp /= channels;
899
+ const int sampling_index = _temp;
900
+ const int m_col = _temp % num_heads;
901
+ _temp /= num_heads;
902
+ const int q_col = _temp % num_query;
903
+ _temp /= num_query;
904
+ const int b_col = _temp;
905
+
906
+ const scalar_t top_grad = grad_col[index];
907
+
908
+ int data_weight_ptr = sampling_index * num_levels * num_point;
909
+ int data_loc_w_ptr = data_weight_ptr << 1;
910
+ const int grad_sampling_ptr = data_weight_ptr;
911
+ grad_sampling_loc += grad_sampling_ptr << 1;
912
+ grad_attn_weight += grad_sampling_ptr;
913
+ const int grad_weight_stride = 1;
914
+ const int grad_loc_stride = 2;
915
+ const int qid_stride = num_heads * channels;
916
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
917
+
918
+ for (int l_col=0; l_col < num_levels; ++l_col)
919
+ {
920
+ const int level_start_id = data_level_start_index[l_col];
921
+ const int spatial_h_ptr = l_col << 1;
922
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
923
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
924
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
925
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
926
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
927
+
928
+ for (int p_col=0; p_col < num_point; ++p_col)
929
+ {
930
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
931
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
932
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
933
+
934
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
935
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
936
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
937
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
938
+ *(cache_grad_attn_weight+threadIdx.x)=0;
939
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
940
+ {
941
+ ms_deform_attn_col2im_bilinear(
942
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
943
+ top_grad, weight, grad_value_ptr,
944
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
945
+ }
946
+
947
+ __syncthreads();
948
+
949
+ for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
950
+ {
951
+ if (tid < s) {
952
+ const unsigned int xid1 = tid << 1;
953
+ const unsigned int xid2 = (tid + s) << 1;
954
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
955
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
956
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
957
+ if (tid + (s << 1) < spre)
958
+ {
959
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
960
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
961
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
962
+ }
963
+ }
964
+ __syncthreads();
965
+ }
966
+
967
+ if (tid == 0)
968
+ {
969
+ atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]);
970
+ atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]);
971
+ atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]);
972
+ }
973
+ __syncthreads();
974
+
975
+ data_weight_ptr += 1;
976
+ data_loc_w_ptr += 2;
977
+ grad_attn_weight += grad_weight_stride;
978
+ grad_sampling_loc += grad_loc_stride;
979
+ }
980
+ }
981
+ }
982
+ }
983
+
984
+
985
+ template <typename scalar_t>
986
+ __global__ void ms_deformable_col2im_gpu_kernel_gm(const int n,
987
+ const scalar_t *grad_col,
988
+ const scalar_t *data_value,
989
+ const int64_t *data_spatial_shapes,
990
+ const int64_t *data_level_start_index,
991
+ const scalar_t *data_sampling_loc,
992
+ const scalar_t *data_attn_weight,
993
+ const int batch_size,
994
+ const int spatial_size,
995
+ const int num_heads,
996
+ const int channels,
997
+ const int num_levels,
998
+ const int num_query,
999
+ const int num_point,
1000
+ scalar_t *grad_value,
1001
+ scalar_t *grad_sampling_loc,
1002
+ scalar_t *grad_attn_weight)
1003
+ {
1004
+ CUDA_KERNEL_LOOP(index, n)
1005
+ {
1006
+ int _temp = index;
1007
+ const int c_col = _temp % channels;
1008
+ _temp /= channels;
1009
+ const int sampling_index = _temp;
1010
+ const int m_col = _temp % num_heads;
1011
+ _temp /= num_heads;
1012
+ const int q_col = _temp % num_query;
1013
+ _temp /= num_query;
1014
+ const int b_col = _temp;
1015
+
1016
+ const scalar_t top_grad = grad_col[index];
1017
+
1018
+ int data_weight_ptr = sampling_index * num_levels * num_point;
1019
+ int data_loc_w_ptr = data_weight_ptr << 1;
1020
+ const int grad_sampling_ptr = data_weight_ptr;
1021
+ grad_sampling_loc += grad_sampling_ptr << 1;
1022
+ grad_attn_weight += grad_sampling_ptr;
1023
+ const int grad_weight_stride = 1;
1024
+ const int grad_loc_stride = 2;
1025
+ const int qid_stride = num_heads * channels;
1026
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
1027
+
1028
+ for (int l_col=0; l_col < num_levels; ++l_col)
1029
+ {
1030
+ const int level_start_id = data_level_start_index[l_col];
1031
+ const int spatial_h_ptr = l_col << 1;
1032
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
1033
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
1034
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
1035
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
1036
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
1037
+
1038
+ for (int p_col=0; p_col < num_point; ++p_col)
1039
+ {
1040
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
1041
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
1042
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
1043
+
1044
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
1045
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
1046
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
1047
+ {
1048
+ ms_deform_attn_col2im_bilinear_gm(
1049
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
1050
+ top_grad, weight, grad_value_ptr,
1051
+ grad_sampling_loc, grad_attn_weight);
1052
+ }
1053
+ data_weight_ptr += 1;
1054
+ data_loc_w_ptr += 2;
1055
+ grad_attn_weight += grad_weight_stride;
1056
+ grad_sampling_loc += grad_loc_stride;
1057
+ }
1058
+ }
1059
+ }
1060
+ }
1061
+
1062
+
1063
+ template <typename scalar_t>
1064
+ void ms_deformable_im2col_cuda(cudaStream_t stream,
1065
+ const scalar_t* data_value,
1066
+ const int64_t* data_spatial_shapes,
1067
+ const int64_t* data_level_start_index,
1068
+ const scalar_t* data_sampling_loc,
1069
+ const scalar_t* data_attn_weight,
1070
+ const int batch_size,
1071
+ const int spatial_size,
1072
+ const int num_heads,
1073
+ const int channels,
1074
+ const int num_levels,
1075
+ const int num_query,
1076
+ const int num_point,
1077
+ scalar_t* data_col)
1078
+ {
1079
+ const int num_kernels = batch_size * num_query * num_heads * channels;
1080
+ const int num_actual_kernels = batch_size * num_query * num_heads * channels;
1081
+ const int num_threads = CUDA_NUM_THREADS;
1082
+ ms_deformable_im2col_gpu_kernel<scalar_t>
1083
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1084
+ 0, stream>>>(
1085
+ num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight,
1086
+ batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col);
1087
+
1088
+ cudaError_t err = cudaGetLastError();
1089
+ if (err != cudaSuccess)
1090
+ {
1091
+ printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
1092
+ }
1093
+
1094
+ }
1095
+
1096
+ template <typename scalar_t>
1097
+ void ms_deformable_col2im_cuda(cudaStream_t stream,
1098
+ const scalar_t* grad_col,
1099
+ const scalar_t* data_value,
1100
+ const int64_t * data_spatial_shapes,
1101
+ const int64_t * data_level_start_index,
1102
+ const scalar_t * data_sampling_loc,
1103
+ const scalar_t * data_attn_weight,
1104
+ const int batch_size,
1105
+ const int spatial_size,
1106
+ const int num_heads,
1107
+ const int channels,
1108
+ const int num_levels,
1109
+ const int num_query,
1110
+ const int num_point,
1111
+ scalar_t* grad_value,
1112
+ scalar_t* grad_sampling_loc,
1113
+ scalar_t* grad_attn_weight)
1114
+ {
1115
+ const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels;
1116
+ const int num_kernels = batch_size * num_query * num_heads * channels;
1117
+ const int num_actual_kernels = batch_size * num_query * num_heads * channels;
1118
+ if (channels > 1024)
1119
+ {
1120
+ if ((channels & 1023) == 0)
1121
+ {
1122
+ ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>
1123
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1124
+ num_threads*3*sizeof(scalar_t), stream>>>(
1125
+ num_kernels,
1126
+ grad_col,
1127
+ data_value,
1128
+ data_spatial_shapes,
1129
+ data_level_start_index,
1130
+ data_sampling_loc,
1131
+ data_attn_weight,
1132
+ batch_size,
1133
+ spatial_size,
1134
+ num_heads,
1135
+ channels,
1136
+ num_levels,
1137
+ num_query,
1138
+ num_point,
1139
+ grad_value,
1140
+ grad_sampling_loc,
1141
+ grad_attn_weight);
1142
+ }
1143
+ else
1144
+ {
1145
+ ms_deformable_col2im_gpu_kernel_gm<scalar_t>
1146
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1147
+ 0, stream>>>(
1148
+ num_kernels,
1149
+ grad_col,
1150
+ data_value,
1151
+ data_spatial_shapes,
1152
+ data_level_start_index,
1153
+ data_sampling_loc,
1154
+ data_attn_weight,
1155
+ batch_size,
1156
+ spatial_size,
1157
+ num_heads,
1158
+ channels,
1159
+ num_levels,
1160
+ num_query,
1161
+ num_point,
1162
+ grad_value,
1163
+ grad_sampling_loc,
1164
+ grad_attn_weight);
1165
+ }
1166
+ }
1167
+ else{
1168
+ switch(channels)
1169
+ {
1170
+ case 1:
1171
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 1>
1172
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1173
+ 0, stream>>>(
1174
+ num_kernels,
1175
+ grad_col,
1176
+ data_value,
1177
+ data_spatial_shapes,
1178
+ data_level_start_index,
1179
+ data_sampling_loc,
1180
+ data_attn_weight,
1181
+ batch_size,
1182
+ spatial_size,
1183
+ num_heads,
1184
+ channels,
1185
+ num_levels,
1186
+ num_query,
1187
+ num_point,
1188
+ grad_value,
1189
+ grad_sampling_loc,
1190
+ grad_attn_weight);
1191
+ break;
1192
+ case 2:
1193
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 2>
1194
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1195
+ 0, stream>>>(
1196
+ num_kernels,
1197
+ grad_col,
1198
+ data_value,
1199
+ data_spatial_shapes,
1200
+ data_level_start_index,
1201
+ data_sampling_loc,
1202
+ data_attn_weight,
1203
+ batch_size,
1204
+ spatial_size,
1205
+ num_heads,
1206
+ channels,
1207
+ num_levels,
1208
+ num_query,
1209
+ num_point,
1210
+ grad_value,
1211
+ grad_sampling_loc,
1212
+ grad_attn_weight);
1213
+ break;
1214
+ case 4:
1215
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 4>
1216
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1217
+ 0, stream>>>(
1218
+ num_kernels,
1219
+ grad_col,
1220
+ data_value,
1221
+ data_spatial_shapes,
1222
+ data_level_start_index,
1223
+ data_sampling_loc,
1224
+ data_attn_weight,
1225
+ batch_size,
1226
+ spatial_size,
1227
+ num_heads,
1228
+ channels,
1229
+ num_levels,
1230
+ num_query,
1231
+ num_point,
1232
+ grad_value,
1233
+ grad_sampling_loc,
1234
+ grad_attn_weight);
1235
+ break;
1236
+ case 8:
1237
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 8>
1238
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1239
+ 0, stream>>>(
1240
+ num_kernels,
1241
+ grad_col,
1242
+ data_value,
1243
+ data_spatial_shapes,
1244
+ data_level_start_index,
1245
+ data_sampling_loc,
1246
+ data_attn_weight,
1247
+ batch_size,
1248
+ spatial_size,
1249
+ num_heads,
1250
+ channels,
1251
+ num_levels,
1252
+ num_query,
1253
+ num_point,
1254
+ grad_value,
1255
+ grad_sampling_loc,
1256
+ grad_attn_weight);
1257
+ break;
1258
+ case 16:
1259
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 16>
1260
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1261
+ 0, stream>>>(
1262
+ num_kernels,
1263
+ grad_col,
1264
+ data_value,
1265
+ data_spatial_shapes,
1266
+ data_level_start_index,
1267
+ data_sampling_loc,
1268
+ data_attn_weight,
1269
+ batch_size,
1270
+ spatial_size,
1271
+ num_heads,
1272
+ channels,
1273
+ num_levels,
1274
+ num_query,
1275
+ num_point,
1276
+ grad_value,
1277
+ grad_sampling_loc,
1278
+ grad_attn_weight);
1279
+ break;
1280
+ case 32:
1281
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 32>
1282
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1283
+ 0, stream>>>(
1284
+ num_kernels,
1285
+ grad_col,
1286
+ data_value,
1287
+ data_spatial_shapes,
1288
+ data_level_start_index,
1289
+ data_sampling_loc,
1290
+ data_attn_weight,
1291
+ batch_size,
1292
+ spatial_size,
1293
+ num_heads,
1294
+ channels,
1295
+ num_levels,
1296
+ num_query,
1297
+ num_point,
1298
+ grad_value,
1299
+ grad_sampling_loc,
1300
+ grad_attn_weight);
1301
+ break;
1302
+ case 64:
1303
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 64>
1304
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1305
+ 0, stream>>>(
1306
+ num_kernels,
1307
+ grad_col,
1308
+ data_value,
1309
+ data_spatial_shapes,
1310
+ data_level_start_index,
1311
+ data_sampling_loc,
1312
+ data_attn_weight,
1313
+ batch_size,
1314
+ spatial_size,
1315
+ num_heads,
1316
+ channels,
1317
+ num_levels,
1318
+ num_query,
1319
+ num_point,
1320
+ grad_value,
1321
+ grad_sampling_loc,
1322
+ grad_attn_weight);
1323
+ break;
1324
+ case 128:
1325
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 128>
1326
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1327
+ 0, stream>>>(
1328
+ num_kernels,
1329
+ grad_col,
1330
+ data_value,
1331
+ data_spatial_shapes,
1332
+ data_level_start_index,
1333
+ data_sampling_loc,
1334
+ data_attn_weight,
1335
+ batch_size,
1336
+ spatial_size,
1337
+ num_heads,
1338
+ channels,
1339
+ num_levels,
1340
+ num_query,
1341
+ num_point,
1342
+ grad_value,
1343
+ grad_sampling_loc,
1344
+ grad_attn_weight);
1345
+ break;
1346
+ case 256:
1347
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 256>
1348
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1349
+ 0, stream>>>(
1350
+ num_kernels,
1351
+ grad_col,
1352
+ data_value,
1353
+ data_spatial_shapes,
1354
+ data_level_start_index,
1355
+ data_sampling_loc,
1356
+ data_attn_weight,
1357
+ batch_size,
1358
+ spatial_size,
1359
+ num_heads,
1360
+ channels,
1361
+ num_levels,
1362
+ num_query,
1363
+ num_point,
1364
+ grad_value,
1365
+ grad_sampling_loc,
1366
+ grad_attn_weight);
1367
+ break;
1368
+ case 512:
1369
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 512>
1370
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1371
+ 0, stream>>>(
1372
+ num_kernels,
1373
+ grad_col,
1374
+ data_value,
1375
+ data_spatial_shapes,
1376
+ data_level_start_index,
1377
+ data_sampling_loc,
1378
+ data_attn_weight,
1379
+ batch_size,
1380
+ spatial_size,
1381
+ num_heads,
1382
+ channels,
1383
+ num_levels,
1384
+ num_query,
1385
+ num_point,
1386
+ grad_value,
1387
+ grad_sampling_loc,
1388
+ grad_attn_weight);
1389
+ break;
1390
+ case 1024:
1391
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 1024>
1392
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1393
+ 0, stream>>>(
1394
+ num_kernels,
1395
+ grad_col,
1396
+ data_value,
1397
+ data_spatial_shapes,
1398
+ data_level_start_index,
1399
+ data_sampling_loc,
1400
+ data_attn_weight,
1401
+ batch_size,
1402
+ spatial_size,
1403
+ num_heads,
1404
+ channels,
1405
+ num_levels,
1406
+ num_query,
1407
+ num_point,
1408
+ grad_value,
1409
+ grad_sampling_loc,
1410
+ grad_attn_weight);
1411
+ break;
1412
+ default:
1413
+ if (channels < 64)
1414
+ {
1415
+ ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>
1416
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1417
+ num_threads*3*sizeof(scalar_t), stream>>>(
1418
+ num_kernels,
1419
+ grad_col,
1420
+ data_value,
1421
+ data_spatial_shapes,
1422
+ data_level_start_index,
1423
+ data_sampling_loc,
1424
+ data_attn_weight,
1425
+ batch_size,
1426
+ spatial_size,
1427
+ num_heads,
1428
+ channels,
1429
+ num_levels,
1430
+ num_query,
1431
+ num_point,
1432
+ grad_value,
1433
+ grad_sampling_loc,
1434
+ grad_attn_weight);
1435
+ }
1436
+ else
1437
+ {
1438
+ ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>
1439
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1440
+ num_threads*3*sizeof(scalar_t), stream>>>(
1441
+ num_kernels,
1442
+ grad_col,
1443
+ data_value,
1444
+ data_spatial_shapes,
1445
+ data_level_start_index,
1446
+ data_sampling_loc,
1447
+ data_attn_weight,
1448
+ batch_size,
1449
+ spatial_size,
1450
+ num_heads,
1451
+ channels,
1452
+ num_levels,
1453
+ num_query,
1454
+ num_point,
1455
+ grad_value,
1456
+ grad_sampling_loc,
1457
+ grad_attn_weight);
1458
+ }
1459
+ }
1460
+ }
1461
+ cudaError_t err = cudaGetLastError();
1462
+ if (err != cudaSuccess)
1463
+ {
1464
+ printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
1465
+ }
1466
+
1467
+ }
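
The launcher above selects the backward kernel from the channel count. num_threads is min(channels, 1024), so for channels <= 1024 the block size equals channels, which is why the power-of-two cases can pass it as a compile-time template parameter: 1-32 channels use the static-shared-memory kernel with a serial reduction, 64-1024 use the tree-reduction variant, other sizes below 1024 fall back to the dynamic-shared-memory kernels (serial below 64 channels, tree otherwise), multiples of 1024 above that use the multi-block variant that combines shared memory with atomics, and everything else uses the pure global-memory atomic kernel. A plain decision-function sketch of that policy (backward_kernel_choice and its labels are illustrative, not real kernel names):

// Sketch only: the channel-based dispatch policy of ms_deformable_col2im_cuda above.
#include <cstdio>
#include <string>

std::string backward_kernel_choice(int channels)
{
    if (channels > 1024)
        return channels % 1024 == 0 ? "shm_reduce_v2_multi_blocks (dynamic shared memory + atomics)"
                                    : "gm (global-memory atomics only)";
    switch (channels) {
        case 1: case 2: case 4: case 8: case 16: case 32:
            return "shm_blocksize_aware_reduce_v1 (static shared memory, serial reduction by thread 0)";
        case 64: case 128: case 256: case 512: case 1024:
            return "shm_blocksize_aware_reduce_v2 (static shared memory, tree reduction)";
        default:
            return channels < 64 ? "shm_reduce_v1 (dynamic shared memory, serial reduction)"
                                 : "shm_reduce_v2 (dynamic shared memory, tree reduction)";
    }
}

int main()
{
    const int cases[] = {32, 64, 100, 2048, 2050};
    for (int c : cases)
        printf("channels=%d -> %s\n", c, backward_kernel_choice(c).c_str());
    return 0;
}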
venv/lib/python3.10/site-packages/transformers/kernels/deta/cuda/ms_deform_attn_cuda.h ADDED
@@ -0,0 +1,29 @@
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #pragma once
12
+ #include <torch/extension.h>
13
+
14
+ at::Tensor ms_deform_attn_cuda_forward(
15
+ const at::Tensor &value,
16
+ const at::Tensor &spatial_shapes,
17
+ const at::Tensor &level_start_index,
18
+ const at::Tensor &sampling_loc,
19
+ const at::Tensor &attn_weight,
20
+ const int im2col_step);
21
+
22
+ std::vector<at::Tensor> ms_deform_attn_cuda_backward(
23
+ const at::Tensor &value,
24
+ const at::Tensor &spatial_shapes,
25
+ const at::Tensor &level_start_index,
26
+ const at::Tensor &sampling_loc,
27
+ const at::Tensor &attn_weight,
28
+ const at::Tensor &grad_output,
29
+ const int im2col_step);
venv/lib/python3.10/site-packages/transformers/kernels/deta/cuda/ms_deform_im2col_cuda.cuh ADDED
@@ -0,0 +1,1327 @@
1
+ /*!
2
+ **************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************
7
+ * Modified from DCN (https://github.com/msracver/Deformable-ConvNets)
8
+ * Copyright (c) 2018 Microsoft
9
+ **************************************************************************
10
+ */
11
+
12
+ #include <cstdio>
13
+ #include <algorithm>
14
+ #include <cstring>
15
+
16
+ #include <ATen/ATen.h>
17
+ #include <ATen/cuda/CUDAContext.h>
18
+
19
+ #include <THC/THCAtomics.cuh>
20
+
21
+ #define CUDA_KERNEL_LOOP(i, n) \
22
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
23
+ i < (n); \
24
+ i += blockDim.x * gridDim.x)
25
+
26
+ const int CUDA_NUM_THREADS = 1024;
27
+ inline int GET_BLOCKS(const int N, const int num_threads)
28
+ {
29
+ return (N + num_threads - 1) / num_threads;
30
+ }
31
+
32
+
33
+ template <typename scalar_t>
34
+ __device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data,
35
+ const int &height, const int &width, const int &nheads, const int &channels,
36
+ const scalar_t &h, const scalar_t &w, const int &m, const int &c)
37
+ {
38
+ const int h_low = floor(h);
39
+ const int w_low = floor(w);
40
+ const int h_high = h_low + 1;
41
+ const int w_high = w_low + 1;
42
+
43
+ const scalar_t lh = h - h_low;
44
+ const scalar_t lw = w - w_low;
45
+ const scalar_t hh = 1 - lh, hw = 1 - lw;
46
+
47
+ const int w_stride = nheads * channels;
48
+ const int h_stride = width * w_stride;
49
+ const int h_low_ptr_offset = h_low * h_stride;
50
+ const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
51
+ const int w_low_ptr_offset = w_low * w_stride;
52
+ const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
53
+ const int base_ptr = m * channels + c;
54
+
55
+ scalar_t v1 = 0;
56
+ if (h_low >= 0 && w_low >= 0)
57
+ {
58
+ const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
59
+ v1 = bottom_data[ptr1];
60
+ }
61
+ scalar_t v2 = 0;
62
+ if (h_low >= 0 && w_high <= width - 1)
63
+ {
64
+ const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
65
+ v2 = bottom_data[ptr2];
66
+ }
67
+ scalar_t v3 = 0;
68
+ if (h_high <= height - 1 && w_low >= 0)
69
+ {
70
+ const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
71
+ v3 = bottom_data[ptr3];
72
+ }
73
+ scalar_t v4 = 0;
74
+ if (h_high <= height - 1 && w_high <= width - 1)
75
+ {
76
+ const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
77
+ v4 = bottom_data[ptr4];
78
+ }
79
+
80
+ const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
81
+
82
+ const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
83
+ return val;
84
+ }
85
+
86
+
87
+ template <typename scalar_t>
88
+ __device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data,
89
+ const int &height, const int &width, const int &nheads, const int &channels,
90
+ const scalar_t &h, const scalar_t &w, const int &m, const int &c,
91
+ const scalar_t &top_grad,
92
+ const scalar_t &attn_weight,
93
+ scalar_t* &grad_value,
94
+ scalar_t* grad_sampling_loc,
95
+ scalar_t* grad_attn_weight)
96
+ {
97
+ const int h_low = floor(h);
98
+ const int w_low = floor(w);
99
+ const int h_high = h_low + 1;
100
+ const int w_high = w_low + 1;
101
+
102
+ const scalar_t lh = h - h_low;
103
+ const scalar_t lw = w - w_low;
104
+ const scalar_t hh = 1 - lh, hw = 1 - lw;
105
+
106
+ const int w_stride = nheads * channels;
107
+ const int h_stride = width * w_stride;
108
+ const int h_low_ptr_offset = h_low * h_stride;
109
+ const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
110
+ const int w_low_ptr_offset = w_low * w_stride;
111
+ const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
112
+ const int base_ptr = m * channels + c;
113
+
114
+ const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
115
+ const scalar_t top_grad_value = top_grad * attn_weight;
116
+ scalar_t grad_h_weight = 0, grad_w_weight = 0;
117
+
118
+ scalar_t v1 = 0;
119
+ if (h_low >= 0 && w_low >= 0)
120
+ {
121
+ const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
122
+ v1 = bottom_data[ptr1];
123
+ grad_h_weight -= hw * v1;
124
+ grad_w_weight -= hh * v1;
125
+ atomicAdd(grad_value+ptr1, w1*top_grad_value);
126
+ }
127
+ scalar_t v2 = 0;
128
+ if (h_low >= 0 && w_high <= width - 1)
129
+ {
130
+ const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
131
+ v2 = bottom_data[ptr2];
132
+ grad_h_weight -= lw * v2;
133
+ grad_w_weight += hh * v2;
134
+ atomicAdd(grad_value+ptr2, w2*top_grad_value);
135
+ }
136
+ scalar_t v3 = 0;
137
+ if (h_high <= height - 1 && w_low >= 0)
138
+ {
139
+ const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
140
+ v3 = bottom_data[ptr3];
141
+ grad_h_weight += hw * v3;
142
+ grad_w_weight -= lh * v3;
143
+ atomicAdd(grad_value+ptr3, w3*top_grad_value);
144
+ }
145
+ scalar_t v4 = 0;
146
+ if (h_high <= height - 1 && w_high <= width - 1)
147
+ {
148
+ const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
149
+ v4 = bottom_data[ptr4];
150
+ grad_h_weight += lw * v4;
151
+ grad_w_weight += lh * v4;
152
+ atomicAdd(grad_value+ptr4, w4*top_grad_value);
153
+ }
154
+
155
+ const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
156
+ *grad_attn_weight = top_grad * val;
157
+ *grad_sampling_loc = width * grad_w_weight * top_grad_value;
158
+ *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value;
159
+ }
160
+
161
+
162
+ template <typename scalar_t>
163
+ __device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data,
164
+ const int &height, const int &width, const int &nheads, const int &channels,
165
+ const scalar_t &h, const scalar_t &w, const int &m, const int &c,
166
+ const scalar_t &top_grad,
167
+ const scalar_t &attn_weight,
168
+ scalar_t* &grad_value,
169
+ scalar_t* grad_sampling_loc,
170
+ scalar_t* grad_attn_weight)
171
+ {
172
+ const int h_low = floor(h);
173
+ const int w_low = floor(w);
174
+ const int h_high = h_low + 1;
175
+ const int w_high = w_low + 1;
176
+
177
+ const scalar_t lh = h - h_low;
178
+ const scalar_t lw = w - w_low;
179
+ const scalar_t hh = 1 - lh, hw = 1 - lw;
180
+
181
+ const int w_stride = nheads * channels;
182
+ const int h_stride = width * w_stride;
183
+ const int h_low_ptr_offset = h_low * h_stride;
184
+ const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
185
+ const int w_low_ptr_offset = w_low * w_stride;
186
+ const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
187
+ const int base_ptr = m * channels + c;
188
+
189
+ const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
190
+ const scalar_t top_grad_value = top_grad * attn_weight;
191
+ scalar_t grad_h_weight = 0, grad_w_weight = 0;
192
+
193
+ scalar_t v1 = 0;
194
+ if (h_low >= 0 && w_low >= 0)
195
+ {
196
+ const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
197
+ v1 = bottom_data[ptr1];
198
+ grad_h_weight -= hw * v1;
199
+ grad_w_weight -= hh * v1;
200
+ atomicAdd(grad_value+ptr1, w1*top_grad_value);
201
+ }
202
+ scalar_t v2 = 0;
203
+ if (h_low >= 0 && w_high <= width - 1)
204
+ {
205
+ const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
206
+ v2 = bottom_data[ptr2];
207
+ grad_h_weight -= lw * v2;
208
+ grad_w_weight += hh * v2;
209
+ atomicAdd(grad_value+ptr2, w2*top_grad_value);
210
+ }
211
+ scalar_t v3 = 0;
212
+ if (h_high <= height - 1 && w_low >= 0)
213
+ {
214
+ const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
215
+ v3 = bottom_data[ptr3];
216
+ grad_h_weight += hw * v3;
217
+ grad_w_weight -= lh * v3;
218
+ atomicAdd(grad_value+ptr3, w3*top_grad_value);
219
+ }
220
+ scalar_t v4 = 0;
221
+ if (h_high <= height - 1 && w_high <= width - 1)
222
+ {
223
+ const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
224
+ v4 = bottom_data[ptr4];
225
+ grad_h_weight += lw * v4;
226
+ grad_w_weight += lh * v4;
227
+ atomicAdd(grad_value+ptr4, w4*top_grad_value);
228
+ }
229
+
230
+ const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
231
+ atomicAdd(grad_attn_weight, top_grad * val);
232
+ atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value);
233
+ atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value);
234
+ }
235
+
236
+
237
+ template <typename scalar_t>
238
+ __global__ void ms_deformable_im2col_gpu_kernel(const int n,
239
+ const scalar_t *data_value,
240
+ const int64_t *data_spatial_shapes,
241
+ const int64_t *data_level_start_index,
242
+ const scalar_t *data_sampling_loc,
243
+ const scalar_t *data_attn_weight,
244
+ const int batch_size,
245
+ const int spatial_size,
246
+ const int num_heads,
247
+ const int channels,
248
+ const int num_levels,
249
+ const int num_query,
250
+ const int num_point,
251
+ scalar_t *data_col)
252
+ {
253
+ CUDA_KERNEL_LOOP(index, n)
254
+ {
255
+ int _temp = index;
256
+ const int c_col = _temp % channels;
257
+ _temp /= channels;
258
+ const int sampling_index = _temp;
259
+ const int m_col = _temp % num_heads;
260
+ _temp /= num_heads;
261
+ const int q_col = _temp % num_query;
262
+ _temp /= num_query;
263
+ const int b_col = _temp;
264
+
265
+ scalar_t *data_col_ptr = data_col + index;
266
+ int data_weight_ptr = sampling_index * num_levels * num_point;
267
+ int data_loc_w_ptr = data_weight_ptr << 1;
268
+ const int qid_stride = num_heads * channels;
269
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
270
+ scalar_t col = 0;
271
+
272
+ for (int l_col=0; l_col < num_levels; ++l_col)
273
+ {
274
+ const int level_start_id = data_level_start_index[l_col];
275
+ const int spatial_h_ptr = l_col << 1;
276
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
277
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
278
+ const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride);
279
+ for (int p_col=0; p_col < num_point; ++p_col)
280
+ {
281
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
282
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
283
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
284
+
285
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
286
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
287
+
288
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
289
+ {
290
+ col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight;
291
+ }
292
+
293
+ data_weight_ptr += 1;
294
+ data_loc_w_ptr += 2;
295
+ }
296
+ }
297
+ *data_col_ptr = col;
298
+ }
299
+ }
300
+
301
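+ // Backward pass, statically sized shared-memory variant (v1): every thread
+ // caches its partial sampling-location / attention-weight gradients in
+ // shared memory, and thread 0 sums them serially before storing the result.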
+ template <typename scalar_t, unsigned int blockSize>
302
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n,
303
+ const scalar_t *grad_col,
304
+ const scalar_t *data_value,
305
+ const int64_t *data_spatial_shapes,
306
+ const int64_t *data_level_start_index,
307
+ const scalar_t *data_sampling_loc,
308
+ const scalar_t *data_attn_weight,
309
+ const int batch_size,
310
+ const int spatial_size,
311
+ const int num_heads,
312
+ const int channels,
313
+ const int num_levels,
314
+ const int num_query,
315
+ const int num_point,
316
+ scalar_t *grad_value,
317
+ scalar_t *grad_sampling_loc,
318
+ scalar_t *grad_attn_weight)
319
+ {
320
+ CUDA_KERNEL_LOOP(index, n)
321
+ {
322
+ __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
323
+ __shared__ scalar_t cache_grad_attn_weight[blockSize];
324
+ unsigned int tid = threadIdx.x;
325
+ int _temp = index;
326
+ const int c_col = _temp % channels;
327
+ _temp /= channels;
328
+ const int sampling_index = _temp;
329
+ const int m_col = _temp % num_heads;
330
+ _temp /= num_heads;
331
+ const int q_col = _temp % num_query;
332
+ _temp /= num_query;
333
+ const int b_col = _temp;
334
+
335
+ const scalar_t top_grad = grad_col[index];
336
+
337
+ int data_weight_ptr = sampling_index * num_levels * num_point;
338
+ int data_loc_w_ptr = data_weight_ptr << 1;
339
+ const int grad_sampling_ptr = data_weight_ptr;
340
+ grad_sampling_loc += grad_sampling_ptr << 1;
341
+ grad_attn_weight += grad_sampling_ptr;
342
+ const int grad_weight_stride = 1;
343
+ const int grad_loc_stride = 2;
344
+ const int qid_stride = num_heads * channels;
345
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
346
+
347
+ for (int l_col=0; l_col < num_levels; ++l_col)
348
+ {
349
+ const int level_start_id = data_level_start_index[l_col];
350
+ const int spatial_h_ptr = l_col << 1;
351
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
352
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
353
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
354
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
355
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
356
+
357
+ for (int p_col=0; p_col < num_point; ++p_col)
358
+ {
359
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
360
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
361
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
362
+
363
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
364
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
365
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
366
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
367
+ *(cache_grad_attn_weight+threadIdx.x)=0;
368
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
369
+ {
370
+ ms_deform_attn_col2im_bilinear(
371
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
372
+ top_grad, weight, grad_value_ptr,
373
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
374
+ }
375
+
376
+ __syncthreads();
377
+ if (tid == 0)
378
+ {
379
+ scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
380
+ int sid=2;
381
+ for (unsigned int tid = 1; tid < blockSize; ++tid)
382
+ {
383
+ _grad_w += cache_grad_sampling_loc[sid];
384
+ _grad_h += cache_grad_sampling_loc[sid + 1];
385
+ _grad_a += cache_grad_attn_weight[tid];
386
+ sid += 2;
387
+ }
388
+
389
+
390
+ *grad_sampling_loc = _grad_w;
391
+ *(grad_sampling_loc + 1) = _grad_h;
392
+ *grad_attn_weight = _grad_a;
393
+ }
394
+ __syncthreads();
395
+
396
+ data_weight_ptr += 1;
397
+ data_loc_w_ptr += 2;
398
+ grad_attn_weight += grad_weight_stride;
399
+ grad_sampling_loc += grad_loc_stride;
400
+ }
401
+ }
402
+ }
403
+ }
404
+
405
+
406
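+ // Backward pass (v2): same caching scheme as v1, but the per-block partial
+ // gradients are combined with a parallel tree reduction, which assumes the
+ // templated blockSize is a power of two.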
+ template <typename scalar_t, unsigned int blockSize>
407
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n,
408
+ const scalar_t *grad_col,
409
+ const scalar_t *data_value,
410
+ const int64_t *data_spatial_shapes,
411
+ const int64_t *data_level_start_index,
412
+ const scalar_t *data_sampling_loc,
413
+ const scalar_t *data_attn_weight,
414
+ const int batch_size,
415
+ const int spatial_size,
416
+ const int num_heads,
417
+ const int channels,
418
+ const int num_levels,
419
+ const int num_query,
420
+ const int num_point,
421
+ scalar_t *grad_value,
422
+ scalar_t *grad_sampling_loc,
423
+ scalar_t *grad_attn_weight)
424
+ {
425
+ CUDA_KERNEL_LOOP(index, n)
426
+ {
427
+ __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
428
+ __shared__ scalar_t cache_grad_attn_weight[blockSize];
429
+ unsigned int tid = threadIdx.x;
430
+ int _temp = index;
431
+ const int c_col = _temp % channels;
432
+ _temp /= channels;
433
+ const int sampling_index = _temp;
434
+ const int m_col = _temp % num_heads;
435
+ _temp /= num_heads;
436
+ const int q_col = _temp % num_query;
437
+ _temp /= num_query;
438
+ const int b_col = _temp;
439
+
440
+ const scalar_t top_grad = grad_col[index];
441
+
442
+ int data_weight_ptr = sampling_index * num_levels * num_point;
443
+ int data_loc_w_ptr = data_weight_ptr << 1;
444
+ const int grad_sampling_ptr = data_weight_ptr;
445
+ grad_sampling_loc += grad_sampling_ptr << 1;
446
+ grad_attn_weight += grad_sampling_ptr;
447
+ const int grad_weight_stride = 1;
448
+ const int grad_loc_stride = 2;
449
+ const int qid_stride = num_heads * channels;
450
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
451
+
452
+ for (int l_col=0; l_col < num_levels; ++l_col)
453
+ {
454
+ const int level_start_id = data_level_start_index[l_col];
455
+ const int spatial_h_ptr = l_col << 1;
456
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
457
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
458
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
459
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
460
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
461
+
462
+ for (int p_col=0; p_col < num_point; ++p_col)
463
+ {
464
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
465
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
466
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
467
+
468
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
469
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
470
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
471
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
472
+ *(cache_grad_attn_weight+threadIdx.x)=0;
473
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
474
+ {
475
+ ms_deform_attn_col2im_bilinear(
476
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
477
+ top_grad, weight, grad_value_ptr,
478
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
479
+ }
480
+
481
+ __syncthreads();
482
+
483
+ for (unsigned int s=blockSize/2; s>0; s>>=1)
484
+ {
485
+ if (tid < s) {
486
+ const unsigned int xid1 = tid << 1;
487
+ const unsigned int xid2 = (tid + s) << 1;
488
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
489
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
490
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
491
+ }
492
+ __syncthreads();
493
+ }
494
+
495
+ if (tid == 0)
496
+ {
497
+ *grad_sampling_loc = cache_grad_sampling_loc[0];
498
+ *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
499
+ *grad_attn_weight = cache_grad_attn_weight[0];
500
+ }
501
+ __syncthreads();
502
+
503
+ data_weight_ptr += 1;
504
+ data_loc_w_ptr += 2;
505
+ grad_attn_weight += grad_weight_stride;
506
+ grad_sampling_loc += grad_loc_stride;
507
+ }
508
+ }
509
+ }
510
+ }
511
+
512
+
513
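+ // Backward pass with dynamically sized shared memory and a serial
+ // reduction; selected in the default dispatch branch when channels < 64.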
+ template <typename scalar_t>
514
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n,
515
+ const scalar_t *grad_col,
516
+ const scalar_t *data_value,
517
+ const int64_t *data_spatial_shapes,
518
+ const int64_t *data_level_start_index,
519
+ const scalar_t *data_sampling_loc,
520
+ const scalar_t *data_attn_weight,
521
+ const int batch_size,
522
+ const int spatial_size,
523
+ const int num_heads,
524
+ const int channels,
525
+ const int num_levels,
526
+ const int num_query,
527
+ const int num_point,
528
+ scalar_t *grad_value,
529
+ scalar_t *grad_sampling_loc,
530
+ scalar_t *grad_attn_weight)
531
+ {
532
+ CUDA_KERNEL_LOOP(index, n)
533
+ {
534
+ extern __shared__ int _s[];
535
+ scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
536
+ scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
537
+ unsigned int tid = threadIdx.x;
538
+ int _temp = index;
539
+ const int c_col = _temp % channels;
540
+ _temp /= channels;
541
+ const int sampling_index = _temp;
542
+ const int m_col = _temp % num_heads;
543
+ _temp /= num_heads;
544
+ const int q_col = _temp % num_query;
545
+ _temp /= num_query;
546
+ const int b_col = _temp;
547
+
548
+ const scalar_t top_grad = grad_col[index];
549
+
550
+ int data_weight_ptr = sampling_index * num_levels * num_point;
551
+ int data_loc_w_ptr = data_weight_ptr << 1;
552
+ const int grad_sampling_ptr = data_weight_ptr;
553
+ grad_sampling_loc += grad_sampling_ptr << 1;
554
+ grad_attn_weight += grad_sampling_ptr;
555
+ const int grad_weight_stride = 1;
556
+ const int grad_loc_stride = 2;
557
+ const int qid_stride = num_heads * channels;
558
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
559
+
560
+ for (int l_col=0; l_col < num_levels; ++l_col)
561
+ {
562
+ const int level_start_id = data_level_start_index[l_col];
563
+ const int spatial_h_ptr = l_col << 1;
564
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
565
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
566
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
567
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
568
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
569
+
570
+ for (int p_col=0; p_col < num_point; ++p_col)
571
+ {
572
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
573
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
574
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
575
+
576
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
577
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
578
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
579
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
580
+ *(cache_grad_attn_weight+threadIdx.x)=0;
581
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
582
+ {
583
+ ms_deform_attn_col2im_bilinear(
584
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
585
+ top_grad, weight, grad_value_ptr,
586
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
587
+ }
588
+
589
+ __syncthreads();
590
+ if (tid == 0)
591
+ {
592
+ scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
593
+ int sid=2;
594
+ for (unsigned int tid = 1; tid < blockDim.x; ++tid)
595
+ {
596
+ _grad_w += cache_grad_sampling_loc[sid];
597
+ _grad_h += cache_grad_sampling_loc[sid + 1];
598
+ _grad_a += cache_grad_attn_weight[tid];
599
+ sid += 2;
600
+ }
601
+
602
+
603
+ *grad_sampling_loc = _grad_w;
604
+ *(grad_sampling_loc + 1) = _grad_h;
605
+ *grad_attn_weight = _grad_a;
606
+ }
607
+ __syncthreads();
608
+
609
+ data_weight_ptr += 1;
610
+ data_loc_w_ptr += 2;
611
+ grad_attn_weight += grad_weight_stride;
612
+ grad_sampling_loc += grad_loc_stride;
613
+ }
614
+ }
615
+ }
616
+ }
617
+
618
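+ // Dynamic shared-memory variant with a tree reduction; the extra `spre`
+ // bookkeeping folds in the leftover element when blockDim.x is not a
+ // power of two.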
+ template <typename scalar_t>
619
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n,
620
+ const scalar_t *grad_col,
621
+ const scalar_t *data_value,
622
+ const int64_t *data_spatial_shapes,
623
+ const int64_t *data_level_start_index,
624
+ const scalar_t *data_sampling_loc,
625
+ const scalar_t *data_attn_weight,
626
+ const int batch_size,
627
+ const int spatial_size,
628
+ const int num_heads,
629
+ const int channels,
630
+ const int num_levels,
631
+ const int num_query,
632
+ const int num_point,
633
+ scalar_t *grad_value,
634
+ scalar_t *grad_sampling_loc,
635
+ scalar_t *grad_attn_weight)
636
+ {
637
+ CUDA_KERNEL_LOOP(index, n)
638
+ {
639
+ extern __shared__ int _s[];
640
+ scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
641
+ scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
642
+ unsigned int tid = threadIdx.x;
643
+ int _temp = index;
644
+ const int c_col = _temp % channels;
645
+ _temp /= channels;
646
+ const int sampling_index = _temp;
647
+ const int m_col = _temp % num_heads;
648
+ _temp /= num_heads;
649
+ const int q_col = _temp % num_query;
650
+ _temp /= num_query;
651
+ const int b_col = _temp;
652
+
653
+ const scalar_t top_grad = grad_col[index];
654
+
655
+ int data_weight_ptr = sampling_index * num_levels * num_point;
656
+ int data_loc_w_ptr = data_weight_ptr << 1;
657
+ const int grad_sampling_ptr = data_weight_ptr;
658
+ grad_sampling_loc += grad_sampling_ptr << 1;
659
+ grad_attn_weight += grad_sampling_ptr;
660
+ const int grad_weight_stride = 1;
661
+ const int grad_loc_stride = 2;
662
+ const int qid_stride = num_heads * channels;
663
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
664
+
665
+ for (int l_col=0; l_col < num_levels; ++l_col)
666
+ {
667
+ const int level_start_id = data_level_start_index[l_col];
668
+ const int spatial_h_ptr = l_col << 1;
669
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
670
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
671
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
672
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
673
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
674
+
675
+ for (int p_col=0; p_col < num_point; ++p_col)
676
+ {
677
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
678
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
679
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
680
+
681
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
682
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
683
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
684
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
685
+ *(cache_grad_attn_weight+threadIdx.x)=0;
686
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
687
+ {
688
+ ms_deform_attn_col2im_bilinear(
689
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
690
+ top_grad, weight, grad_value_ptr,
691
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
692
+ }
693
+
694
+ __syncthreads();
695
+
696
+ for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
697
+ {
698
+ if (tid < s) {
699
+ const unsigned int xid1 = tid << 1;
700
+ const unsigned int xid2 = (tid + s) << 1;
701
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
702
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
703
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
704
+ if (tid + (s << 1) < spre)
705
+ {
706
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
707
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
708
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
709
+ }
710
+ }
711
+ __syncthreads();
712
+ }
713
+
714
+ if (tid == 0)
715
+ {
716
+ *grad_sampling_loc = cache_grad_sampling_loc[0];
717
+ *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
718
+ *grad_attn_weight = cache_grad_attn_weight[0];
719
+ }
720
+ __syncthreads();
721
+
722
+ data_weight_ptr += 1;
723
+ data_loc_w_ptr += 2;
724
+ grad_attn_weight += grad_weight_stride;
725
+ grad_sampling_loc += grad_loc_stride;
726
+ }
727
+ }
728
+ }
729
+ }
730
+
731
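+ // Same tree reduction as above, but the per-block results are accumulated
+ // into global memory with atomicAdd, since several blocks can contribute to
+ // the same sampling location when channels exceed one block.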
+ template <typename scalar_t>
732
+ __global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n,
733
+ const scalar_t *grad_col,
734
+ const scalar_t *data_value,
735
+ const int64_t *data_spatial_shapes,
736
+ const int64_t *data_level_start_index,
737
+ const scalar_t *data_sampling_loc,
738
+ const scalar_t *data_attn_weight,
739
+ const int batch_size,
740
+ const int spatial_size,
741
+ const int num_heads,
742
+ const int channels,
743
+ const int num_levels,
744
+ const int num_query,
745
+ const int num_point,
746
+ scalar_t *grad_value,
747
+ scalar_t *grad_sampling_loc,
748
+ scalar_t *grad_attn_weight)
749
+ {
750
+ CUDA_KERNEL_LOOP(index, n)
751
+ {
752
+ extern __shared__ int _s[];
753
+ scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
754
+ scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
755
+ unsigned int tid = threadIdx.x;
756
+ int _temp = index;
757
+ const int c_col = _temp % channels;
758
+ _temp /= channels;
759
+ const int sampling_index = _temp;
760
+ const int m_col = _temp % num_heads;
761
+ _temp /= num_heads;
762
+ const int q_col = _temp % num_query;
763
+ _temp /= num_query;
764
+ const int b_col = _temp;
765
+
766
+ const scalar_t top_grad = grad_col[index];
767
+
768
+ int data_weight_ptr = sampling_index * num_levels * num_point;
769
+ int data_loc_w_ptr = data_weight_ptr << 1;
770
+ const int grad_sampling_ptr = data_weight_ptr;
771
+ grad_sampling_loc += grad_sampling_ptr << 1;
772
+ grad_attn_weight += grad_sampling_ptr;
773
+ const int grad_weight_stride = 1;
774
+ const int grad_loc_stride = 2;
775
+ const int qid_stride = num_heads * channels;
776
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
777
+
778
+ for (int l_col=0; l_col < num_levels; ++l_col)
779
+ {
780
+ const int level_start_id = data_level_start_index[l_col];
781
+ const int spatial_h_ptr = l_col << 1;
782
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
783
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
784
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
785
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
786
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
787
+
788
+ for (int p_col=0; p_col < num_point; ++p_col)
789
+ {
790
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
791
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
792
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
793
+
794
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
795
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
796
+ *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
797
+ *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
798
+ *(cache_grad_attn_weight+threadIdx.x)=0;
799
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
800
+ {
801
+ ms_deform_attn_col2im_bilinear(
802
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
803
+ top_grad, weight, grad_value_ptr,
804
+ cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
805
+ }
806
+
807
+ __syncthreads();
808
+
809
+ for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
810
+ {
811
+ if (tid < s) {
812
+ const unsigned int xid1 = tid << 1;
813
+ const unsigned int xid2 = (tid + s) << 1;
814
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
815
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
816
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
817
+ if (tid + (s << 1) < spre)
818
+ {
819
+ cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
820
+ cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
821
+ cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
822
+ }
823
+ }
824
+ __syncthreads();
825
+ }
826
+
827
+ if (tid == 0)
828
+ {
829
+ atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]);
830
+ atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]);
831
+ atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]);
832
+ }
833
+ __syncthreads();
834
+
835
+ data_weight_ptr += 1;
836
+ data_loc_w_ptr += 2;
837
+ grad_attn_weight += grad_weight_stride;
838
+ grad_sampling_loc += grad_loc_stride;
839
+ }
840
+ }
841
+ }
842
+ }
843
+
844
+
845
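+ // Fallback that skips shared memory entirely: gradients are scattered
+ // straight into global memory through the *_gm bilinear helper's atomicAdds.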
+ template <typename scalar_t>
846
+ __global__ void ms_deformable_col2im_gpu_kernel_gm(const int n,
847
+ const scalar_t *grad_col,
848
+ const scalar_t *data_value,
849
+ const int64_t *data_spatial_shapes,
850
+ const int64_t *data_level_start_index,
851
+ const scalar_t *data_sampling_loc,
852
+ const scalar_t *data_attn_weight,
853
+ const int batch_size,
854
+ const int spatial_size,
855
+ const int num_heads,
856
+ const int channels,
857
+ const int num_levels,
858
+ const int num_query,
859
+ const int num_point,
860
+ scalar_t *grad_value,
861
+ scalar_t *grad_sampling_loc,
862
+ scalar_t *grad_attn_weight)
863
+ {
864
+ CUDA_KERNEL_LOOP(index, n)
865
+ {
866
+ int _temp = index;
867
+ const int c_col = _temp % channels;
868
+ _temp /= channels;
869
+ const int sampling_index = _temp;
870
+ const int m_col = _temp % num_heads;
871
+ _temp /= num_heads;
872
+ const int q_col = _temp % num_query;
873
+ _temp /= num_query;
874
+ const int b_col = _temp;
875
+
876
+ const scalar_t top_grad = grad_col[index];
877
+
878
+ int data_weight_ptr = sampling_index * num_levels * num_point;
879
+ int data_loc_w_ptr = data_weight_ptr << 1;
880
+ const int grad_sampling_ptr = data_weight_ptr;
881
+ grad_sampling_loc += grad_sampling_ptr << 1;
882
+ grad_attn_weight += grad_sampling_ptr;
883
+ const int grad_weight_stride = 1;
884
+ const int grad_loc_stride = 2;
885
+ const int qid_stride = num_heads * channels;
886
+ const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
887
+
888
+ for (int l_col=0; l_col < num_levels; ++l_col)
889
+ {
890
+ const int level_start_id = data_level_start_index[l_col];
891
+ const int spatial_h_ptr = l_col << 1;
892
+ const int spatial_h = data_spatial_shapes[spatial_h_ptr];
893
+ const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
894
+ const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
895
+ const scalar_t *data_value_ptr = data_value + value_ptr_offset;
896
+ scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
897
+
898
+ for (int p_col=0; p_col < num_point; ++p_col)
899
+ {
900
+ const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
901
+ const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
902
+ const scalar_t weight = data_attn_weight[data_weight_ptr];
903
+
904
+ const scalar_t h_im = loc_h * spatial_h - 0.5;
905
+ const scalar_t w_im = loc_w * spatial_w - 0.5;
906
+ if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
907
+ {
908
+ ms_deform_attn_col2im_bilinear_gm(
909
+ data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
910
+ top_grad, weight, grad_value_ptr,
911
+ grad_sampling_loc, grad_attn_weight);
912
+ }
913
+ data_weight_ptr += 1;
914
+ data_loc_w_ptr += 2;
915
+ grad_attn_weight += grad_weight_stride;
916
+ grad_sampling_loc += grad_loc_stride;
917
+ }
918
+ }
919
+ }
920
+ }
921
+
922
+
923
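+ // Host-side launcher for the forward kernel: one thread per output element,
+ // CUDA_NUM_THREADS threads per block, followed by a cudaGetLastError check.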
+ template <typename scalar_t>
924
+ void ms_deformable_im2col_cuda(cudaStream_t stream,
925
+ const scalar_t* data_value,
926
+ const int64_t* data_spatial_shapes,
927
+ const int64_t* data_level_start_index,
928
+ const scalar_t* data_sampling_loc,
929
+ const scalar_t* data_attn_weight,
930
+ const int batch_size,
931
+ const int spatial_size,
932
+ const int num_heads,
933
+ const int channels,
934
+ const int num_levels,
935
+ const int num_query,
936
+ const int num_point,
937
+ scalar_t* data_col)
938
+ {
939
+ const int num_kernels = batch_size * num_query * num_heads * channels;
940
+ const int num_actual_kernels = batch_size * num_query * num_heads * channels;
941
+ const int num_threads = CUDA_NUM_THREADS;
942
+ ms_deformable_im2col_gpu_kernel<scalar_t>
943
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
944
+ 0, stream>>>(
945
+ num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight,
946
+ batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col);
947
+
948
+ cudaError_t err = cudaGetLastError();
949
+ if (err != cudaSuccess)
950
+ {
951
+ printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
952
+ }
953
+
954
+ }
955
+
956
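+ // Host-side launcher for the backward kernels. The block size is
+ // min(channels, CUDA_NUM_THREADS); power-of-two channel counts up to 1024
+ // use the blocksize-aware kernels, multiples of 1024 use the multi-block
+ // shared-memory kernel, and everything else falls back to the dynamic
+ // shared-memory or global-memory variants.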
+ template <typename scalar_t>
957
+ void ms_deformable_col2im_cuda(cudaStream_t stream,
958
+ const scalar_t* grad_col,
959
+ const scalar_t* data_value,
960
+ const int64_t * data_spatial_shapes,
961
+ const int64_t * data_level_start_index,
962
+ const scalar_t * data_sampling_loc,
963
+ const scalar_t * data_attn_weight,
964
+ const int batch_size,
965
+ const int spatial_size,
966
+ const int num_heads,
967
+ const int channels,
968
+ const int num_levels,
969
+ const int num_query,
970
+ const int num_point,
971
+ scalar_t* grad_value,
972
+ scalar_t* grad_sampling_loc,
973
+ scalar_t* grad_attn_weight)
974
+ {
975
+ const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels;
976
+ const int num_kernels = batch_size * num_query * num_heads * channels;
977
+ const int num_actual_kernels = batch_size * num_query * num_heads * channels;
978
+ if (channels > 1024)
979
+ {
980
+ if ((channels & 1023) == 0)
981
+ {
982
+ ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>
983
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
984
+ num_threads*3*sizeof(scalar_t), stream>>>(
985
+ num_kernels,
986
+ grad_col,
987
+ data_value,
988
+ data_spatial_shapes,
989
+ data_level_start_index,
990
+ data_sampling_loc,
991
+ data_attn_weight,
992
+ batch_size,
993
+ spatial_size,
994
+ num_heads,
995
+ channels,
996
+ num_levels,
997
+ num_query,
998
+ num_point,
999
+ grad_value,
1000
+ grad_sampling_loc,
1001
+ grad_attn_weight);
1002
+ }
1003
+ else
1004
+ {
1005
+ ms_deformable_col2im_gpu_kernel_gm<scalar_t>
1006
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1007
+ 0, stream>>>(
1008
+ num_kernels,
1009
+ grad_col,
1010
+ data_value,
1011
+ data_spatial_shapes,
1012
+ data_level_start_index,
1013
+ data_sampling_loc,
1014
+ data_attn_weight,
1015
+ batch_size,
1016
+ spatial_size,
1017
+ num_heads,
1018
+ channels,
1019
+ num_levels,
1020
+ num_query,
1021
+ num_point,
1022
+ grad_value,
1023
+ grad_sampling_loc,
1024
+ grad_attn_weight);
1025
+ }
1026
+ }
1027
+ else{
1028
+ switch(channels)
1029
+ {
1030
+ case 1:
1031
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 1>
1032
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1033
+ 0, stream>>>(
1034
+ num_kernels,
1035
+ grad_col,
1036
+ data_value,
1037
+ data_spatial_shapes,
1038
+ data_level_start_index,
1039
+ data_sampling_loc,
1040
+ data_attn_weight,
1041
+ batch_size,
1042
+ spatial_size,
1043
+ num_heads,
1044
+ channels,
1045
+ num_levels,
1046
+ num_query,
1047
+ num_point,
1048
+ grad_value,
1049
+ grad_sampling_loc,
1050
+ grad_attn_weight);
1051
+ break;
1052
+ case 2:
1053
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 2>
1054
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1055
+ 0, stream>>>(
1056
+ num_kernels,
1057
+ grad_col,
1058
+ data_value,
1059
+ data_spatial_shapes,
1060
+ data_level_start_index,
1061
+ data_sampling_loc,
1062
+ data_attn_weight,
1063
+ batch_size,
1064
+ spatial_size,
1065
+ num_heads,
1066
+ channels,
1067
+ num_levels,
1068
+ num_query,
1069
+ num_point,
1070
+ grad_value,
1071
+ grad_sampling_loc,
1072
+ grad_attn_weight);
1073
+ break;
1074
+ case 4:
1075
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 4>
1076
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1077
+ 0, stream>>>(
1078
+ num_kernels,
1079
+ grad_col,
1080
+ data_value,
1081
+ data_spatial_shapes,
1082
+ data_level_start_index,
1083
+ data_sampling_loc,
1084
+ data_attn_weight,
1085
+ batch_size,
1086
+ spatial_size,
1087
+ num_heads,
1088
+ channels,
1089
+ num_levels,
1090
+ num_query,
1091
+ num_point,
1092
+ grad_value,
1093
+ grad_sampling_loc,
1094
+ grad_attn_weight);
1095
+ break;
1096
+ case 8:
1097
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 8>
1098
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1099
+ 0, stream>>>(
1100
+ num_kernels,
1101
+ grad_col,
1102
+ data_value,
1103
+ data_spatial_shapes,
1104
+ data_level_start_index,
1105
+ data_sampling_loc,
1106
+ data_attn_weight,
1107
+ batch_size,
1108
+ spatial_size,
1109
+ num_heads,
1110
+ channels,
1111
+ num_levels,
1112
+ num_query,
1113
+ num_point,
1114
+ grad_value,
1115
+ grad_sampling_loc,
1116
+ grad_attn_weight);
1117
+ break;
1118
+ case 16:
1119
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 16>
1120
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1121
+ 0, stream>>>(
1122
+ num_kernels,
1123
+ grad_col,
1124
+ data_value,
1125
+ data_spatial_shapes,
1126
+ data_level_start_index,
1127
+ data_sampling_loc,
1128
+ data_attn_weight,
1129
+ batch_size,
1130
+ spatial_size,
1131
+ num_heads,
1132
+ channels,
1133
+ num_levels,
1134
+ num_query,
1135
+ num_point,
1136
+ grad_value,
1137
+ grad_sampling_loc,
1138
+ grad_attn_weight);
1139
+ break;
1140
+ case 32:
1141
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 32>
1142
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1143
+ 0, stream>>>(
1144
+ num_kernels,
1145
+ grad_col,
1146
+ data_value,
1147
+ data_spatial_shapes,
1148
+ data_level_start_index,
1149
+ data_sampling_loc,
1150
+ data_attn_weight,
1151
+ batch_size,
1152
+ spatial_size,
1153
+ num_heads,
1154
+ channels,
1155
+ num_levels,
1156
+ num_query,
1157
+ num_point,
1158
+ grad_value,
1159
+ grad_sampling_loc,
1160
+ grad_attn_weight);
1161
+ break;
1162
+ case 64:
1163
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 64>
1164
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1165
+ 0, stream>>>(
1166
+ num_kernels,
1167
+ grad_col,
1168
+ data_value,
1169
+ data_spatial_shapes,
1170
+ data_level_start_index,
1171
+ data_sampling_loc,
1172
+ data_attn_weight,
1173
+ batch_size,
1174
+ spatial_size,
1175
+ num_heads,
1176
+ channels,
1177
+ num_levels,
1178
+ num_query,
1179
+ num_point,
1180
+ grad_value,
1181
+ grad_sampling_loc,
1182
+ grad_attn_weight);
1183
+ break;
1184
+ case 128:
1185
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 128>
1186
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1187
+ 0, stream>>>(
1188
+ num_kernels,
1189
+ grad_col,
1190
+ data_value,
1191
+ data_spatial_shapes,
1192
+ data_level_start_index,
1193
+ data_sampling_loc,
1194
+ data_attn_weight,
1195
+ batch_size,
1196
+ spatial_size,
1197
+ num_heads,
1198
+ channels,
1199
+ num_levels,
1200
+ num_query,
1201
+ num_point,
1202
+ grad_value,
1203
+ grad_sampling_loc,
1204
+ grad_attn_weight);
1205
+ break;
1206
+ case 256:
1207
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 256>
1208
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1209
+ 0, stream>>>(
1210
+ num_kernels,
1211
+ grad_col,
1212
+ data_value,
1213
+ data_spatial_shapes,
1214
+ data_level_start_index,
1215
+ data_sampling_loc,
1216
+ data_attn_weight,
1217
+ batch_size,
1218
+ spatial_size,
1219
+ num_heads,
1220
+ channels,
1221
+ num_levels,
1222
+ num_query,
1223
+ num_point,
1224
+ grad_value,
1225
+ grad_sampling_loc,
1226
+ grad_attn_weight);
1227
+ break;
1228
+ case 512:
1229
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 512>
1230
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1231
+ 0, stream>>>(
1232
+ num_kernels,
1233
+ grad_col,
1234
+ data_value,
1235
+ data_spatial_shapes,
1236
+ data_level_start_index,
1237
+ data_sampling_loc,
1238
+ data_attn_weight,
1239
+ batch_size,
1240
+ spatial_size,
1241
+ num_heads,
1242
+ channels,
1243
+ num_levels,
1244
+ num_query,
1245
+ num_point,
1246
+ grad_value,
1247
+ grad_sampling_loc,
1248
+ grad_attn_weight);
1249
+ break;
1250
+ case 1024:
1251
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 1024>
1252
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1253
+ 0, stream>>>(
1254
+ num_kernels,
1255
+ grad_col,
1256
+ data_value,
1257
+ data_spatial_shapes,
1258
+ data_level_start_index,
1259
+ data_sampling_loc,
1260
+ data_attn_weight,
1261
+ batch_size,
1262
+ spatial_size,
1263
+ num_heads,
1264
+ channels,
1265
+ num_levels,
1266
+ num_query,
1267
+ num_point,
1268
+ grad_value,
1269
+ grad_sampling_loc,
1270
+ grad_attn_weight);
1271
+ break;
1272
+ default:
1273
+ if (channels < 64)
1274
+ {
1275
+ ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>
1276
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1277
+ num_threads*3*sizeof(scalar_t), stream>>>(
1278
+ num_kernels,
1279
+ grad_col,
1280
+ data_value,
1281
+ data_spatial_shapes,
1282
+ data_level_start_index,
1283
+ data_sampling_loc,
1284
+ data_attn_weight,
1285
+ batch_size,
1286
+ spatial_size,
1287
+ num_heads,
1288
+ channels,
1289
+ num_levels,
1290
+ num_query,
1291
+ num_point,
1292
+ grad_value,
1293
+ grad_sampling_loc,
1294
+ grad_attn_weight);
1295
+ }
1296
+ else
1297
+ {
1298
+ ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>
1299
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1300
+ num_threads*3*sizeof(scalar_t), stream>>>(
1301
+ num_kernels,
1302
+ grad_col,
1303
+ data_value,
1304
+ data_spatial_shapes,
1305
+ data_level_start_index,
1306
+ data_sampling_loc,
1307
+ data_attn_weight,
1308
+ batch_size,
1309
+ spatial_size,
1310
+ num_heads,
1311
+ channels,
1312
+ num_levels,
1313
+ num_query,
1314
+ num_point,
1315
+ grad_value,
1316
+ grad_sampling_loc,
1317
+ grad_attn_weight);
1318
+ }
1319
+ }
1320
+ }
1321
+ cudaError_t err = cudaGetLastError();
1322
+ if (err != cudaSuccess)
1323
+ {
1324
+ printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
1325
+ }
1326
+
1327
+ }
venv/lib/python3.10/site-packages/transformers/kernels/deta/ms_deform_attn.h ADDED
@@ -0,0 +1,61 @@
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #pragma once
12
+
13
+ #include "cpu/ms_deform_attn_cpu.h"
14
+
15
+ #ifdef WITH_CUDA
16
+ #include "cuda/ms_deform_attn_cuda.h"
17
+ #endif
18
+
19
+
20
+ at::Tensor
21
+ ms_deform_attn_forward(
22
+ const at::Tensor &value,
23
+ const at::Tensor &spatial_shapes,
24
+ const at::Tensor &level_start_index,
25
+ const at::Tensor &sampling_loc,
26
+ const at::Tensor &attn_weight,
27
+ const int im2col_step)
28
+ {
29
+ if (value.type().is_cuda())
30
+ {
31
+ #ifdef WITH_CUDA
32
+ return ms_deform_attn_cuda_forward(
33
+ value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
34
+ #else
35
+ AT_ERROR("Not compiled with GPU support");
36
+ #endif
37
+ }
38
+ AT_ERROR("Not implemented on the CPU");
39
+ }
40
+
41
+ std::vector<at::Tensor>
42
+ ms_deform_attn_backward(
43
+ const at::Tensor &value,
44
+ const at::Tensor &spatial_shapes,
45
+ const at::Tensor &level_start_index,
46
+ const at::Tensor &sampling_loc,
47
+ const at::Tensor &attn_weight,
48
+ const at::Tensor &grad_output,
49
+ const int im2col_step)
50
+ {
51
+ if (value.type().is_cuda())
52
+ {
53
+ #ifdef WITH_CUDA
54
+ return ms_deform_attn_cuda_backward(
55
+ value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
56
+ #else
57
+ AT_ERROR("Not compiled with GPU support");
58
+ #endif
59
+ }
60
+ AT_ERROR("Not implemented on the CPU");
61
+ }
venv/lib/python3.10/site-packages/transformers/kernels/deta/vision.cpp ADDED
@@ -0,0 +1,16 @@
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #include "ms_deform_attn.h"
12
+
13
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
14
+ m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward");
15
+ m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward");
16
+ }
venv/lib/python3.10/site-packages/transformers/kernels/yoso/common.h ADDED
@@ -0,0 +1,10 @@
1
+
2
+ #define min(a, b) ((a)<(b)?(a):(b))
3
+ #define max(a, b) ((a)>(b)?(a):(b))
4
+ #define ceil_divide(a, b) ((a)/(b)+((a)%(b)!=0))
5
+ #define select(cond, a, b) ((cond)?(a):(b))
6
+ #define PI 3.141592
7
+ #define EPSILON 1e-8
8
+ #define MAX_VAL 1e12
9
+ #define MIN_VAL -1e12
10
+ #define EMPTY_VALUE -1
venv/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h ADDED
@@ -0,0 +1,9 @@
1
+
2
+ #define MAX_THREADS_PER_BLOCK 1024
3
+ #define OPTIMAL_THREADS_PER_BLOCK 256
4
+ #define WARP_SIZE 32
5
+ #define MAX_NUM_BLOCK_X 2147483647
6
+ #define MAX_NUM_BLOCK_Y 65535
7
+ #define MAX_NUM_BLOCK_Z 65535
8
+ #define MAX_SHARED_MEM_PER_BLOCK 48000
9
+ #define FULL_MASK 0xffffffff
venv/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h ADDED
@@ -0,0 +1,79 @@
1
+
2
+ #include "common.h"
3
+
4
+ template<typename T>
5
+ __device__ int set_insert(T *set, int set_size, T value) {
6
+ int slot = value % set_size;
7
+ int start_slot = slot;
8
+ while (true) {
9
+ T prev = atomicCAS(&set[slot], EMPTY_VALUE, value);
10
+ if (prev == EMPTY_VALUE || prev == value) {
11
+ return slot;
12
+ }
13
+ slot = (slot + 1) % set_size;
14
+ if (slot == start_slot) {
15
+ return -1;
16
+ }
17
+ }
18
+ return -1;
19
+ }
20
+
21
+ template<typename T>
22
+ __device__ int set_lookup(T *set, int set_size, T value) {
23
+ int slot = value % set_size;
24
+ int start_slot = slot;
25
+ while (true) {
26
+ if (set[slot] == value) {
27
+ return slot;
28
+ }
29
+ slot = (slot + 1) % set_size;
30
+ if (slot == start_slot) {
31
+ return -1;
32
+ }
33
+ }
34
+ return -1;
35
+ }
36
+
37
+ template<typename T>
38
+ __device__ void init_buffer(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) {
39
+ __syncthreads();
40
+ for (int i = 0; i < buffer_size; i = i + num_threads) {
41
+ int offset_idx = i + thread_id;
42
+ if (offset_idx < buffer_size) {
43
+ buffer[offset_idx] = init_value;
44
+ }
45
+ }
46
+ __syncthreads();
47
+ }
48
+
49
+ template<typename T>
50
+ __device__ void copy_data(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) {
51
+ __syncthreads();
52
+ for (int i = 0; i < data_length; i = i + num_threads) {
53
+ int offset_idx = i + thread_id;
54
+ if (offset_idx < data_length) {
55
+ dist_pt[offset_idx] = src_pt[offset_idx];
56
+ }
57
+ }
58
+ __syncthreads();
59
+ }
60
+
61
+ template<typename T>
62
+ __device__ void init_buffer_nonblocking(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) {
63
+ for (int i = 0; i < buffer_size; i = i + num_threads) {
64
+ int offset_idx = i + thread_id;
65
+ if (offset_idx < buffer_size) {
66
+ buffer[offset_idx] = init_value;
67
+ }
68
+ }
69
+ }
70
+
71
+ template<typename T>
72
+ __device__ void copy_data_nonblocking(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) {
73
+ for (int i = 0; i < data_length; i = i + num_threads) {
74
+ int offset_idx = i + thread_id;
75
+ if (offset_idx < data_length) {
76
+ dist_pt[offset_idx] = src_pt[offset_idx];
77
+ }
78
+ }
79
+ }
venv/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu ADDED
@@ -0,0 +1,588 @@
1
+ // File from https://github.com/mlpen/YOSO/blob/main/encoders/backbones/efficient_attentions/yoso/yoso_v1/cuda/fast_lsh_cumulation.cu
2
+
3
+ #include <torch/extension.h>
4
+ #include <ATen/ATen.h>
5
+ #include "fast_lsh_cumulation.h"
6
+ #include "fast_lsh_cumulation_cuda.h"
7
+ #include "common_cuda.h"
8
+ #include "common.h"
9
+ #include <vector>
10
+ //////////////////////////////////////////////////////////////////////////////////////////////////
11
+ //////////////////////////////////////////////////////////////////////////////////////////////////
12
+
13
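+ // Computes LSH hash codes for queries and keys: random +/-1 matrices (Dmat)
+ // are drawn on the host, and the CUDA kernel is launched once per tensor
+ // with one block per (part, token, batch) and one thread per vector dimension.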
+ std::vector<at::Tensor> fast_hash_ver1_kernel(
14
+ at::Tensor query_mask,
15
+ at::Tensor query_vector,
16
+ at::Tensor key_mask,
17
+ at::Tensor key_vector,
18
+ int num_hash_f,
19
+ int hash_code_len,
20
+ bool use_cuda
21
+ ) {
22
+
23
+ int batch_size = query_vector.size(0);
24
+ int num_query = query_vector.size(1);
25
+ int num_key = key_vector.size(1);
26
+ int vector_dim = query_vector.size(2);
27
+
28
+ int num_hash_per_part = vector_dim / hash_code_len;
29
+ int num_part = max(1, ceil_divide(num_hash_f, num_hash_per_part));
30
+
31
+ at::Tensor Dmat = 2 * at::randint(0, 2, {batch_size, 3, num_part, vector_dim}, query_mask.options()) - 1;
32
+ at::Tensor query_hash_code = at::zeros({batch_size, num_query, num_hash_f}, query_mask.options());
33
+ at::Tensor key_hash_code = at::zeros({batch_size, num_key, num_hash_f}, key_mask.options());
34
+
35
+ int *query_mask_ptr = query_mask.data_ptr<int>();
36
+ float *query_vector_ptr = query_vector.data_ptr<float>();
37
+ int *key_mask_ptr = key_mask.data_ptr<int>();
38
+ float *key_vector_ptr = key_vector.data_ptr<float>();
39
+
40
+ int *Dmat_ptr = Dmat.data_ptr<int>();
41
+
42
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
43
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
44
+
45
+ if (use_cuda) {
46
+ {
47
+ dim3 threads(vector_dim);
48
+ dim3 blocks(num_part, num_query, batch_size);
49
+ int shared_mem = vector_dim * sizeof(float);
50
+ fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>(
51
+ query_mask_ptr,
52
+ query_vector_ptr,
53
+ Dmat_ptr,
54
+ query_hash_code_ptr,
55
+ batch_size,
56
+ num_query,
57
+ vector_dim,
58
+ num_part,
59
+ num_hash_f,
60
+ hash_code_len
61
+ );
62
+ }
63
+ {
64
+ dim3 threads(vector_dim);
65
+ dim3 blocks(num_part, num_key, batch_size);
66
+ int shared_mem = vector_dim * sizeof(float);
67
+ fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>(
68
+ key_mask_ptr,
69
+ key_vector_ptr,
70
+ Dmat_ptr,
71
+ key_hash_code_ptr,
72
+ batch_size,
73
+ num_key,
74
+ vector_dim,
75
+ num_part,
76
+ num_hash_f,
77
+ hash_code_len
78
+ );
79
+ }
80
+ }
81
+
82
+ return {query_hash_code, key_hash_code};
83
+
84
+ }
85
+
86
+ at::Tensor lsh_cumulation_ver1_kernel(
87
+ at::Tensor query_mask,
88
+ at::Tensor query_hash_code,
89
+ at::Tensor key_mask,
90
+ at::Tensor key_hash_code,
91
+ at::Tensor value,
92
+ int hashtable_capacity,
93
+ bool use_cuda
94
+ ) {
95
+
96
+ int batch_size = query_hash_code.size(0);
97
+ int num_hash_f = query_hash_code.size(2);
98
+
99
+ int num_query = query_hash_code.size(1);
100
+ int num_key = key_hash_code.size(1);
101
+ int value_dim = value.size(2);
102
+
103
+ at::Tensor hashtable_value = at::empty({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options());
104
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
105
+
106
+ if (use_cuda) {
107
+ int threads_x = WARP_SIZE;
108
+ int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE;
109
+ int block_x_step1 = num_key / threads_y;
110
+ int block_x_step2 = num_query / threads_y;
111
+ int block_y = batch_size;
112
+
113
+ dim3 threads(threads_x, threads_y);
114
+ dim3 blocks_step1(block_x_step1, block_y);
115
+ dim3 blocks_step2(block_x_step2, block_y);
116
+
117
+ int *query_mask_ptr = query_mask.data_ptr<int>();
118
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
119
+ int *key_mask_ptr = key_mask.data_ptr<int>();
120
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
121
+ float *value_ptr = value.data_ptr<float>();
122
+ float *hashtable_value_ptr = hashtable_value.data_ptr<float>();
123
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
124
+
125
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
126
+
127
+ cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float));
128
+
129
+ lsh_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>(
130
+ key_mask_ptr,
131
+ key_hash_code_ptr,
132
+ value_ptr,
133
+ hashtable_value_ptr,
134
+ batch_size,
135
+ num_hash_f,
136
+ hashtable_capacity,
137
+ num_key,
138
+ value_dim,
139
+ value_offset
140
+ );
141
+
142
+ lsh_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>(
143
+ query_mask_ptr,
144
+ query_hash_code_ptr,
145
+ hashtable_value_ptr,
146
+ cumulation_value_ptr,
147
+ batch_size,
148
+ num_hash_f,
149
+ hashtable_capacity,
150
+ num_query,
151
+ value_dim,
152
+ value_offset
153
+ );
154
+ }
155
+
156
+ }
157
+
158
+ return cumulation_value;
159
+
160
+ }
161
+
162
+ at::Tensor lsh_weighted_cumulation_ver1_kernel(
163
+ at::Tensor query_mask,
164
+ at::Tensor query_hash_code,
165
+ at::Tensor query_weight,
166
+ at::Tensor key_mask,
167
+ at::Tensor key_hash_code,
168
+ at::Tensor key_weight,
169
+ at::Tensor value,
170
+ int hashtable_capacity,
171
+ bool use_cuda
172
+ ) {
173
+
174
+ int batch_size = query_hash_code.size(0);
175
+ int num_hash_f = query_hash_code.size(2);
176
+
177
+ int num_query = query_hash_code.size(1);
178
+ int num_key = key_hash_code.size(1);
179
+ int value_dim = value.size(2);
180
+ int weight_dim = query_weight.size(2);
181
+
182
+ at::Tensor hashtable_value = at::zeros({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options());
183
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
184
+
185
+ if (use_cuda) {
186
+ int threads_x = WARP_SIZE;
187
+ int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE;
188
+ int block_x_step1 = num_key / threads_y;
189
+ int block_x_step2 = num_query / threads_y;
190
+ int block_y = batch_size;
191
+
192
+ dim3 threads(threads_x, threads_y);
193
+ dim3 blocks_step1(block_x_step1, block_y);
194
+ dim3 blocks_step2(block_x_step2, block_y);
195
+
196
+ int *query_mask_ptr = query_mask.data_ptr<int>();
197
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
198
+ float *query_weight_ptr = query_weight.data_ptr<float>();
199
+ int *key_mask_ptr = key_mask.data_ptr<int>();
200
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
201
+ float *key_weight_ptr = key_weight.data_ptr<float>();
202
+ float *value_ptr = value.data_ptr<float>();
203
+ float *hashtable_value_ptr = hashtable_value.data_ptr<float>();
204
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
205
+
206
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
207
+ for (int weight_idx = 0; weight_idx < weight_dim; weight_idx++) {
208
+
209
+ cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float));
210
+
211
+ lsh_weighted_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>(
212
+ key_mask_ptr,
213
+ key_hash_code_ptr,
214
+ key_weight_ptr,
215
+ value_ptr,
216
+ hashtable_value_ptr,
217
+ batch_size,
218
+ num_hash_f,
219
+ hashtable_capacity,
220
+ num_key,
221
+ value_dim,
222
+ weight_dim,
223
+ value_offset,
224
+ weight_idx
225
+ );
226
+
227
+ lsh_weighted_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>(
228
+ query_mask_ptr,
229
+ query_hash_code_ptr,
230
+ query_weight_ptr,
231
+ hashtable_value_ptr,
232
+ cumulation_value_ptr,
233
+ batch_size,
234
+ num_hash_f,
235
+ hashtable_capacity,
236
+ num_query,
237
+ value_dim,
238
+ weight_dim,
239
+ value_offset,
240
+ weight_idx
241
+ );
242
+ }
243
+ }
244
+
245
+ }
246
+
247
+ return cumulation_value;
248
+
249
+ }
250
+
251
+ at::Tensor lsh_weighted_cumulation_ver2_kernel(
252
+ at::Tensor query_mask,
253
+ at::Tensor query_hash_code,
254
+ at::Tensor query_weight,
255
+ at::Tensor key_mask,
256
+ at::Tensor key_hash_code,
257
+ at::Tensor key_weight,
258
+ at::Tensor value,
259
+ int hashtable_capacity,
260
+ bool use_cuda
261
+ ) {
262
+
263
+ int batch_size = query_hash_code.size(0);
264
+ int num_hash_f = query_hash_code.size(2);
265
+
266
+ int num_query = query_hash_code.size(1);
267
+ int num_key = key_hash_code.size(1);
268
+ int value_dim = value.size(2);
269
+ int weight_dim = query_weight.size(2);
270
+
271
+ at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
272
+ at::Tensor key_sorted_idxes = at::zeros({batch_size, num_hash_f, num_key}, query_hash_code.options());
273
+ at::Tensor query_info = at::zeros({batch_size, num_query, 2, num_hash_f}, query_hash_code.options());
274
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
275
+
276
+ if (use_cuda) {
277
+
278
+ int *query_mask_ptr = query_mask.data_ptr<int>();
279
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
280
+ float *query_weight_ptr = query_weight.data_ptr<float>();
281
+ int *key_mask_ptr = key_mask.data_ptr<int>();
282
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
283
+ float *key_weight_ptr = key_weight.data_ptr<float>();
284
+ float *value_ptr = value.data_ptr<float>();
285
+
286
+ int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
287
+ int *key_sorted_idxes_ptr = key_sorted_idxes.data_ptr<int>();
288
+ int *query_info_ptr = query_info.data_ptr<int>();
289
+
290
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
291
+
292
+ {
293
+ dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
294
+ dim3 blocks_step13(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
295
+ dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
296
+ dim3 blocks_step2(num_hash_f, batch_size);
297
+ int shared_mem = hashtable_capacity * sizeof(float);
298
+ count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
299
+ key_mask_ptr,
300
+ key_hash_code_ptr,
301
+ count_sort_table_ptr,
302
+ batch_size,
303
+ num_hash_f,
304
+ hashtable_capacity,
305
+ num_key
306
+ );
307
+ count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
308
+ count_sort_table_ptr,
309
+ batch_size,
310
+ num_hash_f,
311
+ hashtable_capacity
312
+ );
313
+ count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
314
+ key_mask_ptr,
315
+ key_hash_code_ptr,
316
+ count_sort_table_ptr,
317
+ key_sorted_idxes_ptr,
318
+ batch_size,
319
+ num_hash_f,
320
+ hashtable_capacity,
321
+ num_key
322
+ );
323
+ }
324
+ {
325
+ dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
326
+ dim3 blocks(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
327
+ extract_query_info_cuda_kernel<<<blocks, threads>>>(
328
+ query_mask_ptr,
329
+ query_hash_code_ptr,
330
+ count_sort_table_ptr,
331
+ query_info_ptr,
332
+ batch_size,
333
+ num_hash_f,
334
+ hashtable_capacity,
335
+ num_query
336
+ );
337
+ }
338
+ {
339
+ dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
340
+ dim3 blocks(num_query, num_hash_f, batch_size);
341
+ int shared_mem = (weight_dim + WARP_SIZE) * sizeof(float);
342
+ lsh_weighted_cumulation_ver2_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
343
+ query_mask_ptr,
344
+ query_info_ptr,
345
+ key_sorted_idxes_ptr,
346
+ query_weight_ptr,
347
+ key_weight_ptr,
348
+ value_ptr,
349
+ cumulation_value_ptr,
350
+ batch_size,
351
+ num_hash_f,
352
+ num_query,
353
+ num_key,
354
+ value_dim,
355
+ weight_dim
356
+ );
357
+ }
358
+ }
359
+
360
+ return cumulation_value;
361
+
362
+ }
363
+
364
+ at::Tensor lsh_weighted_cumulation_ver3_kernel(
365
+ at::Tensor query_mask,
366
+ at::Tensor query_hash_code,
367
+ at::Tensor query_weight,
368
+ at::Tensor key_mask,
369
+ at::Tensor key_hash_code,
370
+ at::Tensor key_weight,
371
+ at::Tensor value,
372
+ int hashtable_capacity,
373
+ bool use_cuda
374
+ ) {
375
+
376
+ int batch_size = query_hash_code.size(0);
377
+ int num_hash_f = query_hash_code.size(2);
378
+
379
+ int num_query = query_hash_code.size(1);
380
+ int num_key = key_hash_code.size(1);
381
+ int value_dim = value.size(2);
382
+ int weight_dim = query_weight.size(2);
383
+
384
+ at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
385
+ at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options());
386
+ at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options());
387
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
388
+
389
+ if (use_cuda) {
390
+
391
+ int *query_mask_ptr = query_mask.data_ptr<int>();
392
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
393
+ float *query_weight_ptr = query_weight.data_ptr<float>();
394
+ int *key_mask_ptr = key_mask.data_ptr<int>();
395
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
396
+ float *key_weight_ptr = key_weight.data_ptr<float>();
397
+ float *value_ptr = value.data_ptr<float>();
398
+
399
+ int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
400
+ int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>();
401
+ int *key_info_ptr = key_info.data_ptr<int>();
402
+
403
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
404
+
405
+ {
406
+ dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
407
+ dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
408
+ dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
409
+ dim3 blocks_step2(num_hash_f, batch_size);
410
+ int shared_mem = hashtable_capacity * sizeof(float);
411
+ count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
412
+ query_mask_ptr,
413
+ query_hash_code_ptr,
414
+ count_sort_table_ptr,
415
+ batch_size,
416
+ num_hash_f,
417
+ hashtable_capacity,
418
+ num_query
419
+ );
420
+ count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
421
+ count_sort_table_ptr,
422
+ batch_size,
423
+ num_hash_f,
424
+ hashtable_capacity
425
+ );
426
+ count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
427
+ query_mask_ptr,
428
+ query_hash_code_ptr,
429
+ count_sort_table_ptr,
430
+ query_sorted_idxes_ptr,
431
+ batch_size,
432
+ num_hash_f,
433
+ hashtable_capacity,
434
+ num_query
435
+ );
436
+ }
437
+ {
438
+ dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
439
+ dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
440
+ extract_query_info_cuda_kernel<<<blocks, threads>>>(
441
+ key_mask_ptr,
442
+ key_hash_code_ptr,
443
+ count_sort_table_ptr,
444
+ key_info_ptr,
445
+ batch_size,
446
+ num_hash_f,
447
+ hashtable_capacity,
448
+ num_key
449
+ );
450
+ }
451
+ {
452
+ dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
453
+ dim3 blocks(num_key, num_hash_f, batch_size);
454
+ int shared_mem = (weight_dim + value_dim + WARP_SIZE) * sizeof(float);
455
+ lsh_weighted_cumulation_ver3_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
456
+ query_sorted_idxes_ptr,
457
+ key_mask_ptr,
458
+ key_info_ptr,
459
+ query_weight_ptr,
460
+ key_weight_ptr,
461
+ value_ptr,
462
+ cumulation_value_ptr,
463
+ batch_size,
464
+ num_hash_f,
465
+ num_query,
466
+ num_key,
467
+ value_dim,
468
+ weight_dim
469
+ );
470
+ }
471
+ }
472
+
473
+ return cumulation_value;
474
+
475
+ }
476
+
477
+ at::Tensor lsh_weighted_cumulation_ver4_kernel(
478
+ at::Tensor query_mask,
479
+ at::Tensor query_hash_code,
480
+ at::Tensor query_weight,
481
+ at::Tensor key_mask,
482
+ at::Tensor key_hash_code,
483
+ at::Tensor key_weight,
484
+ at::Tensor value,
485
+ int hashtable_capacity,
486
+ bool use_cuda
487
+ ) {
488
+
489
+ int batch_size = query_hash_code.size(0);
490
+ int num_hash_f = query_hash_code.size(2);
491
+
492
+ int num_query = query_hash_code.size(1);
493
+ int num_key = key_hash_code.size(1);
494
+ int value_dim = value.size(2);
495
+ int weight_dim = query_weight.size(2);
496
+
497
+ at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
498
+ at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options());
499
+ at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options());
500
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
501
+
502
+ if (use_cuda) {
503
+
504
+ int *query_mask_ptr = query_mask.data_ptr<int>();
505
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
506
+ float *query_weight_ptr = query_weight.data_ptr<float>();
507
+ int *key_mask_ptr = key_mask.data_ptr<int>();
508
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
509
+ float *key_weight_ptr = key_weight.data_ptr<float>();
510
+ float *value_ptr = value.data_ptr<float>();
511
+
512
+ int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
513
+ int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>();
514
+ int *key_info_ptr = key_info.data_ptr<int>();
515
+
516
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
517
+
518
+ {
519
+ dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
520
+ dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
521
+ dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
522
+ dim3 blocks_step2(num_hash_f, batch_size);
523
+ int shared_mem = hashtable_capacity * sizeof(float);
524
+ count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
525
+ query_mask_ptr,
526
+ query_hash_code_ptr,
527
+ count_sort_table_ptr,
528
+ batch_size,
529
+ num_hash_f,
530
+ hashtable_capacity,
531
+ num_query
532
+ );
533
+ count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
534
+ count_sort_table_ptr,
535
+ batch_size,
536
+ num_hash_f,
537
+ hashtable_capacity
538
+ );
539
+ count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
540
+ query_mask_ptr,
541
+ query_hash_code_ptr,
542
+ count_sort_table_ptr,
543
+ query_sorted_idxes_ptr,
544
+ batch_size,
545
+ num_hash_f,
546
+ hashtable_capacity,
547
+ num_query
548
+ );
549
+ }
550
+ {
551
+ dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
552
+ dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
553
+ extract_query_info_cuda_kernel<<<blocks, threads>>>(
554
+ key_mask_ptr,
555
+ key_hash_code_ptr,
556
+ count_sort_table_ptr,
557
+ key_info_ptr,
558
+ batch_size,
559
+ num_hash_f,
560
+ hashtable_capacity,
561
+ num_key
562
+ );
563
+ }
564
+ {
565
+ dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
566
+ dim3 blocks(num_key, batch_size);
567
+ int shared_mem = (weight_dim + value_dim + 2 * num_hash_f) * sizeof(float);
568
+ lsh_weighted_cumulation_ver4_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
569
+ query_sorted_idxes_ptr,
570
+ key_mask_ptr,
571
+ key_info_ptr,
572
+ query_weight_ptr,
573
+ key_weight_ptr,
574
+ value_ptr,
575
+ cumulation_value_ptr,
576
+ batch_size,
577
+ num_hash_f,
578
+ num_query,
579
+ num_key,
580
+ value_dim,
581
+ weight_dim
582
+ );
583
+ }
584
+ }
585
+
586
+ return cumulation_value;
587
+
588
+ }
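The ver2/ver3/ver4 host functions above share the same count-sort launch arithmetic and differ mainly in how step 2 is blocked and how much shared memory it requests. The standalone C++ sketch below reproduces that arithmetic for the ver4 path; WARP_SIZE = 32 and OPTIMAL_THREADS_PER_BLOCK = 256 are assumptions (the real constants come from common_cuda.h) and the tensor sizes are purely illustrative.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
    // Assumed constants; the library defines these in common_cuda.h.
    const int WARP_SIZE = 32;
    const int OPTIMAL_THREADS_PER_BLOCK = 256;

    // Illustrative problem sizes, not taken from the library.
    int batch_size = 2, num_hash_f = 8, num_key = 4096;
    int weight_dim = 64, value_dim = 64;

    // count_sort step1/step3: x covers the hash functions, y packs several keys per block.
    int keys_per_block = std::max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f);
    std::printf("step1/3: threads = (%d, %d), blocks = (%d, %d)\n",
                num_hash_f, keys_per_block, num_key / keys_per_block, batch_size);

    // ver4 step2: one block per (key, batch); shared memory holds the key weight,
    // the key value and the per-hash (offset, count) pairs copied from key_info.
    std::size_t shared_mem = (weight_dim + value_dim + 2 * num_hash_f) * sizeof(float);
    std::printf("ver4 step2: threads = (%d, %d), blocks = (%d, %d), shared = %zu bytes\n",
                WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE, num_key, batch_size, shared_mem);
    return 0;
}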
venv/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h ADDED
@@ -0,0 +1,71 @@
1
+ #include <torch/extension.h>
2
+ #include <ATen/ATen.h>
3
+ #include <vector>
4
+
5
+ std::vector<at::Tensor> fast_hash_ver1_kernel(
6
+ at::Tensor query_mask,
7
+ at::Tensor query_vector,
8
+ at::Tensor key_mask,
9
+ at::Tensor key_vector,
10
+ int num_hash_f,
11
+ int hash_code_len,
12
+ bool use_cuda
13
+ );
14
+
15
+ at::Tensor lsh_cumulation_ver1_kernel(
16
+ at::Tensor query_mask,
17
+ at::Tensor query_hash_code,
18
+ at::Tensor key_mask,
19
+ at::Tensor key_hash_code,
20
+ at::Tensor value,
21
+ int hashtable_capacity,
22
+ bool use_cuda
23
+ );
24
+
25
+ at::Tensor lsh_weighted_cumulation_ver1_kernel(
26
+ at::Tensor query_mask,
27
+ at::Tensor query_hash_code,
28
+ at::Tensor query_weight,
29
+ at::Tensor key_mask,
30
+ at::Tensor key_hash_code,
31
+ at::Tensor key_weight,
32
+ at::Tensor value,
33
+ int hashtable_capacity,
34
+ bool use_cuda
35
+ );
36
+
37
+ at::Tensor lsh_weighted_cumulation_ver2_kernel(
38
+ at::Tensor query_mask,
39
+ at::Tensor query_hash_code,
40
+ at::Tensor query_weight,
41
+ at::Tensor key_mask,
42
+ at::Tensor key_hash_code,
43
+ at::Tensor key_weight,
44
+ at::Tensor value,
45
+ int hashtable_capacity,
46
+ bool use_cuda
47
+ );
48
+
49
+ at::Tensor lsh_weighted_cumulation_ver3_kernel(
50
+ at::Tensor query_mask,
51
+ at::Tensor query_hash_code,
52
+ at::Tensor query_weight,
53
+ at::Tensor key_mask,
54
+ at::Tensor key_hash_code,
55
+ at::Tensor key_weight,
56
+ at::Tensor value,
57
+ int hashtable_capacity,
58
+ bool use_cuda
59
+ );
60
+
61
+ at::Tensor lsh_weighted_cumulation_ver4_kernel(
62
+ at::Tensor query_mask,
63
+ at::Tensor query_hash_code,
64
+ at::Tensor query_weight,
65
+ at::Tensor key_mask,
66
+ at::Tensor key_hash_code,
67
+ at::Tensor key_weight,
68
+ at::Tensor value,
69
+ int hashtable_capacity,
70
+ bool use_cuda
71
+ );
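The declarations above are the host-side entry points; the binding that exposes them to Python is not shown in this file. A hypothetical pybind11/torch-extension sketch (module and exported names are illustrative assumptions, not the package's actual binding source) could look like the following; in transformers the YOSO model typically compiles these sources on demand via torch.utils.cpp_extension.load.

#include <torch/extension.h>
#include "fast_lsh_cumulation.h"

// Hypothetical binding sketch: exported names are illustrative, not the package's.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("fast_hash_ver1", &fast_hash_ver1_kernel, "LSH hash codes (ver1)");
    m.def("lsh_cumulation_ver1", &lsh_cumulation_ver1_kernel, "LSH cumulation (ver1)");
    m.def("lsh_weighted_cumulation_ver1", &lsh_weighted_cumulation_ver1_kernel, "Weighted LSH cumulation (ver1)");
    m.def("lsh_weighted_cumulation_ver2", &lsh_weighted_cumulation_ver2_kernel, "Weighted LSH cumulation (ver2)");
    m.def("lsh_weighted_cumulation_ver3", &lsh_weighted_cumulation_ver3_kernel, "Weighted LSH cumulation (ver3)");
    m.def("lsh_weighted_cumulation_ver4", &lsh_weighted_cumulation_ver4_kernel, "Weighted LSH cumulation (ver4)");
}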
venv/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu ADDED
@@ -0,0 +1,825 @@
1
+ // File from https://github.com/mlpen/YOSO/blob/main/encoders/backbones/efficient_attentions/yoso/yoso_v1/cuda/fast_lsh_cumulation_cuda.cu
2
+
3
+ #include "fast_lsh_cumulation_cuda.h"
4
+ #include "common_cuda_device.h"
5
+ #include "common_cuda.h"
6
+ #include "common.h"
7
+ #include <stdio.h>
8
+ //////////////////////////////////////////////////////////////////////////////////////////////////
9
+ //////////////////////////////////////////////////////////////////////////////////////////////////
10
+
11
+ inline __device__ void fast_hadamard_transform(float *vector_buffer, int vector_dim, int dim_idx) {
12
+ int stride = vector_dim / 2;
13
+ while (stride > (WARP_SIZE / 2)) {
14
+ __syncthreads();
15
+ int sign = 1 - ((dim_idx / stride) % 2) * 2;
16
+ float val1 = vector_buffer[dim_idx];
17
+ float val2 = vector_buffer[dim_idx + sign * stride];
18
+ __syncthreads();
19
+ vector_buffer[dim_idx] = float(sign) * val1 + val2;
20
+ stride = stride / 2;
21
+ }
22
+
23
+ float val = vector_buffer[dim_idx];
24
+ #pragma unroll
25
+ for (stride = (WARP_SIZE / 2); stride > 0; stride = stride / 2) {
26
+ int sign = 1 - ((dim_idx / stride) % 2) * 2;
27
+ val = float(sign) * val + __shfl_xor_sync(FULL_MASK, val, stride);
28
+ }
29
+ vector_buffer[dim_idx] = val;
30
+ }
31
+
32
+ __global__ void fast_hash_ver1_cuda_kernel(
33
+ int *mask, // [batch_size, num_vector]
34
+ float *vector, // [batch_size, num_vector, vector_dim]
35
+ int *Dmat, // [batch_size, 3, num_part, vector_dim]
36
+ int *hash_code, // [batch_size, num_vector, num_hash_f]
37
+ int batch_size,
38
+ int num_vector,
39
+ int vector_dim,
40
+ int num_part,
41
+ int num_hash_f,
42
+ int hash_code_len
43
+ ) {
44
+
45
+ int batch_idx = blockIdx.z;
46
+ int vector_idx = blockIdx.y;
47
+ int part_idx = blockIdx.x;
48
+
49
+ int dim_idx = threadIdx.x;
50
+
51
+ int batch_idx__vector_idx = batch_idx * num_vector + vector_idx;
52
+ if (mask[batch_idx__vector_idx] == 0) {
53
+ return;
54
+ }
55
+
56
+ extern __shared__ float buffer[];
57
+ float *vector_buffer = buffer;
58
+
59
+ vector_buffer[dim_idx] = vector[batch_idx__vector_idx * vector_dim + dim_idx];
60
+
61
+ vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 0) * num_part + part_idx) * vector_dim + dim_idx];
62
+ fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
63
+ vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 1) * num_part + part_idx) * vector_dim + dim_idx];
64
+ fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
65
+ vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 2) * num_part + part_idx) * vector_dim + dim_idx];
66
+ fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
67
+
68
+ int num_hash_per_part = vector_dim / hash_code_len;
69
+ if (hash_code_len == 8 || hash_code_len == 16) {
70
+ int code = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0);
71
+ for (int offset = 1; offset < hash_code_len; offset = offset * 2) {
72
+ code += __shfl_xor_sync(FULL_MASK, code, offset);
73
+ }
74
+ if (dim_idx % hash_code_len == 0) {
75
+ int hash_f_idx = part_idx * num_hash_per_part + dim_idx / hash_code_len;
76
+ if (hash_f_idx < num_hash_f) {
77
+ hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code;
78
+ }
79
+ }
80
+ } else {
81
+ vector_buffer[dim_idx] = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0);
82
+ __syncthreads();
83
+ if (dim_idx < num_hash_per_part) {
84
+ int code = 0;
85
+ for (int i = 0; i < hash_code_len; i++) {
86
+ code += vector_buffer[dim_idx * hash_code_len + i];
87
+ }
88
+ int hash_f_idx = part_idx * num_hash_per_part + dim_idx;
89
+ if (hash_f_idx < num_hash_f) {
90
+ hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code;
91
+ }
92
+ }
93
+ }
94
+ }
95
+
96
+ __global__ void lsh_cumulation_ver1_step1_cuda_kernel(
97
+ int *key_mask, // [batch_size, num_key]
98
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
99
+ float *value, // [batch_size, num_key, value_dim]
100
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
101
+ int batch_size,
102
+ int num_hash_f,
103
+ int hashtable_capacity,
104
+ int num_key,
105
+ int value_dim,
106
+ int offset_warp
107
+ ) {
108
+
109
+ int warp_thread_idx = threadIdx.x;
110
+
111
+ int batch_idx = blockIdx.y;
112
+ int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
113
+
114
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
115
+ if (key_mask[batch_idx__key_idx] == 0) {
116
+ return;
117
+ }
118
+
119
+ if (num_hash_f > WARP_SIZE) {
120
+ float warp_value = value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
121
+ for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
122
+ int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx];
123
+ #pragma unroll
124
+ for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
125
+ int current_hashcode = warp_hashcode;
126
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
127
+ int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
128
+ atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
129
+ }
130
+ }
131
+ } else {
132
+ float warp_value = value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
133
+ int warp_hashcode = 0;
134
+ if (warp_thread_idx < num_hash_f) {
135
+ warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx];
136
+ }
137
+ for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
138
+ int current_hashcode = warp_hashcode;
139
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
140
+ int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
141
+ atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
142
+ }
143
+ }
144
+
145
+ }
146
+
147
+ __global__ void lsh_cumulation_ver1_step2_cuda_kernel(
148
+ int *query_mask, // [batch_size, num_query]
149
+ int *query_hash_code, // [batch_size, num_query, num_hash_f]
150
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
151
+ float *cumulation_value, // [batch_size, num_query, value_dim]
152
+ int batch_size,
153
+ int num_hash_f,
154
+ int hashtable_capacity,
155
+ int num_query,
156
+ int value_dim,
157
+ int offset_warp
158
+ ) {
159
+
160
+ int warp_thread_idx = threadIdx.x;
161
+
162
+ int batch_idx = blockIdx.y;
163
+ int query_idx = blockIdx.x * blockDim.y + threadIdx.y;
164
+
165
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
166
+ if (query_mask[batch_idx__query_idx] == 0) {
167
+ return;
168
+ }
169
+
170
+ if (num_hash_f > WARP_SIZE) {
171
+ float warp_value = 0;
172
+ for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
173
+ int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx];
174
+ #pragma unroll
175
+ for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
176
+ int current_hashcode = warp_hashcode;
177
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
178
+ int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
179
+ warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
180
+ }
181
+ }
182
+ cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f);
183
+ } else {
184
+ float warp_value = 0;
185
+ int warp_hashcode = 0;
186
+ if (warp_thread_idx < num_hash_f) {
187
+ warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx];
188
+ }
189
+ for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
190
+ int current_hashcode = warp_hashcode;
191
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
192
+ int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
193
+ warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
194
+ }
195
+ cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f);
196
+ }
197
+
198
+ }
199
+
200
+ __global__ void lsh_weighted_cumulation_ver1_step1_cuda_kernel(
201
+ int *key_mask, // [batch_size, num_key]
202
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
203
+ float *key_weight, // [batch_size, num_key, weight_dim]
204
+ float *value, // [batch_size, num_key, value_dim]
205
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
206
+ int batch_size,
207
+ int num_hash_f,
208
+ int hashtable_capacity,
209
+ int num_key,
210
+ int value_dim,
211
+ int weight_dim,
212
+ int offset_warp,
213
+ int weight_idx
214
+ ) {
215
+
216
+ int warp_thread_idx = threadIdx.x;
217
+
218
+ int batch_idx = blockIdx.y;
219
+ int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
220
+
221
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
222
+ if (key_mask[batch_idx__key_idx] == 0) {
223
+ return;
224
+ }
225
+
226
+ if (num_hash_f > WARP_SIZE) {
227
+ float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
228
+ for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
229
+ int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx];
230
+ #pragma unroll
231
+ for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
232
+ int current_hashcode = warp_hashcode;
233
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
234
+ int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
235
+ atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
236
+ }
237
+ }
238
+ } else {
239
+ float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
240
+ int warp_hashcode = 0;
241
+ if (warp_thread_idx < num_hash_f) {
242
+ warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx];
243
+ }
244
+ for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
245
+ int current_hashcode = warp_hashcode;
246
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
247
+ int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
248
+ atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
249
+ }
250
+ }
251
+
252
+ }
253
+
254
+ __global__ void lsh_weighted_cumulation_ver1_step2_cuda_kernel(
255
+ int *query_mask, // [batch_size, num_query]
256
+ int *query_hash_code, // [batch_size, num_query, num_hash_f]
257
+ float *query_weight, // [batch_size, num_query, weight_dim]
258
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
259
+ float *cumulation_value, // [batch_size, num_query, value_dim]
260
+ int batch_size,
261
+ int num_hash_f,
262
+ int hashtable_capacity,
263
+ int num_query,
264
+ int value_dim,
265
+ int weight_dim,
266
+ int offset_warp,
267
+ int weight_idx
268
+ ) {
269
+
270
+ int warp_thread_idx = threadIdx.x;
271
+
272
+ int batch_idx = blockIdx.y;
273
+ int query_idx = blockIdx.x * blockDim.y + threadIdx.y;
274
+
275
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
276
+ if (query_mask[batch_idx__query_idx] == 0) {
277
+ return;
278
+ }
279
+
280
+ if (num_hash_f > WARP_SIZE) {
281
+ float warp_value = 0;
282
+ for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
283
+ int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx];
284
+ #pragma unroll
285
+ for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
286
+ int current_hashcode = warp_hashcode;
287
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
288
+ int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
289
+ warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
290
+ }
291
+ }
292
+ float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx];
293
+ cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f);
294
+ } else {
295
+ float warp_value = 0;
296
+ int warp_hashcode = 0;
297
+ if (warp_thread_idx < num_hash_f) {
298
+ warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx];
299
+ }
300
+ for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
301
+ int current_hashcode = warp_hashcode;
302
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
303
+ int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
304
+ warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
305
+ }
306
+ float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx];
307
+ cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f);
308
+ }
309
+
310
+ }
311
+
312
+ __global__ void count_sort_step1_cuda_kernel(
313
+ int *key_mask, // [batch_size, num_key]
314
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
315
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
316
+ int batch_size,
317
+ int num_hash_f,
318
+ int hashtable_capacity,
319
+ int num_key
320
+ ) {
321
+
322
+ int batch_idx = blockIdx.y;
323
+ int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
324
+ int hash_f_idx = threadIdx.x;
325
+
326
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
327
+ if (key_mask[batch_idx__key_idx] == 0) {
328
+ return;
329
+ }
330
+
331
+ int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx];
332
+ atomicAdd(&count_sort_table[(batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code], 1);
333
+
334
+ }
335
+
336
+ __global__ void count_sort_step2_cuda_kernel(
337
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
338
+ int batch_size,
339
+ int num_hash_f,
340
+ int hashtable_capacity
341
+ ) {
342
+
343
+ int batch_idx = blockIdx.y;
344
+ int hash_f_idx = blockIdx.x;
345
+
346
+ int num_threads = blockDim.x;
347
+ int thread_id = threadIdx.x;
348
+
349
+ int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;
350
+
351
+ extern __shared__ float buffer[];
352
+ int *table_buffer = (int*)buffer;
353
+
354
+ if (thread_id == 0) {
355
+ table_buffer[0] = 0;
356
+ }
357
+ copy_data<int>(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], &table_buffer[1], hashtable_capacity - 1, num_threads, thread_id);
358
+
359
+ for (int table_idx_start = 0; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + num_threads) {
360
+ int thread_value = table_buffer[table_idx_start + thread_id];
361
+ int next_thread_value = 0;
362
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
363
+ next_thread_value = __shfl_up_sync(FULL_MASK, thread_value, offset);
364
+ if (thread_id % WARP_SIZE >= offset) {
365
+ thread_value = thread_value + next_thread_value;
366
+ }
367
+ }
368
+ table_buffer[table_idx_start + thread_id] = thread_value;
369
+ }
370
+ __syncthreads();
371
+
372
+ if (hashtable_capacity > WARP_SIZE) {
373
+ if (thread_id < WARP_SIZE) {
374
+ for (int table_idx_start = WARP_SIZE; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + WARP_SIZE) {
375
+ table_buffer[table_idx_start + thread_id] += table_buffer[table_idx_start - 1];
376
+ }
377
+ }
378
+ }
379
+
380
+ copy_data<int>(table_buffer, &count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], hashtable_capacity, num_threads, thread_id);
381
+
382
+ }
383
+
384
+
385
+ __global__ void count_sort_step3_cuda_kernel(
386
+ int *key_mask, // [batch_size, num_key]
387
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
388
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
389
+ int *key_sorted_idxes, // [batch_size, num_hash_f, num_key]
390
+ int batch_size,
391
+ int num_hash_f,
392
+ int hashtable_capacity,
393
+ int num_key
394
+ ) {
395
+
396
+ int batch_idx = blockIdx.y;
397
+ int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
398
+ int hash_f_idx = threadIdx.x;
399
+
400
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
401
+ if (key_mask[batch_idx__key_idx] == 0) {
402
+ return;
403
+ }
404
+
405
+ int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;
406
+
407
+ int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx];
408
+ int sort_idx = atomicAdd(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity + hash_code], 1);
409
+ key_sorted_idxes[batch_idx__hash_f_idx * num_key + sort_idx] = key_idx;
410
+
411
+ }
412
+
413
+ __global__ void extract_query_info_cuda_kernel(
414
+ int *query_mask, // [batch_size, num_query]
415
+ int *query_hash_code, // [batch_size, num_query, num_hash_f]
416
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
417
+ int *query_info, // [batch_size, num_query, 2, num_hash_f]
418
+ int batch_size,
419
+ int num_hash_f,
420
+ int hashtable_capacity,
421
+ int num_query
422
+ ) {
423
+
424
+ int batch_idx = blockIdx.y;
425
+ int query_idx = blockIdx.x * blockDim.y + threadIdx.y;
426
+ int hash_f_idx = threadIdx.x;
427
+
428
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
429
+ if (query_mask[batch_idx__query_idx] == 0) {
430
+ return;
431
+ }
432
+
433
+ int hash_code = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_idx];
434
+ int batch_idx__hash_f_idx__hash_code = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code;
435
+
436
+ int key_offset = select(hash_code == 0, 0, count_sort_table[batch_idx__hash_f_idx__hash_code - 1]);
437
+ int key_count = count_sort_table[batch_idx__hash_f_idx__hash_code] - key_offset;
438
+
439
+ query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx] = key_offset;
440
+ query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx] = key_count;
441
+
442
+ }
443
+
444
+ __global__ void lsh_weighted_cumulation_ver2_step2_cuda_kernel(
445
+ int *query_mask, // [batch_size, num_query]
446
+ int *query_info, // [batch_size, num_query, 2, num_hash_f]
447
+ int *key_sorted_idxes, // [batch_size, num_hash_f, num_key]
448
+ float *query_weight, // [batch_size, num_query, weight_dim]
449
+ float *key_weight, // [batch_size, num_key, weight_dim]
450
+ float *value, // [batch_size, num_key, value_dim]
451
+ float *cumulation_value, // [batch_size, num_query, value_dim]
452
+ int batch_size,
453
+ int num_hash_f,
454
+ int num_query,
455
+ int num_key,
456
+ int value_dim,
457
+ int weight_dim
458
+ ) {
459
+
460
+ int batch_idx = blockIdx.z;
461
+ int hash_f_idx = blockIdx.y;
462
+ int query_idx = blockIdx.x;
463
+
464
+ int num_threads = blockDim.y * blockDim.x;
465
+ int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
466
+
467
+ int num_warps = blockDim.y;
468
+ int warp_idx = threadIdx.y;
469
+ int warp_thread_idx = threadIdx.x;
470
+
471
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
472
+ if (query_mask[batch_idx__query_idx] == 0) {
473
+ return;
474
+ }
475
+
476
+ int key_offset = query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx];
477
+ int key_count = query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx];
478
+
479
+ if (key_count == 0) {
480
+ return;
481
+ }
482
+
483
+ extern __shared__ float buffer[];
484
+
485
+ if (key_count == 1) {
486
+ if (warp_idx == 0) {
487
+ int key_idx = key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset];
488
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
489
+ float weight = 0;
490
+ for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
491
+ int weight_dim_idx = weight_offset + warp_thread_idx;
492
+ float val = query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx];
493
+ #pragma unroll
494
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
495
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
496
+ }
497
+ weight = weight + val;
498
+ }
499
+ weight = weight / float(num_hash_f);
500
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
501
+ int value_dim_idx = value_offset + warp_thread_idx;
502
+ float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
503
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
504
+ }
505
+ }
506
+ } else {
507
+ float *weight_buffer = buffer;
508
+ int *key_idxes_buffer = (int*)&buffer[weight_dim];
509
+
510
+ copy_data_nonblocking<float>(&query_weight[batch_idx__query_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
511
+
512
+ while (key_count > 0) {
513
+ int work_size = min(WARP_SIZE, key_count);
514
+ copy_data_nonblocking<int>(&key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset], key_idxes_buffer, work_size, num_threads, thread_id);
515
+ __syncthreads();
516
+ for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) {
517
+ int work_idx = work_offset + warp_idx;
518
+ if (work_idx < key_count) {
519
+ int key_idx = key_idxes_buffer[work_idx];
520
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
521
+ float weight = 0;
522
+ for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
523
+ int weight_dim_idx = weight_offset + warp_thread_idx;
524
+ float val = weight_buffer[weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx];
525
+ #pragma unroll
526
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
527
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
528
+ }
529
+ weight = weight + val;
530
+ }
531
+ weight = weight / float(num_hash_f);
532
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
533
+ int value_dim_idx = value_offset + warp_thread_idx;
534
+ float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
535
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
536
+ }
537
+ }
538
+ }
539
+ key_count = key_count - work_size;
540
+ key_offset = key_offset + work_size;
541
+ }
542
+ }
543
+
544
+ }
545
+
546
+ __global__ void lsh_weighted_cumulation_ver3_step2_cuda_kernel(
547
+ int *query_sorted_idxes, // [batch_size, num_hash_f, num_query]
548
+ int *key_mask, // [batch_size, num_key]
549
+ int *key_info, // [batch_size, num_key, 2, num_hash_f]
550
+ float *query_weight, // [batch_size, num_query, weight_dim]
551
+ float *key_weight, // [batch_size, num_key, weight_dim]
552
+ float *value, // [batch_size, num_key, value_dim]
553
+ float *cumulation_value, // [batch_size, num_query, value_dim]
554
+ int batch_size,
555
+ int num_hash_f,
556
+ int num_query,
557
+ int num_key,
558
+ int value_dim,
559
+ int weight_dim
560
+ ) {
561
+
562
+ int batch_idx = blockIdx.z;
563
+ int hash_f_idx = blockIdx.y;
564
+ int key_idx = blockIdx.x;
565
+
566
+ int num_threads = blockDim.y * blockDim.x;
567
+ int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
568
+
569
+ int num_warps = blockDim.y;
570
+ int warp_idx = threadIdx.y;
571
+ int warp_thread_idx = threadIdx.x;
572
+
573
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
574
+ if (key_mask[batch_idx__key_idx] == 0) {
575
+ return;
576
+ }
577
+
578
+ int query_offset = key_info[batch_idx__key_idx * 2 * num_hash_f + hash_f_idx];
579
+ int query_count = key_info[(batch_idx__key_idx * 2 + 1) * num_hash_f + hash_f_idx];
580
+
581
+ if (query_count == 0) {
582
+ return;
583
+ }
584
+
585
+ extern __shared__ float buffer[];
586
+
587
+ if (query_count == 1) {
588
+ if (warp_idx == 0) {
589
+ int query_idx = query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset];
590
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
591
+ float weight = 0;
592
+ for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
593
+ int weight_dim_idx = weight_offset + warp_thread_idx;
594
+ float val = key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
595
+ #pragma unroll
596
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
597
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
598
+ }
599
+ weight = weight + val;
600
+ }
601
+ weight = weight / float(num_hash_f);
602
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
603
+ int value_dim_idx = value_offset + warp_thread_idx;
604
+ float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
605
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
606
+ }
607
+ }
608
+ } else {
609
+ float *weight_buffer = buffer;
610
+ float *value_buffer = &buffer[weight_dim];
611
+ int *query_idxes_buffer = (int*)&buffer[weight_dim + value_dim];
612
+
613
+ copy_data_nonblocking<float>(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
614
+ copy_data_nonblocking<float>(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id);
615
+
616
+ while (query_count > 0) {
617
+ int work_size = min(WARP_SIZE, query_count);
618
+ copy_data_nonblocking<int>(&query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset], query_idxes_buffer, work_size, num_threads, thread_id);
619
+ __syncthreads();
620
+ for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) {
621
+ int work_idx = work_offset + warp_idx;
622
+ if (work_idx < query_count) {
623
+ int query_idx = query_idxes_buffer[work_idx];
624
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
625
+ float weight = 0;
626
+ for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
627
+ int weight_dim_idx = weight_offset + warp_thread_idx;
628
+ float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
629
+ #pragma unroll
630
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
631
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
632
+ }
633
+ weight = weight + val;
634
+ }
635
+ weight = weight / float(num_hash_f);
636
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
637
+ int value_dim_idx = value_offset + warp_thread_idx;
638
+ float val = value_buffer[value_dim_idx];
639
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
640
+ }
641
+ }
642
+ }
643
+ query_count = query_count - work_size;
644
+ query_offset = query_offset + work_size;
645
+ }
646
+ }
647
+
648
+ }
649
+
650
+ __global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel(
651
+ int *query_sorted_idxes, // [batch_size, num_hash_f, num_query]
652
+ int *key_mask, // [batch_size, num_key]
653
+ int *key_info, // [batch_size, num_key, 2, num_hash_f]
654
+ float *query_weight, // [batch_size, num_query, weight_dim]
655
+ float *key_weight, // [batch_size, num_key, weight_dim]
656
+ float *value, // [batch_size, num_key, value_dim]
657
+ float *cumulation_value, // [batch_size, num_query, value_dim]
658
+ int batch_size,
659
+ int num_hash_f,
660
+ int num_query,
661
+ int num_key,
662
+ int value_dim,
663
+ int weight_dim
664
+ ) {
665
+
666
+ int batch_idx = blockIdx.y;
667
+ int key_idx = blockIdx.x;
668
+
669
+ int num_threads = blockDim.y * blockDim.x;
670
+ int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
671
+
672
+ int num_warps = blockDim.y;
673
+ int warp_idx = threadIdx.y;
674
+ int warp_thread_idx = threadIdx.x;
675
+
676
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
677
+ if (key_mask[batch_idx__key_idx] == 0) {
678
+ return;
679
+ }
680
+
681
+ extern __shared__ float buffer[];
682
+ float *weight_buffer = buffer;
683
+ float *value_buffer = &buffer[weight_dim];
684
+ int *key_info_buffer = (int*)&buffer[weight_dim + value_dim];
685
+
686
+ copy_data_nonblocking<float>(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
687
+ copy_data_nonblocking<float>(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id);
688
+ copy_data_nonblocking<int>(&key_info[batch_idx__key_idx * 2 * num_hash_f], key_info_buffer, 2 * num_hash_f, num_threads, thread_id);
689
+
690
+ int *query_offset_buffer = key_info_buffer;
691
+ int *query_count_buffer = &key_info_buffer[num_hash_f];
692
+
693
+ const int hashtable_size = 1024 + OPTIMAL_THREADS_PER_BLOCK;
694
+ __shared__ int hashtable_query[hashtable_size];
695
+ __shared__ int hashtable_count[hashtable_size];
696
+ __shared__ int inserted_query[hashtable_size];
697
+ __shared__ int query_counter[1];
698
+
699
+ int hash_f_idx_base = 0;
700
+
701
+ while (true) {
702
+
703
+ init_buffer_nonblocking<int>(EMPTY_VALUE, hashtable_query, hashtable_size, num_threads, thread_id);
704
+ init_buffer_nonblocking<int>(0, hashtable_count, hashtable_size, num_threads, thread_id);
705
+ init_buffer_nonblocking<int>(EMPTY_VALUE, inserted_query, hashtable_size, num_threads, thread_id);
706
+ init_buffer_nonblocking<int>(0, query_counter, 1, num_threads, thread_id);
707
+ __syncthreads();
708
+
709
+ while (hash_f_idx_base < num_hash_f) {
710
+
711
+ int hash_f_idx = hash_f_idx_base + warp_idx;
712
+ int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;
713
+
714
+ int stop_flag = 0;
715
+
716
+ int query_offset = query_offset_buffer[hash_f_idx];
717
+ int query_count = query_count_buffer[hash_f_idx];
718
+
719
+ while (query_count > 0) {
720
+
721
+ int work_size = min(query_count, WARP_SIZE);
722
+
723
+ // try inserting query to set and check whether the query is new
724
+ int found_new_query = 0;
725
+ int query_idx = -1;
726
+ if (warp_thread_idx < work_size) {
727
+ query_idx = query_sorted_idxes[batch_idx__hash_f_idx * num_query + query_offset + warp_thread_idx];
728
+ int slot = set_insert<int>(hashtable_query, hashtable_size, query_idx);
729
+ if (slot >= 0) {
730
+ found_new_query = atomicAdd(&hashtable_count[slot], 1) == 0;
731
+ }
732
+ }
733
+
734
+ // compute cumulative offset
735
+ int position_offset = found_new_query;
736
+ int next_position_offset = 0;
737
+ #pragma unroll
738
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
739
+ next_position_offset = __shfl_up_sync(FULL_MASK, position_offset, offset);
740
+ if (thread_id % WARP_SIZE >= offset) {
741
+ position_offset = position_offset + next_position_offset;
742
+ }
743
+ }
744
+
745
+ // get the inserted query list end index
746
+ int inserted_query_base = 0;
747
+ if (thread_id % WARP_SIZE == WARP_SIZE - 1) {
748
+ inserted_query_base = atomicAdd(query_counter, position_offset);
749
+ }
750
+ inserted_query_base = __shfl_sync(FULL_MASK, inserted_query_base, WARP_SIZE - 1);
751
+
752
+ // insert new queries to list
753
+ int insert_idx = inserted_query_base + position_offset - 1;
754
+ if (found_new_query) {
755
+ inserted_query[insert_idx] = query_idx;
756
+ }
757
+
758
+ // remove inserted queries from list
759
+ query_offset_buffer[hash_f_idx] += work_size;
760
+ query_count_buffer[hash_f_idx] -= work_size;
761
+ query_offset += work_size;
762
+ query_count -= work_size;
763
+
764
+ // if list is almost full, stop inserting
765
+ if (inserted_query_base + OPTIMAL_THREADS_PER_BLOCK > hashtable_size) {
766
+ stop_flag = 1;
767
+ break;
768
+ }
769
+
770
+ }
771
+
772
+ if (stop_flag) {
773
+ break;
774
+ }
775
+
776
+ hash_f_idx_base = hash_f_idx_base + num_warps;
777
+
778
+ }
779
+
780
+ __syncthreads();
781
+
782
+ int num_distint_query = query_counter[0];
783
+
784
+ if (num_distint_query > 0) {
785
+ for (int idx_base = 0; idx_base < num_distint_query; idx_base = idx_base + num_warps) {
786
+ int idx = idx_base + warp_idx;
787
+ if (idx < num_distint_query) {
788
+ int query_idx = inserted_query[idx];
789
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
790
+
791
+ int slot = set_lookup<int>(hashtable_query, hashtable_size, query_idx);
792
+ int duplicate_count = hashtable_count[slot];
793
+
794
+ float weight = 0;
795
+ for (int weight_idx_base = 0; weight_idx_base < weight_dim; weight_idx_base = weight_idx_base + WARP_SIZE) {
796
+ int weight_dim_idx = weight_idx_base + warp_thread_idx;
797
+ float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
798
+ #pragma unroll
799
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
800
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
801
+ }
802
+ weight = weight + val;
803
+ }
804
+
805
+ weight = (float)duplicate_count * weight / float(num_hash_f);
806
+
807
+ for (int value_idx_base = 0; value_idx_base < value_dim; value_idx_base = value_idx_base + WARP_SIZE) {
808
+ int value_dim_idx = value_idx_base + warp_thread_idx;
809
+ float val = value_buffer[value_dim_idx];
810
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
811
+ }
812
+ }
813
+ }
814
+ } else {
815
+
816
+ // all computation is completed if num_distint_query == 0
817
+ break;
818
+
819
+ }
820
+
821
+ __syncthreads();
822
+
823
+ }
824
+
825
+ }
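The warp-level fast_hadamard_transform at the top of this file computes an unnormalized Walsh-Hadamard transform: strides larger than half a warp go through shared memory, the remaining strides through __shfl_xor_sync. A minimal CPU reference of the same butterfly, for illustration only (not part of the library):

#include <cassert>
#include <cstdio>
#include <vector>

// Unnormalized Walsh-Hadamard transform over a power-of-two length vector,
// using the same stride order and butterfly as fast_hadamard_transform above.
void fast_hadamard_cpu(std::vector<float>& v) {
    const int n = static_cast<int>(v.size());
    assert(n > 0 && (n & (n - 1)) == 0);
    for (int stride = n / 2; stride > 0; stride /= 2) {
        for (int i = 0; i < n; i++) {
            if ((i / stride) % 2 == 0) {      // lower element of each butterfly pair
                float a = v[i], b = v[i + stride];
                v[i] = a + b;                 // sign = +1 branch on the GPU
                v[i + stride] = a - b;        // sign = -1 branch on the GPU
            }
        }
    }
}

int main() {
    std::vector<float> v = {1.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
    fast_hadamard_cpu(v);                     // a unit impulse maps to all ones
    for (float x : v) std::printf("%g ", x);
    std::printf("\n");
    return 0;
}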