applied-ai-018 committed
Commit efc8380 · verified · Parent: abc1575

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/configuration_clipseg.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/__init__.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/convert_donut_to_pytorch.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/feature_extraction_donut.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/image_processing_donut.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/modeling_donut_swin.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/processing_donut.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/feature_extraction_donut.py +33 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/image_processing_donut.py +480 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__init__.py +97 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/__init__.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/configuration_flava.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_dalle_to_flava_codebook.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_flava_original_pytorch_to_hf.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/feature_extraction_flava.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/image_processing_flava.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/modeling_flava.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/processing_flava.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/configuration_flava.py +764 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/convert_dalle_to_flava_codebook.py +102 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/convert_flava_original_pytorch_to_hf.py +99 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/feature_extraction_flava.py +33 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/image_processing_flava.py +738 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/modeling_flava.py +2098 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/flava/processing_flava.py +165 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__init__.py +83 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/__init__.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/configuration_markuplm.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/feature_extraction_markuplm.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/modeling_markuplm.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/processing_markuplm.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/tokenization_markuplm.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/tokenization_markuplm_fast.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/configuration_markuplm.py +156 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/feature_extraction_markuplm.py +183 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/modeling_markuplm.py +1316 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/processing_markuplm.py +146 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm.py +1445 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm_fast.py +918 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__init__.py +73 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/__init__.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/configuration_oneformer.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/convert_to_hf_oneformer.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/image_processing_oneformer.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/processing_oneformer.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/configuration_oneformer.py +276 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/convert_to_hf_oneformer.py +1191 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/configuration_clipseg.cpython-310.pyc ADDED
Binary file (16 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (201 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dialogpt/__pycache__/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.29 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/convert_donut_to_pytorch.cpython-310.pyc ADDED
Binary file (5.94 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/feature_extraction_donut.cpython-310.pyc ADDED
Binary file (1.01 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/image_processing_donut.cpython-310.pyc ADDED
Binary file (17.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/modeling_donut_swin.cpython-310.pyc ADDED
Binary file (31 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/processing_donut.cpython-310.pyc ADDED
Binary file (6.04 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/feature_extraction_donut.py ADDED
@@ -0,0 +1,33 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Feature extractor class for Donut."""
+
+ import warnings
+
+ from ...utils import logging
+ from .image_processing_donut import DonutImageProcessor
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class DonutFeatureExtractor(DonutImageProcessor):
+     def __init__(self, *args, **kwargs) -> None:
+         warnings.warn(
+             "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+             " use DonutImageProcessor instead.",
+             FutureWarning,
+         )
+         super().__init__(*args, **kwargs)
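The new feature_extraction_donut.py above is only a thin deprecation shim around `DonutImageProcessor`. A minimal sketch of what that means for callers, assuming a local `transformers` install that matches this vendored copy:

```python
import warnings

from transformers import DonutFeatureExtractor, DonutImageProcessor

# Instantiating the deprecated alias still works, but emits the FutureWarning added above.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = DonutFeatureExtractor()
print(caught[0].category)  # <class 'FutureWarning'>

# The shim adds no behaviour of its own: it directly subclasses DonutImageProcessor,
# so new code can construct the image processor instead.
print(isinstance(legacy, DonutImageProcessor))  # True
image_processor = DonutImageProcessor()
```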
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/image_processing_donut.py ADDED
@@ -0,0 +1,480 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Donut."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ get_resize_output_image_size,
24
+ pad,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ IMAGENET_STANDARD_MEAN,
30
+ IMAGENET_STANDARD_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ get_image_size,
35
+ infer_channel_dimension_format,
36
+ is_scaled_image,
37
+ make_list_of_images,
38
+ to_numpy_array,
39
+ valid_images,
40
+ validate_kwargs,
41
+ validate_preprocess_arguments,
42
+ )
43
+ from ...utils import TensorType, logging
44
+ from ...utils.import_utils import is_vision_available
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ if is_vision_available():
51
+ import PIL
52
+
53
+
54
+ class DonutImageProcessor(BaseImageProcessor):
55
+ r"""
56
+ Constructs a Donut image processor.
57
+
58
+ Args:
59
+ do_resize (`bool`, *optional*, defaults to `True`):
60
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
61
+ `do_resize` in the `preprocess` method.
62
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
63
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
64
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
65
+ method.
66
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
67
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
68
+ do_thumbnail (`bool`, *optional*, defaults to `True`):
69
+ Whether to resize the image using thumbnail method.
70
+ do_align_long_axis (`bool`, *optional*, defaults to `False`):
71
+ Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
72
+ do_pad (`bool`, *optional*, defaults to `True`):
73
+ Whether to pad the image. If `random_padding` is set to `True` in `preprocess`, each image is padded with a
74
+ random amount of padding on each side, up to the largest image size in the batch. Otherwise, all images are
75
+ padded to the largest image size in the batch.
76
+ do_rescale (`bool`, *optional*, defaults to `True`):
77
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
78
+ the `preprocess` method.
79
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
80
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
81
+ method.
82
+ do_normalize (`bool`, *optional*, defaults to `True`):
83
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
84
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
85
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
86
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
87
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
88
+ Image standard deviation.
89
+ """
90
+
91
+ model_input_names = ["pixel_values"]
92
+
93
+ def __init__(
94
+ self,
95
+ do_resize: bool = True,
96
+ size: Dict[str, int] = None,
97
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
98
+ do_thumbnail: bool = True,
99
+ do_align_long_axis: bool = False,
100
+ do_pad: bool = True,
101
+ do_rescale: bool = True,
102
+ rescale_factor: Union[int, float] = 1 / 255,
103
+ do_normalize: bool = True,
104
+ image_mean: Optional[Union[float, List[float]]] = None,
105
+ image_std: Optional[Union[float, List[float]]] = None,
106
+ **kwargs,
107
+ ) -> None:
108
+ super().__init__(**kwargs)
109
+
110
+ size = size if size is not None else {"height": 2560, "width": 1920}
111
+ if isinstance(size, (tuple, list)):
112
+ # The previous feature extractor size parameter was in (width, height) format
113
+ size = size[::-1]
114
+ size = get_size_dict(size)
115
+
116
+ self.do_resize = do_resize
117
+ self.size = size
118
+ self.resample = resample
119
+ self.do_thumbnail = do_thumbnail
120
+ self.do_align_long_axis = do_align_long_axis
121
+ self.do_pad = do_pad
122
+ self.do_rescale = do_rescale
123
+ self.rescale_factor = rescale_factor
124
+ self.do_normalize = do_normalize
125
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
126
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
127
+ self._valid_processor_keys = [
128
+ "images",
129
+ "do_resize",
130
+ "size",
131
+ "resample",
132
+ "do_thumbnail",
133
+ "do_align_long_axis",
134
+ "do_pad",
135
+ "random_padding",
136
+ "do_rescale",
137
+ "rescale_factor",
138
+ "do_normalize",
139
+ "image_mean",
140
+ "image_std",
141
+ "return_tensors",
142
+ "data_format",
143
+ "input_data_format",
144
+ ]
145
+
146
+ def align_long_axis(
147
+ self,
148
+ image: np.ndarray,
149
+ size: Dict[str, int],
150
+ data_format: Optional[Union[str, ChannelDimension]] = None,
151
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
152
+ ) -> np.ndarray:
153
+ """
154
+ Align the long axis of the image to the longest axis of the specified size.
155
+
156
+ Args:
157
+ image (`np.ndarray`):
158
+ The image to be aligned.
159
+ size (`Dict[str, int]`):
160
+ The size `{"height": h, "width": w}` to align the long axis to.
161
+ data_format (`str` or `ChannelDimension`, *optional*):
162
+ The data format of the output image. If unset, the same format as the input image is used.
163
+ input_data_format (`ChannelDimension` or `str`, *optional*):
164
+ The channel dimension format of the input image. If not provided, it will be inferred.
165
+
166
+ Returns:
167
+ `np.ndarray`: The aligned image.
168
+ """
169
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
170
+ output_height, output_width = size["height"], size["width"]
171
+
172
+ if (output_width < output_height and input_width > input_height) or (
173
+ output_width > output_height and input_width < input_height
174
+ ):
175
+ image = np.rot90(image, 3)
176
+
177
+ if data_format is not None:
178
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
179
+
180
+ return image
181
+
182
+ def pad_image(
183
+ self,
184
+ image: np.ndarray,
185
+ size: Dict[str, int],
186
+ random_padding: bool = False,
187
+ data_format: Optional[Union[str, ChannelDimension]] = None,
188
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
189
+ ) -> np.ndarray:
190
+ """
191
+ Pad the image to the specified size.
192
+
193
+ Args:
194
+ image (`np.ndarray`):
195
+ The image to be padded.
196
+ size (`Dict[str, int]`):
197
+ The size `{"height": h, "width": w}` to pad the image to.
198
+ random_padding (`bool`, *optional*, defaults to `False`):
199
+ Whether to use random padding or not.
200
+ data_format (`str` or `ChannelDimension`, *optional*):
201
+ The data format of the output image. If unset, the same format as the input image is used.
202
+ input_data_format (`ChannelDimension` or `str`, *optional*):
203
+ The channel dimension format of the input image. If not provided, it will be inferred.
204
+ """
205
+ output_height, output_width = size["height"], size["width"]
206
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
207
+
208
+ delta_width = output_width - input_width
209
+ delta_height = output_height - input_height
210
+
211
+ if random_padding:
212
+ pad_top = np.random.randint(low=0, high=delta_height + 1)
213
+ pad_left = np.random.randint(low=0, high=delta_width + 1)
214
+ else:
215
+ pad_top = delta_height // 2
216
+ pad_left = delta_width // 2
217
+
218
+ pad_bottom = delta_height - pad_top
219
+ pad_right = delta_width - pad_left
220
+
221
+ padding = ((pad_top, pad_bottom), (pad_left, pad_right))
222
+ return pad(image, padding, data_format=data_format, input_data_format=input_data_format)
223
+
224
+ def pad(self, *args, **kwargs):
225
+ logger.info("pad is deprecated and will be removed in version 4.27. Please use pad_image instead.")
226
+ return self.pad_image(*args, **kwargs)
227
+
228
+ def thumbnail(
229
+ self,
230
+ image: np.ndarray,
231
+ size: Dict[str, int],
232
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
233
+ data_format: Optional[Union[str, ChannelDimension]] = None,
234
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
235
+ **kwargs,
236
+ ) -> np.ndarray:
237
+ """
238
+ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
239
+ corresponding dimension of the specified size.
240
+
241
+ Args:
242
+ image (`np.ndarray`):
243
+ The image to be resized.
244
+ size (`Dict[str, int]`):
245
+ The size `{"height": h, "width": w}` to resize the image to.
246
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
247
+ The resampling filter to use.
248
+ data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
249
+ The data format of the output image. If unset, the same format as the input image is used.
250
+ input_data_format (`ChannelDimension` or `str`, *optional*):
251
+ The channel dimension format of the input image. If not provided, it will be inferred.
252
+ """
253
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
254
+ output_height, output_width = size["height"], size["width"]
255
+
256
+ # We always resize to the smallest of either the input or output size.
257
+ height = min(input_height, output_height)
258
+ width = min(input_width, output_width)
259
+
260
+ if height == input_height and width == input_width:
261
+ return image
262
+
263
+ if input_height > input_width:
264
+ width = int(input_width * height / input_height)
265
+ elif input_width > input_height:
266
+ height = int(input_height * width / input_width)
267
+
268
+ return resize(
269
+ image,
270
+ size=(height, width),
271
+ resample=resample,
272
+ reducing_gap=2.0,
273
+ data_format=data_format,
274
+ input_data_format=input_data_format,
275
+ **kwargs,
276
+ )
277
+
278
+ def resize(
279
+ self,
280
+ image: np.ndarray,
281
+ size: Dict[str, int],
282
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
283
+ data_format: Optional[Union[str, ChannelDimension]] = None,
284
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
285
+ **kwargs,
286
+ ) -> np.ndarray:
287
+ """
288
+ Resizes `image` to `(height, width)` specified by `size` using the PIL library.
289
+
290
+ Args:
291
+ image (`np.ndarray`):
292
+ Image to resize.
293
+ size (`Dict[str, int]`):
294
+ Size of the output image.
295
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
296
+ Resampling filter to use when resizing the image.
297
+ data_format (`str` or `ChannelDimension`, *optional*):
298
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
299
+ input_data_format (`ChannelDimension` or `str`, *optional*):
300
+ The channel dimension format of the input image. If not provided, it will be inferred.
301
+ """
302
+ size = get_size_dict(size)
303
+ shortest_edge = min(size["height"], size["width"])
304
+ output_size = get_resize_output_image_size(
305
+ image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
306
+ )
307
+ resized_image = resize(
308
+ image,
309
+ size=output_size,
310
+ resample=resample,
311
+ data_format=data_format,
312
+ input_data_format=input_data_format,
313
+ **kwargs,
314
+ )
315
+ return resized_image
316
+
317
+ def preprocess(
318
+ self,
319
+ images: ImageInput,
320
+ do_resize: bool = None,
321
+ size: Dict[str, int] = None,
322
+ resample: PILImageResampling = None,
323
+ do_thumbnail: bool = None,
324
+ do_align_long_axis: bool = None,
325
+ do_pad: bool = None,
326
+ random_padding: bool = False,
327
+ do_rescale: bool = None,
328
+ rescale_factor: float = None,
329
+ do_normalize: bool = None,
330
+ image_mean: Optional[Union[float, List[float]]] = None,
331
+ image_std: Optional[Union[float, List[float]]] = None,
332
+ return_tensors: Optional[Union[str, TensorType]] = None,
333
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
334
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
335
+ **kwargs,
336
+ ) -> PIL.Image.Image:
337
+ """
338
+ Preprocess an image or batch of images.
339
+
340
+ Args:
341
+ images (`ImageInput`):
342
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
343
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
344
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
345
+ Whether to resize the image.
346
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
347
+ Size of the image after resizing. Shortest edge of the image is resized to min(size["height"],
348
+ size["width"]) with the longest edge resized to keep the input aspect ratio.
349
+ resample (`int`, *optional*, defaults to `self.resample`):
350
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
351
+ has an effect if `do_resize` is set to `True`.
352
+ do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
353
+ Whether to resize the image using thumbnail method.
354
+ do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
355
+ Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
356
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
357
+ Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random
358
+ amount of padding on each side, up to the largest image size in the batch. Otherwise, all images are
359
+ padded to the largest image size in the batch.
360
+ random_padding (`bool`, *optional*, defaults to `self.random_padding`):
361
+ Whether to use random padding when padding the image. If `True`, each image in the batch will be padded
362
+ with a random amount of padding on each side up to the size of the largest image in the batch.
363
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
364
+ Whether to rescale the image pixel values.
365
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
366
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
367
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
368
+ Whether to normalize the image.
369
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
370
+ Image mean to use for normalization.
371
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
372
+ Image standard deviation to use for normalization.
373
+ return_tensors (`str` or `TensorType`, *optional*):
374
+ The type of tensors to return. Can be one of:
375
+ - Unset: Return a list of `np.ndarray`.
376
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
377
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
378
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
379
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
380
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
381
+ The channel dimension format for the output image. Can be one of:
382
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
383
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
384
+ - Unset: defaults to the channel dimension format of the input image.
385
+ input_data_format (`ChannelDimension` or `str`, *optional*):
386
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
387
+ from the input image. Can be one of:
388
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
389
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
390
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
391
+ """
392
+ do_resize = do_resize if do_resize is not None else self.do_resize
393
+ size = size if size is not None else self.size
394
+ if isinstance(size, (tuple, list)):
395
+ # Previous feature extractor had size in (width, height) format
396
+ size = size[::-1]
397
+ size = get_size_dict(size)
398
+ resample = resample if resample is not None else self.resample
399
+ do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail
400
+ do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis
401
+ do_pad = do_pad if do_pad is not None else self.do_pad
402
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
403
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
404
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
405
+ image_mean = image_mean if image_mean is not None else self.image_mean
406
+ image_std = image_std if image_std is not None else self.image_std
407
+
408
+ images = make_list_of_images(images)
409
+
410
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
411
+
412
+ if not valid_images(images):
413
+ raise ValueError(
414
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
415
+ "torch.Tensor, tf.Tensor or jax.ndarray."
416
+ )
417
+ validate_preprocess_arguments(
418
+ do_rescale=do_rescale,
419
+ rescale_factor=rescale_factor,
420
+ do_normalize=do_normalize,
421
+ image_mean=image_mean,
422
+ image_std=image_std,
423
+ do_pad=do_pad,
424
+ size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg.
425
+ do_resize=do_resize,
426
+ size=size,
427
+ resample=resample,
428
+ )
429
+
430
+ # All transformations expect numpy arrays.
431
+ images = [to_numpy_array(image) for image in images]
432
+
433
+ if is_scaled_image(images[0]) and do_rescale:
434
+ logger.warning_once(
435
+ "It looks like you are trying to rescale already rescaled images. If the input"
436
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
437
+ )
438
+
439
+ if input_data_format is None:
440
+ # We assume that all images have the same channel dimension format.
441
+ input_data_format = infer_channel_dimension_format(images[0])
442
+
443
+ if do_align_long_axis:
444
+ images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images]
445
+
446
+ if do_resize:
447
+ images = [
448
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
449
+ for image in images
450
+ ]
451
+
452
+ if do_thumbnail:
453
+ images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images]
454
+
455
+ if do_pad:
456
+ images = [
457
+ self.pad_image(
458
+ image=image, size=size, random_padding=random_padding, input_data_format=input_data_format
459
+ )
460
+ for image in images
461
+ ]
462
+
463
+ if do_rescale:
464
+ images = [
465
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
466
+ for image in images
467
+ ]
468
+
469
+ if do_normalize:
470
+ images = [
471
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
472
+ for image in images
473
+ ]
474
+
475
+ images = [
476
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
477
+ ]
478
+
479
+ data = {"pixel_values": images}
480
+ return BatchFeature(data=data, tensor_type=return_tensors)
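The preprocess pipeline defined above applies align_long_axis (off by default), resize, thumbnail, pad, rescale and normalize in that order and returns channels-first pixel values. A minimal usage sketch under the defaults shown above (the 600x400 input is an arbitrary example):

```python
import numpy as np

from transformers import DonutImageProcessor

image_processor = DonutImageProcessor()  # default size is {"height": 2560, "width": 1920}

# Dummy HWC uint8 image with values in [0, 255]; a PIL.Image.Image works the same way.
image = np.random.randint(0, 256, size=(600, 400, 3), dtype=np.uint8)

# __call__ forwards to preprocess(): resize -> thumbnail -> pad -> rescale -> normalize.
inputs = image_processor(image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 2560, 1920) with the default size
```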
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__init__.py ADDED
@@ -0,0 +1,97 @@
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+ _import_structure = {
+     "configuration_flava": [
+         "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "FlavaConfig",
+         "FlavaImageCodebookConfig",
+         "FlavaImageConfig",
+         "FlavaMultimodalConfig",
+         "FlavaTextConfig",
+     ],
+ }
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_flava"] = ["FlavaFeatureExtractor"]
+     _import_structure["image_processing_flava"] = ["FlavaImageProcessor"]
+     _import_structure["processing_flava"] = ["FlavaProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_flava"] = [
+         "FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "FlavaForPreTraining",
+         "FlavaImageCodebook",
+         "FlavaImageModel",
+         "FlavaModel",
+         "FlavaMultimodalModel",
+         "FlavaPreTrainedModel",
+         "FlavaTextModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_flava import (
+         FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         FlavaConfig,
+         FlavaImageCodebookConfig,
+         FlavaImageConfig,
+         FlavaMultimodalConfig,
+         FlavaTextConfig,
+     )
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_flava import FlavaFeatureExtractor
+         from .image_processing_flava import FlavaImageProcessor
+         from .processing_flava import FlavaProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_flava import (
+             FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST,
+             FlavaForPreTraining,
+             FlavaImageCodebook,
+             FlavaImageModel,
+             FlavaModel,
+             FlavaMultimodalModel,
+             FlavaPreTrainedModel,
+             FlavaTextModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
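This __init__.py uses the library's lazy-module pattern: names listed in `_import_structure` are only imported from their submodules when first accessed, and the optional-dependency groups (vision, torch) are skipped when those extras are unavailable. A small sketch of the observable behaviour, assuming a standard `transformers` install:

```python
import sys

import transformers.models.flava as flava

# The package object is a _LazyModule proxy rather than a plain module.
print(type(flava).__name__)  # _LazyModule

# Accessing a registered name triggers the real import of the submodule that defines it.
config = flava.FlavaConfig()
print("transformers.models.flava.configuration_flava" in sys.modules)  # True
```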
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.52 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/configuration_flava.cpython-310.pyc ADDED
Binary file (25.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_dalle_to_flava_codebook.cpython-310.pyc ADDED
Binary file (2.59 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_flava_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (3.32 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/feature_extraction_flava.cpython-310.pyc ADDED
Binary file (1.01 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/image_processing_flava.cpython-310.pyc ADDED
Binary file (27.5 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/modeling_flava.cpython-310.pyc ADDED
Binary file (67.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/__pycache__/processing_flava.cpython-310.pyc ADDED
Binary file (5.29 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/configuration_flava.py ADDED
@@ -0,0 +1,764 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ FLAVA model configurations"""
16
+
17
+ import os
18
+ from typing import Any, Dict, Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class FlavaImageConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`FlavaImageModel`]. It is used to instantiate a
33
+ FLAVA model according to the specified arguments, defining the model architecture.
34
+
35
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
36
+ [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ hidden_size (`int`, *optional*, defaults to 768):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ num_hidden_layers (`int`, *optional*, defaults to 12):
46
+ Number of hidden layers in the Transformer encoder.
47
+ num_attention_heads (`int`, *optional*, defaults to 12):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ intermediate_size (`int`, *optional*, defaults to 3072):
50
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
51
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
52
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
53
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
54
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
55
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
56
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
57
+ The dropout ratio for the attention probabilities.
58
+ initializer_range (`float`, *optional*, defaults to 0.02):
59
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
60
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
61
+ The epsilon used by the layer normalization layers.
62
+ image_size (`int`, *optional*, defaults to 224):
63
+ The size (resolution) of each image.
64
+ patch_size (`int`, *optional*, defaults to 16):
65
+ The size (resolution) of each patch.
66
+ num_channels (`int`, *optional*, defaults to 3):
67
+ The number of input channels.
68
+ qkv_bias (`bool`, *optional*, defaults to `True`):
69
+ Whether to add a bias to the queries, keys and values.
70
+ mask_token (`bool`, *optional*, defaults to `True`):
71
+ Whether to use a mask token or not. Used in MIM (Masked Image Modeling) loss for FLAVA.
72
+ vocab_size (`int`, *optional*, defaults to 8192):
73
+ Vocabulary size of the [`FlavaImageCodebook`] used in conjunction with [`FlavaImageModel`] for MIM (Masked
74
+ Image Modeling) loss for FLAVA.
75
+
76
+ Example:
77
+
78
+ ```python
79
+ >>> from transformers import FlavaImageConfig, FlavaImageModel
80
+
81
+ >>> # Initializing a FlavaImageModel with style configuration
82
+ >>> configuration = FlavaImageConfig()
83
+
84
+ >>> # Initializing a FlavaImageModel model (with random weights) from the style configuration
85
+ >>> model = FlavaImageModel(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "flava_image_model"
92
+
93
+ def __init__(
94
+ self,
95
+ hidden_size: int = 768,
96
+ num_hidden_layers: int = 12,
97
+ num_attention_heads: int = 12,
98
+ intermediate_size: int = 3072,
99
+ hidden_act: int = "gelu",
100
+ hidden_dropout_prob: float = 0.0,
101
+ attention_probs_dropout_prob: float = 0.0,
102
+ initializer_range: float = 0.02,
103
+ layer_norm_eps: float = 1e-12,
104
+ image_size: int = 224,
105
+ patch_size: int = 16,
106
+ num_channels: int = 3,
107
+ qkv_bias: bool = True,
108
+ mask_token: bool = True,
109
+ vocab_size: int = 8192,
110
+ **kwargs,
111
+ ):
112
+ super().__init__(**kwargs)
113
+
114
+ self.hidden_size = hidden_size
115
+ self.num_hidden_layers = num_hidden_layers
116
+ self.num_attention_heads = num_attention_heads
117
+ self.intermediate_size = intermediate_size
118
+ self.hidden_act = hidden_act
119
+ self.hidden_dropout_prob = hidden_dropout_prob
120
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
121
+ self.initializer_range = initializer_range
122
+ self.layer_norm_eps = layer_norm_eps
123
+ self.image_size = image_size
124
+ self.patch_size = patch_size
125
+ self.num_channels = num_channels
126
+ self.qkv_bias = qkv_bias
127
+ self.mask_token = mask_token
128
+ self.vocab_size = vocab_size
129
+
130
+ @classmethod
131
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
132
+ cls._set_token_in_kwargs(kwargs)
133
+
134
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
135
+
136
+ # get the image config dict if we are loading from FlavaConfig
137
+ if config_dict.get("model_type") == "flava":
138
+ config_dict = config_dict["image_config"]
139
+
140
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
141
+ logger.warning(
142
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
143
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
144
+ )
145
+
146
+ return cls.from_dict(config_dict, **kwargs)
147
+
148
+
149
+ class FlavaTextConfig(PretrainedConfig):
150
+ r"""
151
+ This is the configuration class to store the configuration of a [`FlavaTextModel`]. It is used to instantiate a
152
+ FLAVA model according to the specified arguments, defining the model architecture.
153
+
154
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
155
+ [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
156
+
157
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
158
+ documentation from [`PretrainedConfig`] for more information.
159
+
160
+
161
+ Args:
162
+ vocab_size (`int`, *optional*, defaults to 30522):
163
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
164
+ `inputs_ids` passed when calling [`FlavaTextModel`].
165
+ type_vocab_size (`int`, *optional*, defaults to 2):
166
+ The vocabulary size of the `token_type_ids` passed when calling [`FlavaTextModel`]. Note that even though
167
+ text encoder allows `token_type_ids`'s value as 2, for text-only pretraining and fine-tuning, only 1 is
168
+ used similar to RoBERTa.
169
+ max_position_embeddings (`int`, *optional*, defaults to 512):
170
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
171
+ just in case (e.g., 512 or 1024 or 2048). For VL, max_length passed to model is 77.
172
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
173
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
174
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
175
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
176
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
177
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
178
+ hidden_size (`int`, *optional*, defaults to 768):
179
+ Dimensionality of the encoder layers and the pooler layer.
180
+ num_hidden_layers (`int`, *optional*, defaults to 12):
181
+ Number of hidden layers in the Transformer encoder.
182
+ num_attention_heads (`int`, *optional*, defaults to 12):
183
+ Number of attention heads for each attention layer in the Transformer encoder.
184
+ intermediate_size (`int`, *optional*, defaults to 3072):
185
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
186
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
187
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
188
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
189
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
190
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
191
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
192
+ The dropout ratio for the attention probabilities.
193
+ initializer_range (`float`, *optional*, defaults to 0.02):
194
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
195
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
196
+ The epsilon used by the layer normalization layers.
197
+ image_size (`int`, *optional*, defaults to 224):
198
+ The size (resolution) of each image.
199
+ patch_size (`int`, *optional*, defaults to 16):
200
+ The size (resolution) of each patch.
201
+ num_channels (`int`, *optional*, defaults to 3):
202
+ The number of input channels.
203
+ qkv_bias (`bool`, *optional*, defaults to `True`):
204
+ Whether to add a bias to the queries, keys and values.
205
+
206
+ Example:
207
+
208
+ ```python
209
+ >>> from transformers import FlavaTextConfig, FlavaTextModel
210
+
211
+ >>> # Initializing a FlavaTextModel with style configuration
212
+ >>> configuration = FlavaTextConfig()
213
+
214
+ >>> # Initializing a FlavaTextModel model (with random weights) from the style configuration
215
+ >>> model = FlavaTextModel(configuration)
216
+
217
+ >>> # Accessing the model configuration
218
+ >>> configuration = model.config
219
+ ```"""
220
+
221
+ model_type = "flava_text_model"
222
+
223
+ def __init__(
224
+ self,
225
+ vocab_size: int = 30522,
226
+ type_vocab_size: int = 2,
227
+ max_position_embeddings: int = 512,
228
+ position_embedding_type: str = "absolute",
229
+ hidden_size: int = 768,
230
+ num_hidden_layers: int = 12,
231
+ num_attention_heads: int = 12,
232
+ intermediate_size: int = 3072,
233
+ hidden_act: str = "gelu",
234
+ hidden_dropout_prob: float = 0.0,
235
+ attention_probs_dropout_prob: float = 0.0,
236
+ initializer_range: float = 0.02,
237
+ layer_norm_eps: float = 1e-12,
238
+ pad_token_id: int = 0,
239
+ qkv_bias: bool = True,
240
+ **kwargs,
241
+ ):
242
+ super().__init__(**kwargs)
243
+
244
+ self.vocab_size = vocab_size
245
+ self.type_vocab_size = type_vocab_size
246
+ self.max_position_embeddings = max_position_embeddings
247
+ self.position_embedding_type = position_embedding_type
248
+ self.hidden_size = hidden_size
249
+ self.num_hidden_layers = num_hidden_layers
250
+ self.num_attention_heads = num_attention_heads
251
+ self.intermediate_size = intermediate_size
252
+ self.hidden_act = hidden_act
253
+ self.hidden_dropout_prob = hidden_dropout_prob
254
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
255
+ self.initializer_range = initializer_range
256
+ self.layer_norm_eps = layer_norm_eps
257
+ self.qkv_bias = qkv_bias
258
+ self.pad_token_id = pad_token_id
259
+
260
+ @classmethod
261
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
262
+ cls._set_token_in_kwargs(kwargs)
263
+
264
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
265
+
266
+ # get the text config dict if we are loading from FlavaConfig
267
+ if config_dict.get("model_type") == "flava":
268
+ config_dict = config_dict["text_config"]
269
+
270
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
271
+ logger.warning(
272
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
273
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
274
+ )
275
+
276
+ return cls.from_dict(config_dict, **kwargs)
277
+
278
+
279
+ class FlavaMultimodalConfig(PretrainedConfig):
280
+ r"""
281
+ This is the configuration class to store the configuration of a [`FlavaMultimodalModel`]. It is used to instantiate
282
+ a FLAVA model according to the specified arguments, defining the model architecture.
283
+
284
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
285
+ [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
286
+
287
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
288
+ documentation from [`PretrainedConfig`] for more information.
289
+
290
+
291
+ Args:
292
+ hidden_size (`int`, *optional*, defaults to 768):
293
+ Dimensionality of the encoder layers and the pooler layer.
294
+ num_hidden_layers (`int`, *optional*, defaults to 6):
295
+ Number of hidden layers in the Transformer encoder.
296
+ num_attention_heads (`int`, *optional*, defaults to 12):
297
+ Number of attention heads for each attention layer in the Transformer encoder.
298
+ intermediate_size (`int`, *optional*, defaults to 3072):
299
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
300
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
301
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
302
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
303
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
304
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
305
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
306
+ The dropout ratio for the attention probabilities.
307
+ initializer_range (`float`, *optional*, defaults to 0.02):
308
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
309
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
310
+ The epsilon used by the layer normalization layers.
311
+ qkv_bias (`bool`, *optional*, defaults to `True`):
312
+ Whether to add a bias to the queries, keys and values.
313
+ use_cls_token (`bool`, *optional*, defaults to `True`):
314
+ Whether to use an extra CLS token for multimodal settings. Usually needed by the FLAVA model.
315
+
316
+
317
+ Example:
318
+
319
+ ```python
320
+ >>> from transformers import FlavaMultimodalConfig, FlavaMultimodalModel
321
+
322
+ >>> # Initializing a FlavaMultimodalModel with style configuration
323
+ >>> configuration = FlavaMultimodalConfig()
324
+
325
+ >>> # Initializing a FlavaMultimodalModel model (with random weights) from the style configuration
326
+ >>> model = FlavaMultimodalModel(configuration)
327
+
328
+ >>> # Accessing the model configuration
329
+ >>> configuration = model.config
330
+ ```"""
331
+
332
+ model_type = "flava_multimodal_model"
333
+
334
+ def __init__(
335
+ self,
336
+ hidden_size: int = 768,
337
+ num_hidden_layers: int = 6,
338
+ num_attention_heads: int = 12,
339
+ intermediate_size: int = 3072,
340
+ hidden_act: int = "gelu",
341
+ hidden_dropout_prob: int = 0.0,
342
+ attention_probs_dropout_prob: int = 0.0,
343
+ initializer_range: float = 0.02,
344
+ layer_norm_eps: float = 1e-12,
345
+ qkv_bias: bool = True,
346
+ use_cls_token: bool = True,
347
+ **kwargs,
348
+ ):
349
+ super().__init__(**kwargs)
350
+
351
+ self.hidden_size = hidden_size
352
+ self.num_hidden_layers = num_hidden_layers
353
+ self.num_attention_heads = num_attention_heads
354
+ self.intermediate_size = intermediate_size
355
+ self.hidden_act = hidden_act
356
+ self.hidden_dropout_prob = hidden_dropout_prob
357
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
358
+ self.initializer_range = initializer_range
359
+ self.layer_norm_eps = layer_norm_eps
360
+ self.qkv_bias = qkv_bias
361
+ self.use_cls_token = use_cls_token
362
+
363
+ @classmethod
364
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
365
+ cls._set_token_in_kwargs(kwargs)
366
+
367
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
368
+
369
+ # get the multimodal config dict if we are loading from FlavaConfig
370
+ if config_dict.get("model_type") == "flava":
371
+ config_dict = config_dict["multimodal_config"]
372
+
373
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
374
+ logger.warning(
375
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
376
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
377
+ )
378
+
379
+ return cls.from_dict(config_dict, **kwargs)
380
+
381
+
382
+ class FlavaImageCodebookConfig(PretrainedConfig):
383
+ model_type = "flava_image_codebook"
384
+
385
+ r"""
386
+ [`FlavaImageCodebookConfig`] is the configuration class to store the configuration of a [`FlavaImageCodebook`]. It
387
+ is used to instantiate a FLAVA model according to the specified arguments, defining the model architecture.
388
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
389
+ [facebook/flava-image-codebook](https://huggingface.co/facebook/flava-image-codebook) architecture.
390
+
391
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
392
+ documentation from [`PretrainedConfig`] for more information.
393
+
394
+ Args:
395
+ num_groups (`int`, defaults to 4):
396
+ Number of groups to be created. This parameter as of now doesn't affect the model and is used for some
397
+ internal calculation and estimations.
398
+ input_channels (`int`, defaults to 3):
399
+ Number of channels in the image to be passed.
400
+ num_blocks_per_group (`int`, defaults to 2):
401
+ Number of conv-based blocks per group.
402
+ hidden_size (`int`, defaults to 256):
403
+ Size of hidden dim for the blocks.
404
+ vocab_size (`int`, defaults to 8192):
405
+ Size of the output vocabulary for the codebook.
406
+ freeze (`bool`, defaults to `True`):
407
+ Whether to freeze the weights of the model.
408
+ initializer_range (`float`, *optional*, defaults to 0.02):
409
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
410
+ kwargs (*optional*):
411
+ Dictionary of keyword arguments.
412
+
413
+ Example:
414
+
415
+ ```python
416
+ >>> from transformers import FlavaImageCodebookConfig, FlavaImageCodebook
417
+
418
+ >>> # Initializing a FlavaImageCodebookConfig with style configuration
419
+ >>> configuration = FlavaImageCodebookConfig()
420
+
421
+ >>> # Initializing a FlavaImageCodebook model (with random weights) from the style configuration
422
+ >>> model = FlavaImageCodebook(configuration)
423
+ >>> # Accessing the model configuration
424
+ >>> configuration = model.config
425
+ ```
426
+ """
427
+
428
+ def __init__(
429
+ self,
430
+ num_groups: int = 4,
431
+ input_channels: int = 3,
432
+ num_blocks_per_group: int = 2,
433
+ hidden_size: int = 256,
434
+ vocab_size: int = 8192,
435
+ freeze: bool = True,
436
+ initializer_range: float = 0.02,
437
+ **kwargs,
438
+ ):
439
+ super().__init__(**kwargs)
440
+ self.num_groups = num_groups
441
+ self.input_channels = input_channels
442
+ self.num_blocks_per_group = num_blocks_per_group
443
+ self.hidden_size = hidden_size
444
+ self.vocab_size = vocab_size
445
+ self.freeze = freeze
446
+ self.initializer_range = initializer_range
447
+
448
+ @classmethod
449
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
450
+ cls._set_token_in_kwargs(kwargs)
451
+
452
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
453
+
454
+ # get the image codebook config dict if we are loading from FlavaConfig
455
+ if config_dict.get("model_type") == "flava":
456
+ config_dict = config_dict["image_codebook_config"]
457
+
458
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
459
+ logger.warning(
460
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
461
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
462
+ )
463
+
464
+ return cls.from_dict(config_dict, **kwargs)
465
+
466
+
467
+ class FlavaConfig(PretrainedConfig):
468
+ r"""
469
+ [`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to
470
+ instantiate a FLAVA model according to the specified arguments, defining the text model, image model, image codebook
471
+ and multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration to
472
+ that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
473
+
474
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
475
+ documentation from [`PretrainedConfig`] for more information.
476
+
477
+ Args:
478
+ text_config (`dict`, *optional*):
479
+ Dictionary of configuration options used to initialize [`FlavaTextConfig`].
480
+ image_config (`dict`, *optional*):
481
+ Dictionary of configuration options used to initialize [`FlavaImageConfig`].
482
+ multimodal_config (`dict`, *optional*):
483
+ Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`].
+ image_codebook_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`FlavaImageCodebookConfig`].
484
+ hidden_size (`int`, *optional*, defaults to 768):
485
+ Dimensionality of the encoder layers and the pooler layer.
486
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
487
+ The epsilon used by the layer normalization layers.
488
+ projection_dim (`int`, *optional*, defaults to 768):
490
+ Dimensionality of the text and image projection layers.
491
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
492
+ The initial value of the *logit_scale* parameter. The default is used as per the original FLAVA/CLIP
493
+ implementation.
493
+ initializer_range (`float`, *optional*, defaults to 0.02):
494
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
495
+ ce_ignore_index (`int`, *optional*, defaults to -100):
496
+ Cross entropy index to ignore.
497
+ mim_weight (`float`, *optional*, defaults to 1.0):
498
+ Weight to be assigned to the MIM (Masked Image Modeling) unimodal loss.
499
+ mlm_weight (`float`, *optional*, defaults to 1.0):
500
+ Weight to be assigned to the MLM (Masked Language Modeling) unimodal loss.
501
+ global_contrastive_weight (`float`, *optional*, defaults to 1.0):
502
+ Weight to be assigned to global contrastive cross-alignment loss.
503
+ itm_weight (`float`, *optional*, defaults to 1.0):
504
+ Weight to be assigned to image-text matching multimodal loss.
505
+ mmm_image_weight (`float`, *optional*, defaults to 1.0):
506
+ Weight to be assigned to MMM loss's image part.
507
+ mmm_text_weight (`float`, *optional*, defaults to 1.0):
508
+ Weight to be assigned to MMM loss's text part.
509
+ global_backprop_contrastive (`bool`, *optional*, defaults to `True`):
510
+ Whether to use global backpropagation through all workers in the contrastive loss.
511
+ skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`):
512
+ Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses.
513
+ return_loss (`bool`, *optional*, defaults to `True`):
514
+ Whether to return the loss or not.
515
+
516
+ kwargs (*optional*):
517
+ Dictionary of keyword arguments.
518
+
519
+ Example:
520
+
521
+ ```python
522
+ >>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining
523
+
524
+ >>> # Initializing a FlavaConfig with style configuration
525
+ >>> configuration = FlavaConfig()
526
+
527
+ >>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration
528
+ >>> model = FlavaModel(configuration)
529
+ >>> model_pre = FlavaForPreTraining(configuration)
530
+
531
+ >>> # Accessing the model configuration
532
+ >>> configuration = model.config
533
+ >>> configuration_pre = model_pre.config
534
+ ```
535
+ """
536
+
537
+ model_type = "flava"
538
+
539
+ def __init__(
540
+ self,
541
+ image_config: Dict[str, Any] = None,
542
+ text_config: Dict[str, Any] = None,
543
+ multimodal_config: Dict[str, Any] = None,
544
+ image_codebook_config: Dict[str, Any] = None,
545
+ hidden_size: int = 768,
546
+ layer_norm_eps: float = 1e-12,
547
+ projection_dim: int = 768,
548
+ init_codebook: bool = True,
549
+ logit_scale_init_value: float = 2.6592,
550
+ initializer_range: float = 0.02,
551
+ ce_ignore_index: int = -100,
552
+ mim_weight: float = 1.0,
553
+ mlm_weight: float = 1.0,
554
+ global_contrastive_weight: float = 1.0,
555
+ itm_weight: float = 1.0,
556
+ mmm_image_weight: float = 1.0,
557
+ mmm_text_weight: float = 1.0,
558
+ global_backprop_contrastive: bool = True,
559
+ skip_unmasked_multimodal_encoder: bool = True,
560
+ return_loss: bool = True,
561
+ **kwargs,
562
+ ):
563
+ # If the `*_config_dict` kwargs exist, we use them for backward compatibility.
564
+ # We pop out these 4 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
565
+ # of confusion!).
566
+ text_config_dict = kwargs.pop("text_config_dict", None)
567
+ image_config_dict = kwargs.pop("image_config_dict", None)
568
+ multimodal_config_dict = kwargs.pop("multimodal_config_dict", None)
569
+ image_codebook_config_dict = kwargs.pop("image_codebook_config_dict", None)
570
+
571
+ super().__init__(**kwargs)
572
+
573
+ # Instead of simply assigning `[text|image|...]_config_dict` to `[text|image|...]_config`, we use the values in
574
+ # `[text|image|...]_config_dict` to update the values in `[text|image|...]_config`. The values should be the same
575
+ # in most cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
576
+ if text_config_dict is not None:
577
+ if text_config is None:
578
+ text_config = {}
579
+
580
+ # This is the complete result when using `text_config_dict`.
581
+ _text_config_dict = FlavaTextConfig(**text_config_dict).to_dict()
582
+
583
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
584
+ for key, value in _text_config_dict.items():
585
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
586
+ # If specified in `text_config_dict`
587
+ if key in text_config_dict:
588
+ message = (
589
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
590
+ f'The value `text_config_dict["{key}"]` will be used instead.'
591
+ )
592
+ # If inferred from default argument values (just to be super careful)
593
+ else:
594
+ message = (
595
+ f"`text_config_dict` is provided which will be used to initialize `FlavaTextConfig`. The "
596
+ f'value `text_config["{key}"]` will be overridden.'
597
+ )
598
+ logger.info(message)
599
+
600
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
601
+ text_config.update(_text_config_dict)
602
+
603
+ if image_config_dict is not None:
604
+ if image_config is None:
605
+ image_config = {}
606
+
607
+ # This is the complete result when using `image_config_dict`.
608
+ _image_config_dict = FlavaImageConfig(**image_config_dict).to_dict()
609
+ # convert keys to string instead of integer
610
+ if "id2label" in _image_config_dict:
611
+ _image_config_dict["id2label"] = {
612
+ str(key): value for key, value in _image_config_dict["id2label"].items()
613
+ }
614
+
615
+ # Give a warning if the values exist in both `_image_config_dict` and `image_config` but are different.
616
+ for key, value in _image_config_dict.items():
617
+ if key in image_config and value != image_config[key] and key not in ["transformers_version"]:
618
+ # If specified in `image_config_dict`
619
+ if key in image_config_dict:
620
+ message = (
621
+ f"`{key}` is found in both `image_config_dict` and `image_config` but with different "
622
+ f'values. The value `image_config_dict["{key}"]` will be used instead.'
623
+ )
624
+ # If inferred from default argument values (just to be super careful)
625
+ else:
626
+ message = (
627
+ f"`image_config_dict` is provided which will be used to initialize `FlavaImageConfig`. "
628
+ f'The value `image_config["{key}"]` will be overridden.'
629
+ )
630
+ logger.info(message)
631
+
632
+ # Update all values in `image_config` with the ones in `_image_config_dict`.
633
+ image_config.update(_image_config_dict)
634
+
635
+ if multimodal_config_dict is not None:
636
+ if multimodal_config is None:
637
+ multimodal_config = {}
638
+
639
+ # This is the complete result when using `multimodal_config_dict`.
640
+ _multimodal_config_dict = FlavaMultimodalConfig(**multimodal_config_dict).to_dict()
641
+
642
+ # Give a warning if the values exist in both `_multimodal_config_dict` and `multimodal_config` but are
643
+ # different.
644
+ for key, value in _multimodal_config_dict.items():
645
+ if (
646
+ key in multimodal_config
647
+ and value != multimodal_config[key]
648
+ and key not in ["transformers_version"]
649
+ ):
650
+ # If specified in `multimodal_config_dict`
651
+ if key in multimodal_config_dict:
652
+ message = (
653
+ f"`{key}` is found in both `multimodal_config_dict` and `multimodal_config` but with "
654
+ f'different values. The value `multimodal_config_dict["{key}"]` will be used instead.'
655
+ )
656
+ # If inferred from default argument values (just to be super careful)
657
+ else:
658
+ message = (
659
+ f"`multimodal_config_dict` is provided which will be used to initialize "
660
+ f'`FlavaMultimodalConfig`. The value `multimodal_config["{key}"]` will be overridden.'
661
+ )
662
+ logger.info(message)
663
+
664
+ # Update all values in `multimodal_config` with the ones in `_multimodal_config_dict`.
665
+ multimodal_config.update(_multimodal_config_dict)
666
+
667
+ if image_codebook_config_dict is not None:
668
+ if image_codebook_config is None:
669
+ image_codebook_config = {}
670
+
671
+ # This is the complete result when using `image_codebook_config_dict`.
672
+ _image_codebook_config_dict = FlavaImageCodebookConfig(**image_codebook_config_dict).to_dict()
673
+
674
+ # Give a warning if the values exist in both `_image_codebook_config_dict` and `image_codebook_config` but
675
+ # are different.
676
+ for key, value in _image_codebook_config_dict.items():
677
+ if (
678
+ key in image_codebook_config
679
+ and value != image_codebook_config[key]
680
+ and key not in ["transformers_version"]
681
+ ):
682
+ # If specified in `image_codebook_config_dict`
683
+ if key in image_codebook_config_dict:
684
+ message = (
685
+ f"`{key}` is found in both `image_codebook_config_dict` and `image_codebook_config` but "
686
+ f'with different values. The value `image_codebook_config_dict["{key}"]` will be used '
687
+ "instead."
688
+ )
689
+ # If inferred from default argument values (just to be super careful)
690
+ else:
691
+ message = (
692
+ f"`image_codebook_config_dict` is provided which will be used to initialize "
693
+ f'`FlavaImageCodebookConfig`. The value `image_codebook_config["{key}"]` will be overridden.'
694
+ )
695
+ logger.info(message)
696
+
697
+ # Update all values in `image_codebook_config` with the ones in `_image_codebook_config_dict`.
698
+ image_codebook_config.update(_image_codebook_config_dict)
699
+
700
+ if image_config is None:
701
+ image_config = {}
702
+ logger.info("`image_config` is `None`. Initializing the `FlavaImageConfig` with default values.")
703
+
704
+ if text_config is None:
705
+ text_config = {}
706
+ logger.info("`text_config` is `None`. Initializing the `FlavaTextConfig` with default values.")
707
+
708
+ if multimodal_config is None:
709
+ multimodal_config = {}
710
+ logger.info("`multimodal_config` is `None`. Initializing the `FlavaMultimodalConfig` with default values.")
711
+
712
+ if image_codebook_config is None:
713
+ image_codebook_config = {}
714
+ logger.info(
715
+ "`image_codebook_config` is `None`. Initializing the `FlavaImageCodebookConfig` with default values."
716
+ )
717
+
718
+ self.image_config = FlavaImageConfig(**image_config)
719
+ self.text_config = FlavaTextConfig(**text_config)
720
+ self.multimodal_config = FlavaMultimodalConfig(**multimodal_config)
721
+ self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config)
722
+ self.projection_dim = projection_dim
723
+ self.init_codebook = init_codebook
724
+
725
+ self.hidden_size = hidden_size
726
+ self.layer_norm_eps = layer_norm_eps
727
+ self.initializer_range = initializer_range
728
+ self.logit_scale_init_value = logit_scale_init_value
729
+ self.initializer_factor = 1.0
730
+ self.ce_ignore_index = ce_ignore_index
731
+ self.mim_weight = mim_weight
732
+ self.mlm_weight = mlm_weight
733
+ self.global_contrastive_weight = global_contrastive_weight
734
+ self.itm_weight = itm_weight
735
+ self.mmm_image_weight = mmm_image_weight
736
+ self.mmm_text_weight = mmm_text_weight
737
+ self.global_backprop_contrastive = global_backprop_contrastive
738
+ self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder
739
+ self.return_loss = return_loss
740
+
741
+ @classmethod
742
+ def from_configs(
743
+ cls,
744
+ image_config: FlavaImageConfig,
745
+ text_config: FlavaTextConfig,
746
+ multimodal_config: FlavaMultimodalConfig,
747
+ image_codebook_config: FlavaImageCodebookConfig,
748
+ **kwargs,
749
+ ):
750
+ r"""
751
+ Instantiate a [`FlavaConfig`] (or a derived class) from a FLAVA text model configuration, a FLAVA image model
752
+ configuration, a FLAVA multimodal model configuration and a FLAVA image codebook model configuration.
753
+
754
+ Returns:
755
+ [`FlavaConfig`]: An instance of a configuration object
756
+ """
757
+
758
+ return cls(
759
+ image_config=image_config.to_dict(),
760
+ text_config=text_config.to_dict(),
761
+ multimodal_config=multimodal_config.to_dict(),
762
+ image_codebook_config=image_codebook_config.to_dict(),
763
+ **kwargs,
764
+ )
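As a reference for `from_configs`, a minimal sketch composing a `FlavaConfig` entirely from default sub-configurations (extra keyword arguments such as `projection_dim` are forwarded to `FlavaConfig.__init__`):

```python
from transformers import (
    FlavaConfig,
    FlavaImageCodebookConfig,
    FlavaImageConfig,
    FlavaMultimodalConfig,
    FlavaTextConfig,
)

# Each sub-config is serialized with to_dict() inside from_configs and then
# re-instantiated by FlavaConfig.__init__.
config = FlavaConfig.from_configs(
    image_config=FlavaImageConfig(),
    text_config=FlavaTextConfig(),
    multimodal_config=FlavaMultimodalConfig(),
    image_codebook_config=FlavaImageCodebookConfig(),
    projection_dim=768,
)
```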
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/convert_dalle_to_flava_codebook.py ADDED
@@ -0,0 +1,102 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import os
18
+
19
+ import torch
20
+
21
+ from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
22
+
23
+
24
+ def rreplace(s, old, new, occurrence):
25
+ li = s.rsplit(old, occurrence)
26
+ return new.join(li)
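`rreplace` rewrites only the last `occurrence` matches, which matters for parameter names whose suffix also appears earlier in the key. A couple of illustrative calls (the key names are made up for the example):

```python
# Only the trailing ".w" is rewritten; an earlier ".w" in the name is untouched.
rreplace("encoder.conv.w", ".w", ".weight", 1)   # -> "encoder.conv.weight"
rreplace("blocks.w_proj.w", ".w", ".weight", 1)  # -> "blocks.w_proj.weight"
```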
27
+
28
+
29
+ def count_parameters(state_dict):
30
+ # encoder.embeddings are double copied in original FLAVA
31
+ return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
32
+
33
+
34
+ def upgrade_state_dict(state_dict):
35
+ upgrade = {}
36
+
37
+ group_keys = ["group_1", "group_2", "group_3", "group_4"]
38
+ for key, value in state_dict.items():
39
+ for group_key in group_keys:
40
+ if group_key in key:
41
+ key = key.replace(f"{group_key}.", f"{group_key}.group.")
42
+
43
+ if "res_path" in key:
44
+ key = key.replace("res_path.", "res_path.path.")
45
+
46
+ if key.endswith(".w"):
47
+ key = rreplace(key, ".w", ".weight", 1)
48
+ if key.endswith(".b"):
49
+ key = rreplace(key, ".b", ".bias", 1)
50
+
51
+ upgrade[key] = value.float()
52
+
53
+ return upgrade
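Putting the three rewrites together, here is a hedged sketch of how a single codebook key is renamed (the key is illustrative rather than copied from a real DALL-E checkpoint):

```python
import torch

# "group_1."  -> "group_1.group."  (group blocks gain a ".group" submodule)
# "res_path." -> "res_path.path."  (residual paths gain a ".path" submodule)
# trailing ".w" / ".b" -> ".weight" / ".bias"
upgrade_state_dict({"group_1.0.res_path.0.w": torch.zeros(1)})
# -> {"group_1.group.0.res_path.path.0.weight": tensor([0.])}
```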
54
+
55
+
56
+ @torch.no_grad()
57
+ def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
58
+ """
59
+ Copy/paste/tweak model's weights to transformers design.
60
+ """
61
+ from dall_e import Encoder
62
+
63
+ encoder = Encoder()
64
+ if os.path.exists(checkpoint_path):
65
+ ckpt = torch.load(checkpoint_path)
66
+ else:
67
+ ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
68
+
69
+ if isinstance(ckpt, Encoder):
70
+ ckpt = ckpt.state_dict()
71
+ encoder.load_state_dict(ckpt)
72
+
73
+ if config_path is not None:
74
+ config = FlavaImageCodebookConfig.from_pretrained(config_path)
75
+ else:
76
+ config = FlavaImageCodebookConfig()
77
+
78
+ hf_model = FlavaImageCodebook(config).eval()
79
+ state_dict = encoder.state_dict()
80
+
81
+ hf_state_dict = upgrade_state_dict(state_dict)
82
+ hf_model.load_state_dict(hf_state_dict)
83
+ hf_state_dict = hf_model.state_dict()
84
+ hf_count = count_parameters(hf_state_dict)
85
+ state_dict_count = count_parameters(state_dict)
86
+
87
+ assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
88
+
89
+ if save_checkpoint:
90
+ hf_model.save_pretrained(pytorch_dump_folder_path)
91
+ else:
92
+ return hf_state_dict
93
+
94
+
95
+ if __name__ == "__main__":
96
+ parser = argparse.ArgumentParser()
97
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
98
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
99
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
100
+ args = parser.parse_args()
101
+
102
+ convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/convert_flava_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,99 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import os
18
+
19
+ import torch
20
+
21
+ from transformers import FlavaConfig, FlavaForPreTraining
22
+ from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
23
+
24
+
25
+ def count_parameters(state_dict):
26
+ # encoder.embeddings are double copied in original FLAVA
27
+ return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
28
+
29
+
30
+ def upgrade_state_dict(state_dict, codebook_state_dict):
31
+ upgrade = {}
32
+
33
+ for key, value in state_dict.items():
34
+ if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
35
+ continue
36
+
37
+ key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
38
+ key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
39
+ key = key.replace("heads.cmd.itm_head.cls", "itm_head")
40
+ key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
41
+ key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
42
+ key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
43
+ key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
44
+ key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
45
+ key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
46
+ key = key.replace("image_encoder.module", "flava.image_model")
47
+ key = key.replace("text_encoder.module", "flava.text_model")
48
+ key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
49
+ key = key.replace("mm_encoder.module", "flava.multimodal_model")
50
+ key = key.replace("text_projection", "flava.text_projection")
51
+ key = key.replace("image_projection", "flava.image_projection")
52
+
53
+ upgrade[key] = value.float()
54
+
55
+ for key, value in codebook_state_dict.items():
56
+ upgrade[f"image_codebook.{key}"] = value
57
+
58
+ return upgrade
59
+
60
+
61
+ @torch.no_grad()
62
+ def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
63
+ """
64
+ Copy/paste/tweak model's weights to transformers design.
65
+ """
66
+ if config_path is not None:
67
+ config = FlavaConfig.from_pretrained(config_path)
68
+ else:
69
+ config = FlavaConfig()
70
+
71
+ hf_model = FlavaForPreTraining(config).eval()
72
+
73
+ codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
74
+
75
+ if os.path.exists(checkpoint_path):
76
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
77
+ else:
78
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")
79
+
80
+ hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
81
+ hf_model.load_state_dict(hf_state_dict)
82
+ hf_state_dict = hf_model.state_dict()
83
+ hf_count = count_parameters(hf_state_dict)
84
+ state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
85
+
86
+ assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
87
+
88
+ hf_model.save_pretrained(pytorch_dump_folder_path)
89
+
90
+
91
+ if __name__ == "__main__":
92
+ parser = argparse.ArgumentParser()
93
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
94
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
95
+ parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
96
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
97
+ args = parser.parse_args()
98
+
99
+ convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
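The conversion can also be driven programmatically instead of through the CLI. A hedged sketch with placeholder paths (it requires the original FLAVA and DALL-E encoder checkpoints plus the `dall_e` package used by the codebook converter):

```python
from transformers.models.flava.convert_flava_original_pytorch_to_hf import (
    convert_flava_checkpoint,
)

# Writes a Hugging Face formatted FlavaForPreTraining checkpoint to ./flava-converted.
convert_flava_checkpoint(
    checkpoint_path="flava_original.pt",    # placeholder local path or URL
    codebook_path="dalle_encoder.pkl",      # placeholder local path or URL
    pytorch_dump_folder_path="./flava-converted",
    config_path=None,                       # fall back to the default FlavaConfig
)
```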
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/feature_extraction_flava.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for FLAVA."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_flava import FlavaImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class FlavaFeatureExtractor(FlavaImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
30
+ " use FlavaImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
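Since the class exists only for backward compatibility, new code should construct the image processor directly. A minimal sketch (assuming the hosted preprocessing config for `facebook/flava-full`):

```python
from transformers import FlavaImageProcessor

# Drop-in replacement for FlavaFeatureExtractor without the deprecation warning.
image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")
```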
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/image_processing_flava.py ADDED
@@ -0,0 +1,738 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Flava."""
16
+
17
+ import math
18
+ import random
19
+ from functools import lru_cache
20
+ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
25
+ from ...image_transforms import resize, to_channel_dimension_format
26
+ from ...image_utils import (
27
+ OPENAI_CLIP_MEAN,
28
+ OPENAI_CLIP_STD,
29
+ ChannelDimension,
30
+ ImageInput,
31
+ PILImageResampling,
32
+ infer_channel_dimension_format,
33
+ is_scaled_image,
34
+ make_list_of_images,
35
+ to_numpy_array,
36
+ valid_images,
37
+ validate_kwargs,
38
+ validate_preprocess_arguments,
39
+ )
40
+ from ...utils import TensorType, is_vision_available, logging
41
+
42
+
43
+ if is_vision_available():
44
+ import PIL
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ # These values are taken from CLIP
51
+ FLAVA_IMAGE_MEAN = OPENAI_CLIP_MEAN
52
+ FLAVA_IMAGE_STD = OPENAI_CLIP_STD
53
+ FLAVA_CODEBOOK_MEAN = [0.0, 0.0, 0.0]
54
+ FLAVA_CODEBOOK_STD = [1.0, 1.0, 1.0]
55
+ LOGIT_LAPLACE_EPS: float = 0.1
56
+
57
+
58
+ # Inspired by https://github.com/microsoft/unilm/blob/master/beit/masking_generator.py
59
+ class FlavaMaskingGenerator:
60
+ def __init__(
61
+ self,
62
+ input_size: Union[int, Tuple[int, int]] = 14,
63
+ total_mask_patches: int = 75,
64
+ mask_group_max_patches: Optional[int] = None,
65
+ mask_group_min_patches: int = 16,
66
+ mask_group_min_aspect_ratio: Optional[float] = 0.3,
67
+ mask_group_max_aspect_ratio: Optional[float] = None,
68
+ ):
69
+ if not isinstance(input_size, tuple):
70
+ input_size = (input_size,) * 2
71
+ self.height, self.width = input_size
72
+
73
+ self.num_patches = self.height * self.width
74
+ self.total_mask_patches = total_mask_patches
75
+
76
+ self.mask_group_min_patches = mask_group_min_patches
77
+ self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches
78
+
79
+ mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio
80
+ self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio))
81
+
82
+ def __repr__(self):
83
+ repr_str = "MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
84
+ self.height,
85
+ self.width,
86
+ self.mask_group_min_patches,
87
+ self.mask_group_max_patches,
88
+ self.total_mask_patches,
89
+ self.log_aspect_ratio[0],
90
+ self.log_aspect_ratio[1],
91
+ )
92
+ return repr_str
93
+
94
+ def get_shape(self):
95
+ return self.height, self.width
96
+
97
+ def _mask(self, mask, max_mask_patches):
98
+ delta = 0
99
+ for _attempt in range(10):
100
+ target_area = random.uniform(self.mask_group_min_patches, max_mask_patches)
101
+ aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
102
+ height = int(round(math.sqrt(target_area * aspect_ratio)))
103
+ width = int(round(math.sqrt(target_area / aspect_ratio)))
104
+ if width < self.width and height < self.height:
105
+ top = random.randint(0, self.height - height)
106
+ left = random.randint(0, self.width - width)
107
+
108
+ num_masked = mask[top : top + height, left : left + width].sum()
109
+ # Overlap
110
+ if 0 < height * width - num_masked <= max_mask_patches:
111
+ for i in range(top, top + height):
112
+ for j in range(left, left + width):
113
+ if mask[i, j] == 0:
114
+ mask[i, j] = 1
115
+ delta += 1
116
+
117
+ if delta > 0:
118
+ break
119
+ return delta
120
+
121
+ def __call__(self):
122
+ mask = np.zeros(shape=self.get_shape(), dtype=int)
123
+ mask_count = 0
124
+ while mask_count < self.total_mask_patches:
125
+ max_mask_patches = self.total_mask_patches - mask_count
126
+ max_mask_patches = min(max_mask_patches, self.mask_group_max_patches)
127
+
128
+ delta = self._mask(mask, max_mask_patches)
129
+ if delta == 0:
130
+ break
131
+ else:
132
+ mask_count += delta
133
+
134
+ return mask
135
+
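A small usage sketch of the masking generator on its own; it is an internal helper, so it is imported from this module rather than from the top-level package:

```python
from transformers.models.flava.image_processing_flava import FlavaMaskingGenerator

generator = FlavaMaskingGenerator(input_size=14, total_mask_patches=75)
mask = generator()              # numpy array of 0/1 values, one entry per patch
assert mask.shape == (14, 14)
assert mask.sum() <= 75         # at most `total_mask_patches` positions are masked
```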
136
+
137
+ class FlavaImageProcessor(BaseImageProcessor):
138
+ r"""
139
+ Constructs a Flava image processor.
140
+
141
+ Args:
142
+ do_resize (`bool`, *optional*, defaults to `True`):
143
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
144
+ `do_resize` parameter in `preprocess`.
145
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
146
+ Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`.
147
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
148
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in
149
+ `preprocess`.
150
+ do_center_crop (`bool`, *optional*, defaults to `True`):
151
+ Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`.
152
+ crop_size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
153
+ Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the
154
+ `crop_size` parameter in `preprocess`.
155
+ do_rescale (`bool`, *optional*, defaults to `True`):
156
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
157
+ parameter in `preprocess`.
158
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
159
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in
160
+ `preprocess`.
161
+ do_normalize (`bool`, *optional*, defaults to `True`):
162
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`.
163
+ image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
164
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
165
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
166
+ image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
167
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
168
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
169
+ return_image_mask (`bool`, *optional*, defaults to `False`):
170
+ Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`.
171
+ input_size_patches (`int`, *optional*, defaults to 14):
172
+ Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden
173
+ by the `input_size_patches` parameter in `preprocess`.
174
+ total_mask_patches (`int`, *optional*, defaults to 75):
175
+ Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in
176
+ `preprocess`.
177
+ mask_group_min_patches (`int`, *optional*, defaults to 16):
178
+ Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches`
179
+ parameter in `preprocess`.
180
+ mask_group_max_patches (`int`, *optional*):
181
+ Maximum number of patches that should be masked. Can be overridden by the `mask_group_max_patches`
182
+ parameter in `preprocess`.
183
+ mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
184
+ Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter
185
+ in `preprocess`.
186
+ mask_group_max_aspect_ratio (`float`, *optional*):
187
+ Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter
188
+ in `preprocess`.
189
+ codebook_do_resize (`bool`, *optional*, defaults to `True`):
190
+ Whether to resize the input for codebook to a certain `codebook_size`. Can be overridden by the
191
+ `codebook_do_resize` parameter in `preprocess`.
192
+ codebook_size (`Dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
193
+ Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in
194
+ `preprocess`.
195
+ codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
196
+ Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample`
197
+ parameter in `preprocess`.
198
+ codebook_do_center_crop (`bool`, *optional*, defaults to `True`):
199
+ Whether to crop the input for codebook at the center. If the input size is smaller than
200
+ `codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be
201
+ overridden by the `codebook_do_center_crop` parameter in `preprocess`.
202
+ codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
203
+ Desired output size for codebook input when applying center-cropping. Can be overridden by the
204
+ `codebook_crop_size` parameter in `preprocess`.
205
+ codebook_do_rescale (`bool`, *optional*, defaults to `True`):
206
+ Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be
207
+ overridden by the `codebook_do_rescale` parameter in `preprocess`.
208
+ codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
209
+ Defines the scale factor to use if rescaling the codebook image. Can be overridden by the
210
+ `codebook_rescale_factor` parameter in `preprocess`.
211
+ codebook_do_map_pixels (`bool`, *optional*, defaults to `True`):
212
+ Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the
213
+ `codebook_do_map_pixels` parameter in `preprocess`.
214
+ codebook_do_normalize (`bool`, *optional*, defaults to `True`):
215
+ Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`. Can
216
+ be overridden by the `codebook_do_normalize` parameter in `preprocess`.
217
+ codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`):
218
+ The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden
219
+ by the `codebook_image_mean` parameter in `preprocess`.
220
+ codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[1, 1, 1]`):
221
+ The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can
222
+ be overridden by the `codebook_image_std` parameter in `preprocess`.
223
+ """
224
+
225
+ model_input_names = ["pixel_values"]
226
+
227
+ def __init__(
228
+ self,
229
+ do_resize: bool = True,
230
+ size: Dict[str, int] = None,
231
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
232
+ do_center_crop: bool = True,
233
+ crop_size: Dict[str, int] = None,
234
+ do_rescale: bool = True,
235
+ rescale_factor: Union[int, float] = 1 / 255,
236
+ do_normalize: bool = True,
237
+ image_mean: Optional[Union[float, Iterable[float]]] = None,
238
+ image_std: Optional[Union[float, Iterable[float]]] = None,
239
+ # Mask related params
240
+ return_image_mask: bool = False,
241
+ input_size_patches: int = 14,
242
+ total_mask_patches: int = 75,
243
+ mask_group_min_patches: int = 16,
244
+ mask_group_max_patches: Optional[int] = None,
245
+ mask_group_min_aspect_ratio: float = 0.3,
246
+ mask_group_max_aspect_ratio: Optional[float] = None,
247
+ # Codebook related params
248
+ return_codebook_pixels: bool = False,
249
+ codebook_do_resize: bool = True,
250
+ codebook_size: Dict[str, int] = None,
251
+ codebook_resample: PILImageResampling = PILImageResampling.LANCZOS,
252
+ codebook_do_center_crop: bool = True,
253
+ codebook_crop_size: Dict[str, int] = None,
254
+ codebook_do_rescale: bool = True,
255
+ codebook_rescale_factor: Union[int, float] = 1 / 255,
256
+ codebook_do_map_pixels: bool = True,
257
+ codebook_do_normalize: bool = True,
258
+ codebook_image_mean: Optional[Union[float, Iterable[float]]] = None,
259
+ codebook_image_std: Optional[Union[float, Iterable[float]]] = None,
260
+ **kwargs,
261
+ ) -> None:
262
+ super().__init__(**kwargs)
263
+ size = size if size is not None else {"height": 224, "width": 224}
264
+ size = get_size_dict(size)
265
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
266
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
267
+
268
+ codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
269
+ codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
270
+ codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112}
271
+ codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
272
+
273
+ self.do_resize = do_resize
274
+ self.size = size
275
+ self.resample = resample
276
+ self.do_rescale = do_rescale
277
+ self.rescale_factor = rescale_factor
278
+ self.do_center_crop = do_center_crop
279
+ self.crop_size = crop_size
280
+ self.do_normalize = do_normalize
281
+ self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN
282
+ self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD
283
+
284
+ self.return_image_mask = return_image_mask
285
+ self.input_size_patches = input_size_patches
286
+ self.total_mask_patches = total_mask_patches
287
+ self.mask_group_min_patches = mask_group_min_patches
288
+ self.mask_group_max_patches = mask_group_max_patches
289
+ self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
290
+ self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio
291
+
292
+ self.return_codebook_pixels = return_codebook_pixels
293
+ self.codebook_do_resize = codebook_do_resize
294
+ self.codebook_size = codebook_size
295
+ self.codebook_resample = codebook_resample
296
+ self.codebook_do_center_crop = codebook_do_center_crop
297
+ self.codebook_crop_size = codebook_crop_size
298
+ self.codebook_do_rescale = codebook_do_rescale
299
+ self.codebook_rescale_factor = codebook_rescale_factor
300
+ self.codebook_do_map_pixels = codebook_do_map_pixels
301
+ self.codebook_do_normalize = codebook_do_normalize
302
+ self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN
304
+ self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD
305
+ self._valid_processor_keys = [
306
+ "images",
307
+ "do_resize",
308
+ "size",
309
+ "resample",
310
+ "do_center_crop",
311
+ "crop_size",
312
+ "do_rescale",
313
+ "rescale_factor",
314
+ "do_normalize",
315
+ "image_mean",
316
+ "image_std",
317
+ "return_image_mask",
318
+ "input_size_patches",
319
+ "total_mask_patches",
320
+ "mask_group_min_patches",
321
+ "mask_group_max_patches",
322
+ "mask_group_min_aspect_ratio",
323
+ "mask_group_max_aspect_ratio",
324
+ "return_codebook_pixels",
325
+ "codebook_do_resize",
326
+ "codebook_size",
327
+ "codebook_resample",
328
+ "codebook_do_center_crop",
329
+ "codebook_crop_size",
330
+ "codebook_do_rescale",
331
+ "codebook_rescale_factor",
332
+ "codebook_do_map_pixels",
333
+ "codebook_do_normalize",
334
+ "codebook_image_mean",
335
+ "codebook_image_std",
336
+ "return_tensors",
337
+ "data_format",
338
+ "input_data_format",
339
+ ]
340
+
341
+ @classmethod
342
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
343
+ """
344
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if the image processor
345
+ is created using `from_dict` and kwargs, e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)`.
346
+ """
347
+ image_processor_dict = image_processor_dict.copy()
348
+ if "codebook_size" in kwargs:
349
+ image_processor_dict["codebook_size"] = kwargs.pop("codebook_size")
350
+ if "codebook_crop_size" in kwargs:
351
+ image_processor_dict["codebook_crop_size"] = kwargs.pop("codebook_crop_size")
352
+ return super().from_dict(image_processor_dict, **kwargs)
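An end-to-end sketch of the default pipeline (resize to 224, center crop to 224, rescale and CLIP-style normalization) on a synthetic image, assuming Pillow is installed for the resize step:

```python
import numpy as np

from transformers import FlavaImageProcessor

image_processor = FlavaImageProcessor()  # library defaults
image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)

inputs = image_processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224)
```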
353
+
354
+ @lru_cache()
355
+ def masking_generator(
356
+ self,
357
+ input_size_patches,
358
+ total_mask_patches,
359
+ mask_group_min_patches,
360
+ mask_group_max_patches,
361
+ mask_group_min_aspect_ratio,
362
+ mask_group_max_aspect_ratio,
363
+ ) -> FlavaMaskingGenerator:
364
+ return FlavaMaskingGenerator(
365
+ input_size=input_size_patches,
366
+ total_mask_patches=total_mask_patches,
367
+ mask_group_min_patches=mask_group_min_patches,
368
+ mask_group_max_patches=mask_group_max_patches,
369
+ mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
370
+ mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
371
+ )
372
+
373
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
374
+ def resize(
375
+ self,
376
+ image: np.ndarray,
377
+ size: Dict[str, int],
378
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
379
+ data_format: Optional[Union[str, ChannelDimension]] = None,
380
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
381
+ **kwargs,
382
+ ) -> np.ndarray:
383
+ """
384
+ Resize an image to `(size["height"], size["width"])`.
385
+
386
+ Args:
387
+ image (`np.ndarray`):
388
+ Image to resize.
389
+ size (`Dict[str, int]`):
390
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
391
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
392
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
393
+ data_format (`ChannelDimension` or `str`, *optional*):
394
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
395
+ image is used. Can be one of:
396
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
397
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
398
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
399
+ input_data_format (`ChannelDimension` or `str`, *optional*):
400
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
401
+ from the input image. Can be one of:
402
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
403
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
404
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
405
+
406
+ Returns:
407
+ `np.ndarray`: The resized image.
408
+ """
409
+ size = get_size_dict(size)
410
+ if "height" not in size or "width" not in size:
411
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
412
+ output_size = (size["height"], size["width"])
413
+ return resize(
414
+ image,
415
+ size=output_size,
416
+ resample=resample,
417
+ data_format=data_format,
418
+ input_data_format=input_data_format,
419
+ **kwargs,
420
+ )
421
+
422
+ def map_pixels(self, image: np.ndarray) -> np.ndarray:
423
+ return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS
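With `LOGIT_LAPLACE_EPS = 0.1`, `map_pixels` squeezes codebook inputs from `[0, 1]` into `[0.1, 0.9]`, matching the logit-Laplace preprocessing of the original DALL-E dVAE; a tiny numeric check:

```python
eps = 0.1  # LOGIT_LAPLACE_EPS
low, high = (1 - 2 * eps) * 0.0 + eps, (1 - 2 * eps) * 1.0 + eps
print(round(low, 3), round(high, 3))  # 0.1 0.9
```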
424
+
425
+ def _preprocess_image(
426
+ self,
427
+ image: ImageInput,
428
+ do_resize: bool = None,
429
+ size: Dict[str, int] = None,
430
+ resample: PILImageResampling = None,
431
+ do_center_crop: bool = None,
432
+ crop_size: Dict[str, int] = None,
433
+ do_rescale: bool = None,
434
+ rescale_factor: float = None,
435
+ do_normalize: bool = None,
436
+ image_mean: Optional[Union[float, List[float]]] = None,
437
+ image_std: Optional[Union[float, List[float]]] = None,
438
+ do_map_pixels: bool = None,
439
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
440
+ input_data_format: Optional[ChannelDimension] = None,
441
+ ) -> np.ndarray:
442
+ """Preprocesses a single image."""
443
+
444
+ validate_preprocess_arguments(
445
+ do_rescale=do_rescale,
446
+ rescale_factor=rescale_factor,
447
+ do_normalize=do_normalize,
448
+ image_mean=image_mean,
449
+ image_std=image_std,
450
+ do_center_crop=do_center_crop,
451
+ crop_size=crop_size,
452
+ do_resize=do_resize,
453
+ size=size,
454
+ resample=resample,
455
+ )
456
+
457
+ # All transformations expect numpy arrays.
458
+ image = to_numpy_array(image)
459
+
460
+ if is_scaled_image(image) and do_rescale:
461
+ logger.warning_once(
462
+ "It looks like you are trying to rescale already rescaled images. If the input"
463
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
464
+ )
465
+
466
+ if input_data_format is None:
467
+ # We assume that all images have the same channel dimension format.
468
+ input_data_format = infer_channel_dimension_format(image)
469
+
470
+ if do_resize:
471
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
472
+
473
+ if do_center_crop:
474
+ image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
475
+
476
+ if do_rescale:
477
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
478
+
479
+ if do_normalize:
480
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
481
+
482
+ if do_map_pixels:
483
+ image = self.map_pixels(image)
484
+
485
+ if data_format is not None:
486
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
487
+ return image
488
+
489
+ def preprocess(
490
+ self,
491
+ images: ImageInput,
492
+ do_resize: Optional[bool] = None,
493
+ size: Dict[str, int] = None,
494
+ resample: PILImageResampling = None,
495
+ do_center_crop: Optional[bool] = None,
496
+ crop_size: Optional[Dict[str, int]] = None,
497
+ do_rescale: Optional[bool] = None,
498
+ rescale_factor: Optional[float] = None,
499
+ do_normalize: Optional[bool] = None,
500
+ image_mean: Optional[Union[float, List[float]]] = None,
501
+ image_std: Optional[Union[float, List[float]]] = None,
502
+ # Mask related params
503
+ return_image_mask: Optional[bool] = None,
504
+ input_size_patches: Optional[int] = None,
505
+ total_mask_patches: Optional[int] = None,
506
+ mask_group_min_patches: Optional[int] = None,
507
+ mask_group_max_patches: Optional[int] = None,
508
+ mask_group_min_aspect_ratio: Optional[float] = None,
509
+ mask_group_max_aspect_ratio: Optional[float] = None,
510
+ # Codebook related params
511
+ return_codebook_pixels: Optional[bool] = None,
512
+ codebook_do_resize: Optional[bool] = None,
513
+ codebook_size: Optional[Dict[str, int]] = None,
514
+ codebook_resample: Optional[int] = None,
515
+ codebook_do_center_crop: Optional[bool] = None,
516
+ codebook_crop_size: Optional[Dict[str, int]] = None,
517
+ codebook_do_rescale: Optional[bool] = None,
518
+ codebook_rescale_factor: Optional[float] = None,
519
+ codebook_do_map_pixels: Optional[bool] = None,
520
+ codebook_do_normalize: Optional[bool] = None,
521
+ codebook_image_mean: Optional[Iterable[float]] = None,
522
+ codebook_image_std: Optional[Iterable[float]] = None,
523
+ return_tensors: Optional[Union[str, TensorType]] = None,
524
+ data_format: ChannelDimension = ChannelDimension.FIRST,
525
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
526
+ **kwargs,
527
+ ) -> PIL.Image.Image:
528
+ """
529
+ Preprocess an image or batch of images.
530
+
531
+ Args:
532
+ images (`ImageInput`):
533
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
534
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
535
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
536
+ Whether to resize the image.
537
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
538
+ Size of the image.
539
+ resample (`int`, *optional*, defaults to `self.resample`):
540
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
541
+ has an effect if `do_resize` is set to `True`.
542
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
543
+ Whether to center crop the image.
544
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
545
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
546
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
547
+ Whether to rescale the image values to the [0, 1] range.
548
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
549
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
550
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
551
+ Whether to normalize the image.
552
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
553
+ Image mean.
554
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
555
+ Image standard deviation.
556
+ return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`):
557
+ Whether to return the image mask.
558
+ input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`):
559
+ Number of patches in the image in the height and width direction. 14x14 = 196 total patches.
560
+ total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`):
561
+ Total number of patches that should be masked.
562
+ mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`):
563
+ Minimum number of patches that should be masked.
564
+ mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`):
565
+ Maximum number of patches that should be masked.
566
+ mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`):
567
+ Minimum aspect ratio of the mask window.
568
+ mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`):
569
+ Maximum aspect ratio of the mask window.
570
+ return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`):
571
+ Whether to return the codebook pixels.
572
+ codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`):
573
+ Whether to resize the codebook pixels.
574
+ codebook_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_size`):
575
+ Size of the codebook pixels.
576
+ codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`):
577
+ Resampling filter to use if resizing the codebook pixels. This can be one of the enum
578
+ `PILImageResampling`. Only has an effect if `codebook_do_resize` is set to `True`.
579
+ codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`):
580
+ Whether to center crop the codebook pixels.
581
+ codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`):
582
+ Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set
583
+ to `True`.
584
+ codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`):
585
+ Whether to rescale the codebook pixel values to the [0, 1] range.
586
+ codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`):
587
+ Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`.
588
+ codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`):
589
+ Whether to map the codebook pixel values.
590
+ codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`):
591
+ Whether to normalize the codebook pixels.
592
+ codebook_image_mean (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_mean`):
593
+ Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`.
594
+ codebook_image_std (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_std`):
595
+ Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is
596
+ set to `True`.
597
+ return_tensors (`str` or `TensorType`, *optional*):
598
+ The type of tensors to return. Can be one of:
599
+ - Unset: Return a list of `np.ndarray`.
600
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
601
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
602
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
603
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
604
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
605
+ The channel dimension format for the output image. Can be one of:
606
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
607
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
608
+ input_data_format (`ChannelDimension` or `str`, *optional*):
609
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
610
+ from the input image. Can be one of:
611
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
612
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
613
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
614
+ """
615
+ do_resize = do_resize if do_resize is not None else self.do_resize
616
+ size = size if size is not None else self.size
617
+ size = get_size_dict(size)
618
+ resample = resample if resample is not None else self.resample
619
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
620
+ crop_size = crop_size if crop_size is not None else self.crop_size
621
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
622
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
623
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
624
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
625
+ image_mean = image_mean if image_mean is not None else self.image_mean
626
+ image_std = image_std if image_std is not None else self.image_std
627
+
628
+ return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask
629
+ input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches
630
+ total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches
631
+ mask_group_min_patches = (
632
+ mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches
633
+ )
634
+ mask_group_max_patches = (
635
+ mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches
636
+ )
637
+ mask_group_min_aspect_ratio = (
638
+ mask_group_min_aspect_ratio
639
+ if mask_group_min_aspect_ratio is not None
640
+ else self.mask_group_min_aspect_ratio
641
+ )
642
+ mask_group_max_aspect_ratio = (
643
+ mask_group_max_aspect_ratio
644
+ if mask_group_max_aspect_ratio is not None
645
+ else self.mask_group_max_aspect_ratio
646
+ )
647
+
648
+ return_codebook_pixels = (
649
+ return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels
650
+ )
651
+ codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize
652
+ codebook_size = codebook_size if codebook_size is not None else self.codebook_size
653
+ codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
654
+ codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample
655
+ codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale
656
+ codebook_rescale_factor = (
657
+ codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor
658
+ )
659
+ codebook_do_center_crop = (
660
+ codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop
661
+ )
662
+ codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size
663
+ codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
664
+ codebook_do_map_pixels = (
665
+ codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels
666
+ )
667
+ codebook_do_normalize = (
668
+ codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize
669
+ )
670
+ codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean
671
+ codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std
672
+
673
+ images = make_list_of_images(images)
674
+
675
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
676
+
677
+ if not valid_images(images):
678
+ raise ValueError(
679
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
680
+ "torch.Tensor, tf.Tensor or jax.ndarray."
681
+ )
682
+
683
+ processed_images = [
684
+ self._preprocess_image(
685
+ image=img,
686
+ do_resize=do_resize,
687
+ size=size,
688
+ resample=resample,
689
+ do_center_crop=do_center_crop,
690
+ crop_size=crop_size,
691
+ do_rescale=do_rescale,
692
+ rescale_factor=rescale_factor,
693
+ do_normalize=do_normalize,
694
+ image_mean=image_mean,
695
+ image_std=image_std,
696
+ do_map_pixels=False,
697
+ data_format=data_format,
698
+ input_data_format=input_data_format,
699
+ )
700
+ for img in images
701
+ ]
702
+ data = {"pixel_values": processed_images}
703
+
704
+ if return_codebook_pixels:
705
+ codebook_images = [
706
+ self._preprocess_image(
707
+ image=img,
708
+ do_resize=codebook_do_resize,
709
+ size=codebook_size,
710
+ resample=codebook_resample,
711
+ do_center_crop=codebook_do_center_crop,
712
+ crop_size=codebook_crop_size,
713
+ do_rescale=codebook_do_rescale,
714
+ rescale_factor=codebook_rescale_factor,
715
+ do_normalize=codebook_do_normalize,
716
+ image_mean=codebook_image_mean,
717
+ image_std=codebook_image_std,
718
+ do_map_pixels=codebook_do_map_pixels,
719
+ data_format=data_format,
720
+ input_data_format=input_data_format,
721
+ )
722
+ for img in images
723
+ ]
724
+ data["codebook_pixel_values"] = codebook_images
725
+
726
+ if return_image_mask:
727
+ mask_generator = self.masking_generator(
728
+ input_size_patches=input_size_patches,
729
+ total_mask_patches=total_mask_patches,
730
+ mask_group_min_patches=mask_group_min_patches,
731
+ mask_group_max_patches=mask_group_max_patches,
732
+ mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
733
+ mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
734
+ )
735
+ masks = [mask_generator() for _ in images]
736
+ data["bool_masked_pos"] = masks
737
+
738
+ return BatchFeature(data=data, tensor_type=return_tensors)
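A minimal usage sketch for the `preprocess` entry point above (illustrative, not part of the diff: it assumes the `facebook/flava-full` checkpoint referenced later in this commit and a hypothetical local image file; the output keys follow from the `data` dict built above):

    from PIL import Image
    from transformers import FlavaImageProcessor

    processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")
    image = Image.open("example.jpg")  # hypothetical input image

    outputs = processor(
        images=image,
        return_image_mask=True,       # adds "bool_masked_pos" from the masking generator
        return_codebook_pixels=True,  # adds "codebook_pixel_values" for the image codebook
        return_tensors="pt",
    )
    print(list(outputs.keys()))  # ['pixel_values', 'codebook_pixel_values', 'bool_masked_pos']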
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/modeling_flava.py ADDED
@@ -0,0 +1,2098 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch FLAVA model."""
16
+
17
+ import collections
18
+ import math
19
+ from collections import OrderedDict
20
+ from dataclasses import dataclass
21
+ from typing import Any, Dict, List, Optional, Set, Tuple, Union
22
+
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
29
+ from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
30
+ from ...utils import (
31
+ ModelOutput,
32
+ add_code_sample_docstrings,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_flava import (
39
+ FlavaConfig,
40
+ FlavaImageCodebookConfig,
41
+ FlavaImageConfig,
42
+ FlavaMultimodalConfig,
43
+ FlavaTextConfig,
44
+ )
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CHECKPOINT_FOR_DOC = "facebook/flava-full"
50
+
51
+ # Codebook docstring
52
+ _CHECKPOINT_FOR_CODEBOOK_DOC = "facebook/flava-image-codebook"
53
+ _CONFIG_CLASS_FOR_IMAGE_MODEL_DOC = "FlavaImageConfig"
54
+ _CONFIG_CLASS_FOR_TEXT_MODEL_DOC = "FlavaTextConfig"
55
+ _CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC = "FlavaMultimodalConfig"
56
+ _EXPECTED_IMAGE_OUTPUT_SHAPE = [1, 197, 768]
57
+
58
+ from ..deprecated._archive_maps import FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
59
+
60
+
61
+ FLAVA_CODEBOOK_PRETRAINED_MODEL_ARCHIVE_LIST = ["facebook/flava-image-codebook"]
62
+ LOGIT_SCALE_CLAMP_MIN = 0
63
+ LOGIT_SCALE_CLAMP_MAX = 4.6052
64
+
65
+ FlavaPossibleConfigs = Union[FlavaTextConfig, FlavaImageConfig, FlavaMultimodalConfig]
66
+
67
+
68
+ @dataclass
69
+ class FlavaModelOutput(ModelOutput):
70
+ """
71
+ Output from FlavaModel containing embeddings and outputs from individual encoders.
72
+
73
+ Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
74
+ transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
75
+ `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
76
+
77
+ Args:
78
+ image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
79
+ The image embeddings which are basically the pooled output of [`FlavaImageModel`].
80
+ image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
81
+ The output of the [`FlavaImageModel`].
82
+ text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
83
+ The text embeddings which are basically the pooled output of [`FlavaTextModel`].
84
+ text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
85
+ The output of the [`FlavaTextModel`].
86
+ multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
87
+ The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
88
+ multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
89
+ The output of the [`FlavaMultimodalModel`].
90
+ """
91
+
92
+ image_embeddings: Optional[torch.FloatTensor] = None
93
+ image_output: Optional[BaseModelOutputWithPooling] = None
94
+ text_embeddings: Optional[torch.FloatTensor] = None
95
+ text_output: Optional[BaseModelOutputWithPooling] = None
96
+ multimodal_embeddings: Optional[torch.FloatTensor] = None
97
+ multimodal_output: Optional[BaseModelOutputWithPooling] = None
98
+
99
+ def to_tuple(self) -> Tuple[Any]:
100
+ return tuple(
101
+ self[k] if k not in ["text_output", "image_output", "multimodal_output"] else getattr(self, k).to_tuple()
102
+ for k in self.keys()
103
+ )
104
+
105
+
106
+ @dataclass
107
+ class FlavaLosses(ModelOutput):
108
+ """Class representing pretraining losses from FLAVA model
109
+
110
+ Args:
111
+ mim (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels` and `pixel_values` are present, `input_ids_masked` is absent and `mim_weight` > 0.:
112
+ Masked Image Modeling loss as used in BeIT calculated only for unimodal image data.
113
+ mlm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels` and `input_ids_masked` are present, `pixel_values` is absent and `mlm_weight` > 0.:
114
+ Masked Language Modeling loss as used in BERT calculated only for unimodal text data.
115
+ itm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `itm_labels`, `input_ids_masked`, `pixel_values` are present and `itm_weight` > 0.:
116
+ Image Text Matching (ITM) loss calculated for paired image-text data. Note that ITM loss is calculated on
117
+ masked pairs in FLAVA.
118
+ global_contrastive (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `input_ids` and `pixel_values` are present and `global_contrastive_weight` > 0.:
119
+ Contrastive loss for image-text similarity similar to CLIP but calculated globally for paired image-text
120
+ data. This is calculated on unmasked images and texts.
121
+ mmm_image (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_image_weight` > 0.:
122
+ Masked Multimodal Modeling loss's image component calculated on paired image-text data.
123
+ mmm_text (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_text_weight` > 0.:
124
+ Masked Multimodal Modeling loss's text component calculated on paired image-text data.
125
+ """
126
+
127
+ mim: Optional[torch.FloatTensor] = None
128
+ mlm: Optional[torch.FloatTensor] = None
129
+ itm: Optional[torch.FloatTensor] = None
130
+ global_contrastive: Optional[torch.FloatTensor] = None
131
+ mmm_image: Optional[torch.FloatTensor] = None
132
+ mmm_text: Optional[torch.FloatTensor] = None
133
+
134
+ def all_none(self) -> bool:
135
+ all_none = True
136
+ for v in self.values():
137
+ if v is not None:
138
+ all_none = False
139
+ break
140
+ return all_none
141
+
142
+
143
+ @dataclass
144
+ class FlavaForPreTrainingOutput(ModelOutput):
145
+ """
146
+ Output from FlavaForPreTraining containing embeddings, and outputs from individual encoders.
147
+
148
+ Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
149
+ transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
150
+ `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
151
+
152
+ Args:
153
+ loss (`torch.FloatTensor`, *optional*, returned when `return_loss` is True):
154
+ Total loss calculated for this model.
155
+ loss_info (`FlavaLosses`):
156
+ Detailed info for FLAVA Pretraining losses. Check `FlavaLosses` class description for the information on
157
+ the keys.
158
+ image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
159
+ The image embeddings which are basically the pooled output of [`FlavaImageModel`].
160
+ image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
161
+ The output of the [`FlavaImageModel`].
162
+ text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
163
+ The text embeddings which are basically the pooled output of [`FlavaTextModel`].
164
+ text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
165
+ The output of the [`FlavaTextModel`].
166
+ multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
167
+ The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
168
+ multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
169
+ The output of the [`FlavaMultimodalModel`].
170
+
171
+ image_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
172
+ The image embeddings which are basically the pooled output of [`FlavaImageModel`]. Uses `bool_masked_pos`
173
+ to create masked images.
174
+ image_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
175
+ The output of the [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images.
176
+ text_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids_masked` are present):
177
+ The text embeddings which are basically the pooled output of [`FlavaTextModel`].
178
+ text_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` are present):
179
+ The output of the [`FlavaTextModel`].
180
+ multimodal_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present):
181
+ The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
182
+ multimodal_masked_output (`BaseModelOutputWithPooling`, returned when `input_ids_masked` and `pixel_values` are present):
183
+ The output of the [`FlavaMultimodalModel`].
184
+
185
+ mim_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)` , *optional*, returned when `pixel_values` are present and `input_ids_masked` are not):
186
+ The logits for MIM unimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened output is
187
+ returned when `bool_masked_pos` has some of the patches masked.
188
+ mlm_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `input_ids_masked` are present and `pixel_values` are not):
189
+ The logits for MLM unimodal loss. The flattened output is returned when `input_ids_masked` has some of
190
+ the tokens masked.
191
+ itm_logits (`torch.FloatTensor` of shape `(batch_size, 2)`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
192
+ The logits for ITM loss. Note that ITM loss is calculated on masked pairs in FLAVA.
193
+ mmm_image_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
194
+ The logits for MMM image multimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened
195
+ output is returned when `bool_masked_pos` has some of the patches masked.
196
+ mmm_text_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
197
+ The logits for MMM text multimodal loss. The flattened output is returned when `input_ids_masked` has
198
+ some of the tokens masked.
199
+ contrastive_logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
200
+ The scaled dot product scores between `image_embeddings` and `text_embeddings` but passed through FLAVA's
201
+ `image_projection` and `text_projection` layers respectively. This represents the image-text similarity
202
+ scores. This is calculated on unmasked images and texts.
203
+ contrastive_logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
204
+ The scaled dot product scores between `text_embeddings` and `image_embeddings` but passed through FLAVA's
205
+ `text_projection` and `image_projection` layers respectively. This is calculated on unmasked images and
206
+ texts.
207
+ """
208
+
209
+ loss: Optional[torch.FloatTensor] = None
210
+ loss_info: FlavaLosses = None
211
+ image_embeddings: Optional[torch.FloatTensor] = None
212
+ image_output: Optional[BaseModelOutputWithPooling] = None
213
+ text_embeddings: Optional[torch.FloatTensor] = None
214
+ text_output: Optional[BaseModelOutputWithPooling] = None
215
+ multimodal_embeddings: Optional[torch.FloatTensor] = None
216
+ multimodal_output: Optional[BaseModelOutputWithPooling] = None
217
+ image_masked_embeddings: Optional[torch.FloatTensor] = None
218
+ image_masked_output: Optional[BaseModelOutputWithPooling] = None
219
+ text_masked_embeddings: Optional[torch.FloatTensor] = None
220
+ text_masked_output: Optional[BaseModelOutputWithPooling] = None
221
+ multimodal_masked_embeddings: Optional[torch.FloatTensor] = None
222
+ multimodal_masked_output: Optional[BaseModelOutputWithPooling] = None
223
+ mim_logits: Optional[torch.FloatTensor] = None
224
+ mlm_logits: Optional[torch.FloatTensor] = None
225
+ itm_logits: Optional[torch.FloatTensor] = None
226
+ contrastive_logits_per_image: Optional[torch.FloatTensor] = None
227
+ contrastive_logits_per_text: Optional[torch.FloatTensor] = None
228
+ mmm_image_logits: Optional[torch.FloatTensor] = None
229
+ mmm_text_logits: Optional[torch.FloatTensor] = None
230
+
231
+ def to_tuple(self) -> Tuple[Any]:
232
+ transformer_outputs = [
233
+ "text_output",
234
+ "image_output",
235
+ "multimodal_output",
236
+ "text_masked_output",
237
+ "image_masked_output",
238
+ "multimodal_masked_output",
239
+ ]
240
+ return tuple(self[k] if k not in transformer_outputs else getattr(self, k).to_tuple() for k in self.keys())
241
+
242
+
243
+ # Based on timm implementation, which can be found here:
244
+ # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/image_transformer.py
245
+ class FlavaImageEmbeddings(nn.Module):
246
+ """
247
+ Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
248
+ """
249
+
250
+ def __init__(self, config: FlavaImageConfig, use_mask_token: bool = False) -> None:
251
+ super().__init__()
252
+
253
+ use_mask_token = use_mask_token or config.mask_token
254
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
255
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
256
+ self.patch_embeddings = PatchEmbeddings(
257
+ image_size=config.image_size,
258
+ patch_size=config.patch_size,
259
+ num_channels=config.num_channels,
260
+ embed_dim=config.hidden_size,
261
+ )
262
+ num_patches = self.patch_embeddings.num_patches
263
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
264
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
265
+ self.config = config
266
+
267
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
268
+ """
269
+ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
270
+ resolution images.
271
+
272
+ Source:
273
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/image_transformer.py#L174
274
+ """
275
+
276
+ npatch = embeddings.shape[1] - 1
277
+ num_pos = self.position_embeddings.shape[1] - 1
278
+ if npatch == num_pos and height == width:
279
+ return self.position_embeddings
280
+ class_pos_embed = self.position_embeddings[:, 0]
281
+ patch_pos_embed = self.position_embeddings[:, 1:]
282
+ dim = embeddings.shape[-1]
283
+ num_h_patches = height // self.config.patch_size
284
+ num_w_patches = width // self.config.patch_size
285
+ # we add a small number to avoid floating point error in the interpolation
286
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
287
+ num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1
288
+ patch_pos_embed = nn.functional.interpolate(
289
+ patch_pos_embed.reshape(1, int(math.sqrt(num_pos)), int(math.sqrt(num_pos)), dim).permute(0, 3, 1, 2),
290
+ scale_factor=(num_h_patches / math.sqrt(num_pos), num_w_patches / math.sqrt(num_pos)),
291
+ mode="bicubic",
292
+ align_corners=False,
293
+ )
294
+ if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
295
+ raise ValueError(
296
+ f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
297
+ f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
298
+ )
299
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
300
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
301
+
302
+ def forward(
303
+ self,
304
+ pixel_values: torch.Tensor,
305
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
306
+ interpolate_pos_encoding: bool = False,
307
+ ) -> torch.Tensor:
308
+ batch_size, num_channels, height, width = pixel_values.shape
309
+ embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
310
+
311
+ batch_size, seq_len, _ = embeddings.size()
312
+ if bool_masked_pos is not None:
313
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
314
+ # B X H X W = B X HW
315
+ if bool_masked_pos.dim() == 3:
316
+ bool_masked_pos = bool_masked_pos.view(bool_masked_pos.size(0), -1)
317
+ # replace the masked visual tokens by mask_tokens
318
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
319
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
320
+
321
+ # add the [CLS] token to the embedded patch tokens
322
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
323
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
324
+
325
+ # add positional encoding to each token
326
+ if interpolate_pos_encoding:
327
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
328
+ else:
329
+ embeddings = embeddings + self.position_embeddings
330
+
331
+ embeddings = self.dropout(embeddings)
332
+
333
+ return embeddings
334
+
335
+
336
+ # Based on timm implementation, which can be found here:
337
+ # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/image_transformer.py
338
+ class PatchEmbeddings(nn.Module):
339
+ """
340
+ Image to Patch Embedding.
341
+ """
342
+
343
+ def __init__(
344
+ self,
345
+ image_size: int = 224,
346
+ patch_size: Union[int, Tuple[int, int]] = 16,
347
+ num_channels: int = 3,
348
+ embed_dim: int = 768,
349
+ ):
350
+ super().__init__()
351
+ if not isinstance(image_size, collections.abc.Iterable):
352
+ image_size = (image_size, image_size)
353
+ if not isinstance(patch_size, collections.abc.Iterable):
354
+ patch_size = (patch_size, patch_size)
355
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
356
+ self.image_size = image_size
357
+ self.patch_size = patch_size
358
+ self.num_patches = num_patches
359
+
360
+ self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
361
+
362
+ def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
363
+ batch_size, num_channels, height, width = pixel_values.shape
364
+ if not interpolate_pos_encoding:
365
+ if height != self.image_size[0] or width != self.image_size[1]:
366
+ raise ValueError(
367
+ f"Input image size ({height}*{width}) doesn't match model"
368
+ f" ({self.image_size[0]}*{self.image_size[1]})."
369
+ )
370
+ x = self.projection(pixel_values).flatten(2).transpose(1, 2)
371
+ return x
372
+
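As a quick shape check for `PatchEmbeddings` above (a sketch using the defaults visible in its signature; the 197-token figure matches `_EXPECTED_IMAGE_OUTPUT_SHAPE` declared earlier in this file):

    # Conv2d with kernel_size=stride=patch_size turns a 224x224 image into a grid of patch tokens.
    image_size, patch_size, embed_dim = 224, 16, 768
    num_patches = (image_size // patch_size) ** 2   # 14 * 14 = 196
    seq_len = num_patches + 1                       # +1 CLS token -> 197, i.e. [1, 197, 768] per image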
373
+
374
+ class FlavaTextEmbeddings(nn.Module):
375
+ """Construct the embeddings from word, position and token_type embeddings."""
376
+
377
+ def __init__(self, config):
378
+ super().__init__()
379
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
380
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
381
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
382
+
383
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
384
+ # any TensorFlow checkpoint file
385
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
386
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
387
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
388
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
389
+ self.register_buffer(
390
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
391
+ )
392
+ self.register_buffer(
393
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
394
+ )
395
+
396
+ def forward(
397
+ self,
398
+ input_ids: Optional[torch.Tensor] = None,
399
+ token_type_ids: Optional[torch.Tensor] = None,
400
+ position_ids: Optional[torch.Tensor] = None,
401
+ ):
402
+ input_shape = input_ids.size()
403
+ seq_length = input_shape[1]
404
+
405
+ if position_ids is None:
406
+ position_ids = self.position_ids[:, :seq_length]
407
+
408
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
409
+ # when it's auto-generated; the registered buffer helps users when tracing the model without passing token_type_ids, and solves
410
+ # issue #5664
411
+ if token_type_ids is None:
412
+ if hasattr(self, "token_type_ids"):
413
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
414
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
415
+ token_type_ids = buffered_token_type_ids_expanded
416
+ else:
417
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
418
+
419
+ inputs_embeds = self.word_embeddings(input_ids)
420
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
421
+
422
+ embeddings = inputs_embeds + token_type_embeddings
423
+ if self.position_embedding_type == "absolute":
424
+ position_embeddings = self.position_embeddings(position_ids)
425
+ embeddings += position_embeddings
426
+ embeddings = self.LayerNorm(embeddings)
427
+ embeddings = self.dropout(embeddings)
428
+ return embeddings
429
+
430
+
431
+ class FlavaSelfAttention(nn.Module):
432
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
433
+ super().__init__()
434
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
435
+ raise ValueError(
436
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
437
+ f"heads {config.num_attention_heads}."
438
+ )
439
+
440
+ self.num_attention_heads = config.num_attention_heads
441
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
442
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
443
+
444
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
445
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
446
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
447
+
448
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
449
+
450
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
451
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
452
+ x = x.view(*new_x_shape)
453
+ return x.permute(0, 2, 1, 3)
454
+
455
+ def forward(
456
+ self,
457
+ hidden_states: torch.Tensor,
458
+ attention_mask: Optional[torch.Tensor] = None,
459
+ head_mask: Optional[torch.Tensor] = None,
460
+ output_attentions: bool = False,
461
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
462
+ mixed_query_layer = self.query(hidden_states)
463
+
464
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
465
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
466
+ query_layer = self.transpose_for_scores(mixed_query_layer)
467
+
468
+ # Take the dot product between "query" and "key" to get the raw attention scores.
469
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
470
+
471
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
472
+ if attention_mask is not None:
473
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
474
+ attention_scores = attention_scores + attention_mask
475
+
476
+ # Normalize the attention scores to probabilities.
477
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
480
+
481
+ # This is actually dropping out entire tokens to attend to, which might
482
+ # seem a bit unusual, but is taken from the original Transformer paper.
483
+ attention_probs = self.dropout(attention_probs)
484
+
485
+ # Mask heads if we want to
486
+ if head_mask is not None:
487
+ attention_probs = attention_probs * head_mask
488
+
489
+ context_layer = torch.matmul(attention_probs, value_layer)
490
+
491
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
492
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
493
+ context_layer = context_layer.view(*new_context_layer_shape)
494
+
495
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
496
+
497
+ return outputs
498
+
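For reference, the head-splitting performed by `transpose_for_scores` in `FlavaSelfAttention` above rearranges the hidden states as in this small sketch (the hidden size and head count are illustrative values, not taken from this diff):

    import torch

    batch, seq_len, hidden_size, num_heads = 2, 197, 768, 12  # illustrative values
    head_size = hidden_size // num_heads                       # 64
    x = torch.zeros(batch, seq_len, hidden_size)
    # (batch, seq, hidden) -> (batch, seq, heads, head_size) -> (batch, heads, seq, head_size)
    x = x.view(batch, seq_len, num_heads, head_size).permute(0, 2, 1, 3)
    assert x.shape == (batch, num_heads, seq_len, head_size)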
499
+
500
+ class FlavaSelfOutput(nn.Module):
501
+ """
502
+ The residual connection is defined in FlavaLayer (same as ViTLayer) instead of here (as is the case with other
503
+ models), due to the layernorm applied before each block.
504
+ """
505
+
506
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
507
+ super().__init__()
508
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
509
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
510
+
511
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
512
+ hidden_states = self.dense(hidden_states)
513
+ hidden_states = self.dropout(hidden_states)
514
+
515
+ return hidden_states
516
+
517
+
518
+ class FlavaAttention(nn.Module):
519
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
520
+ super().__init__()
521
+ self.attention = FlavaSelfAttention(config)
522
+ self.output = FlavaSelfOutput(config)
523
+ self.pruned_heads = set()
524
+
525
+ def prune_heads(self, heads: Set[int]) -> None:
526
+ if len(heads) == 0:
527
+ return
528
+ heads, index = find_pruneable_heads_and_indices(
529
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
530
+ )
531
+
532
+ # Prune linear layers
533
+ self.attention.query = prune_linear_layer(self.attention.query, index)
534
+ self.attention.key = prune_linear_layer(self.attention.key, index)
535
+ self.attention.value = prune_linear_layer(self.attention.value, index)
536
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
537
+
538
+ # Update hyper params and store pruned heads
539
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
540
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
541
+ self.pruned_heads = self.pruned_heads.union(heads)
542
+
543
+ def forward(
544
+ self,
545
+ hidden_states: torch.Tensor,
546
+ attention_mask: Optional[torch.Tensor] = None,
547
+ head_mask: Optional[torch.Tensor] = None,
548
+ output_attentions: bool = False,
549
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
550
+ self_outputs = self.attention(
551
+ hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions
552
+ )
553
+
554
+ attention_output = self.output(self_outputs[0], hidden_states)
555
+
556
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
557
+ return outputs
558
+
559
+
560
+ class FlavaIntermediate(nn.Module):
561
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
562
+ super().__init__()
563
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
564
+ if isinstance(config.hidden_act, str):
565
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
566
+ else:
567
+ self.intermediate_act_fn = config.hidden_act
568
+
569
+ # Copied from transformers.models.vit.modeling_vit.ViTIntermediate.forward
570
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
571
+ hidden_states = self.dense(hidden_states)
572
+ hidden_states = self.intermediate_act_fn(hidden_states)
573
+
574
+ return hidden_states
575
+
576
+
577
+ class FlavaOutput(nn.Module):
578
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
579
+ super().__init__()
580
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
581
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
582
+
583
+ # Copied from transformers.models.vit.modeling_vit.ViTOutput.forward
584
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
585
+ hidden_states = self.dense(hidden_states)
586
+ hidden_states = self.dropout(hidden_states)
587
+
588
+ hidden_states = hidden_states + input_tensor
589
+
590
+ return hidden_states
591
+
592
+
593
+ class FlavaLayer(nn.Module):
594
+ """This corresponds to the Block class in the timm implementation."""
595
+
596
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
597
+ super().__init__()
598
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
599
+ self.seq_len_dim = 1
600
+ self.attention = FlavaAttention(config)
601
+ self.intermediate = FlavaIntermediate(config)
602
+ self.output = FlavaOutput(config)
603
+
604
+ # TODO: Check fp32 layer norm possibility
605
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
606
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
607
+
608
+ def forward(
609
+ self,
610
+ hidden_states: torch.Tensor,
611
+ attention_mask: Optional[torch.Tensor] = None,
612
+ head_mask: Optional[torch.Tensor] = None,
613
+ output_attentions: bool = False,
614
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
615
+ self_attention_outputs = self.attention(
616
+ self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention
617
+ attention_mask=attention_mask,
618
+ head_mask=head_mask,
619
+ output_attentions=output_attentions,
620
+ )
621
+ attention_output = self_attention_outputs[0]
622
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
623
+
624
+ # first residual connection
625
+ hidden_states = attention_output + hidden_states
626
+
627
+ # in ViT, layernorm is also applied after self-attention
628
+ layer_output = self.layernorm_after(hidden_states)
629
+ layer_output = self.intermediate(layer_output)
630
+
631
+ # second residual connection is done here
632
+ layer_output = self.output(layer_output, hidden_states)
633
+
634
+ outputs = (layer_output,) + outputs
635
+
636
+ return outputs
637
+
638
+
639
+ class FlavaEncoder(nn.Module):
640
+ def __init__(self, config: FlavaConfig) -> None:
641
+ super().__init__()
642
+ self.config = config
643
+ self.layer = nn.ModuleList([FlavaLayer(config) for _ in range(config.num_hidden_layers)])
644
+ self.gradient_checkpointing = False
645
+
646
+ def forward(
647
+ self,
648
+ hidden_states: torch.Tensor,
649
+ attention_mask: Optional[torch.Tensor] = None,
650
+ head_mask: Optional[torch.Tensor] = None,
651
+ output_attentions: bool = False,
652
+ output_hidden_states: bool = False,
653
+ return_dict: bool = True,
654
+ ) -> Union[tuple, BaseModelOutput]:
655
+ all_hidden_states = () if output_hidden_states else None
656
+ all_self_attentions = () if output_attentions else None
657
+
658
+ for i, layer_module in enumerate(self.layer):
659
+ if output_hidden_states:
660
+ all_hidden_states = all_hidden_states + (hidden_states,)
661
+
662
+ layer_head_mask = head_mask[i] if head_mask is not None else None
663
+
664
+ if self.gradient_checkpointing and self.training:
665
+ layer_outputs = self._gradient_checkpointing_func(
666
+ layer_module.__call__,
667
+ hidden_states,
668
+ attention_mask,
669
+ layer_head_mask,
670
+ output_attentions,
671
+ )
672
+ else:
673
+ layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
674
+
675
+ hidden_states = layer_outputs[0]
676
+
677
+ if output_attentions:
678
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
679
+
680
+ if output_hidden_states:
681
+ all_hidden_states = all_hidden_states + (hidden_states,)
682
+
683
+ if not return_dict:
684
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
685
+ return BaseModelOutput(
686
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
687
+ )
688
+
689
+
690
+ class FlavaPooler(nn.Module):
691
+ def __init__(self, config: FlavaPossibleConfigs):
692
+ super().__init__()
693
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
694
+ self.activation = nn.Tanh()
695
+
696
+ def forward(self, hidden_states: torch.Tensor):
697
+ # We "pool" the model by simply taking the hidden state corresponding
698
+ # to the first token.
699
+ first_token_tensor = hidden_states[:, 0]
700
+ pooled_output = self.dense(first_token_tensor)
701
+ pooled_output = self.activation(pooled_output)
702
+ return pooled_output
703
+
704
+
705
+ FLAVA_START_DOCSTRING = r"""
706
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
707
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
708
+ behavior.
709
+
710
+ Parameters:
711
+ config ([`{config}`]): Model configuration class with all the parameters of the model.
712
+ Initializing with a config file does not load the weights associated with the model, only the
713
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
714
+ """
715
+
716
+ FLAVA_INPUTS_DOCSTRING_COMMON = r"""
717
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
718
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
719
+ - 1 for tokens that are **not masked**,
720
+ - 0 for tokens that are **masked**.
721
+ [What are attention masks?](../glossary#attention-mask)
722
+
723
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
724
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
725
+
726
+ - 1 indicates the head is **not masked**,
727
+ - 0 indicates the head is **masked**.
728
+
729
+ output_attentions (`bool`, *optional*):
730
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
731
+ tensors for more detail.
732
+ output_hidden_states (`bool`, *optional*):
733
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
734
+ more detail.
735
+
736
+ return_dict (`bool`, *optional*):
737
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
738
+ """
739
+
740
+ FLAVA_IMAGE_INPUTS_DOCSTRING_BASE = r"""
741
+ Args:
742
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
743
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
744
+ [`FlavaImageProcessor.__call__`] for details.
745
+
746
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
747
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
748
+
749
+ interpolate_pos_encoding (`bool`, *optional*):
750
+ Whether to interpolate the pre-trained position encodings.
751
+ """
752
+
753
+ FLAVA_IMAGE_INPUTS_DOCSTRING = FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON
754
+
755
+ FLAVA_TEXT_INPUTS_DOCSTRING_BASE = r"""
756
+ Args:
757
+ input_ids (`torch.LongTensor` of shape `({0})`):
758
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
759
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
760
+ IDs?](../glossary#input-ids)
761
+
762
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
763
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
764
+ 1]`:
765
+ - 0 corresponds to a *sentence A* token,
766
+ - 1 corresponds to a *sentence B* token.
767
+ [What are token type IDs?](../glossary#token-type-ids)
768
+ """
769
+
770
+ FLAVA_TEXT_INPUTS_DOCSTRING = FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON
771
+
772
+ FLAVA_MULTIMODAL_INPUTS_DOCSTRING = (
773
+ r"""
774
+ Args:
775
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, image_num_patches + text_seq_len, hidden_size)`):
776
+ The concatenated hidden states of unimodal encoders.
777
+ """
778
+ + FLAVA_INPUTS_DOCSTRING_COMMON
779
+ )
780
+
781
+ FLAVA_MODEL_INPUTS_DOCSTRING_BASE = r"""
782
+ Args:
783
+ skip_multimodal_encoder (*bool*, *optional*):
784
+ Skip any calculations for multimodal encoder. Useful if multimodal encoding is not going to be used.
785
+ """
786
+
787
+ FLAVA_MODEL_INPUTS_DOCSTRING = (
788
+ FLAVA_IMAGE_INPUTS_DOCSTRING_BASE
789
+ + FLAVA_TEXT_INPUTS_DOCSTRING_BASE
790
+ + FLAVA_INPUTS_DOCSTRING_COMMON
791
+ + FLAVA_MODEL_INPUTS_DOCSTRING_BASE
792
+ )
793
+
794
+
795
+ FLAVA_PRETRAINING_INPUTS_DOCSTRING = (
796
+ r"""
797
+ Args:
798
+ input_ids_masked (`torch.LongTensor` of shape `({0})`):
799
+ Indices of input sequence tokens in the vocabulary. These ones are the masked version of the original task
800
+ to be used with MLM. Indices can be obtained using [`AutoTokenizer`] along with
801
+ [`DataCollatorForMaskedLanguageModeling`]. See [`PreTrainedTokenizer.encode`] and
802
+ [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
803
+
804
+ """
805
+ + FLAVA_TEXT_INPUTS_DOCSTRING_BASE
806
+ + FLAVA_IMAGE_INPUTS_DOCSTRING_BASE
807
+ + r"""
808
+ image_attention_mask (`torch.FloatTensor` of shape `({1})`, *optional*):
809
+ Mask to avoid performing attention on padding token indices specifically for images. Mask values selected
810
+ in `[0, 1]`:
811
+ - 1 for tokens that are **not masked**,
812
+ - 0 for tokens that are **masked**.
813
+ [What are attention masks?](../glossary#attention-mask)
814
+
815
+ skip_unmasked_multimodal_encoder (*bool*, *optional*):
816
+ Skip any calculations for multimodal encoder for unmasked inputs. FLAVA pretraining doesn't need unmasked
817
+ multimodal embeddings or outputs as of now.
818
+
819
+ mlm_labels (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
820
+ Labels for computing the masked language and multimodal masked modeling losses.
821
+ Indices should be in `[-100, 0, ..., text_config.vocab_size - 1]` (see `input_ids` docstring). Tokens with
822
+ indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0,
823
+ ..., text_config.vocab_size - 1]`.
824
+
825
+ mim_labels (`torch.LongTensor` of shape `(batch_size, image_num_patches)`, *optional*):
826
+ Labels for computing the image and multimodal masked modeling loss. Indices should be in `[-100, 0, ...,
827
+ image_config.vocab_size - 1]`. Tokens with indices set to `-100` are ignored (masked), the loss is only
828
+ computed for the tokens with labels in `[0, ..., image_config.vocab_size - 1]`. If not passed, they are
829
+ generated automatically using the image codebook assigned to the model. By default, it uses
830
+ [`FlavaImageCodebook`]. See [`FlavaImageCodebook`] to understand how to generate mim_labels.
831
+
832
+ itm_labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
833
+ Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
834
+ The pairs with 0 will be skipped for calculation of MMM and global contrastive losses as well.
835
+
836
+ return_loss (`bool`, *optional*, defaults to `None`):
837
+ Whether to return calculated loss or not.
838
+ """
839
+ + FLAVA_INPUTS_DOCSTRING_COMMON
840
+ )
841
+
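The `-100` convention used by `mlm_labels` and `mim_labels` in the pretraining docstring above means the loss is only computed at masked positions; a tiny illustration (all token ids below are made up):

    import torch

    input_ids_masked = torch.tensor([[101, 103, 2003, 102]])  # 103 stands in for a hypothetical [MASK] id
    mlm_labels = torch.tensor([[-100, 7592, -100, -100]])     # only the masked position contributes to the loss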
842
+ FLAVA_PRETRAINING_START_DOCSTRING_EXTRA = r"""
843
+ Parameters:
844
+ image_codebook ([`nn.Module`]): If passed, the image codebook will be set to this. Otherwise, it will
845
+ be initialized using the `image_codebook_config` defined in the config.
846
+ """
847
+
848
+
849
+ class FlavaPreTrainedModel(PreTrainedModel):
850
+ """
851
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
852
+ models.
853
+ """
854
+
855
+ config_class = FlavaConfig
856
+ base_model_prefix = "flava"
857
+ supports_gradient_checkpointing = True
858
+
859
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
860
+ """Initialize the weights"""
861
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
862
+ # Slightly different from the TF version which uses truncated_normal for initialization
863
+ # cf https://github.com/pytorch/pytorch/pull/5617
864
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
865
+ if module.bias is not None:
866
+ module.bias.data.zero_()
867
+ elif isinstance(module, nn.Embedding):
868
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
869
+ if module.padding_idx is not None:
870
+ module.weight.data[module.padding_idx].zero_()
871
+ elif isinstance(module, nn.LayerNorm):
872
+ module.bias.data.zero_()
873
+ module.weight.data.fill_(1.0)
874
+
875
+
876
+ @add_start_docstrings(
877
+ "The bare FLAVA Image Model transformer outputting raw hidden-states without any specific head on top.",
878
+ FLAVA_START_DOCSTRING.format(config="FlavaImageConfig"),
879
+ )
880
+ class FlavaImageModel(FlavaPreTrainedModel):
881
+ config_class = FlavaImageConfig
882
+ # This override allows us to load FlavaImageModel from FlavaModel/FlavaForPreTraining checkpoints.
883
+ base_model_prefix = "flava.image_model"
884
+ main_input_name = "pixel_values"
885
+
886
+ def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool = True):
887
+ super().__init__(config)
888
+
889
+ self.config = config
890
+
891
+ self.embeddings = FlavaImageEmbeddings(config)
892
+ self.encoder = FlavaEncoder(config)
893
+
894
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
895
+ self.pooler = FlavaPooler(config) if add_pooling_layer else None
896
+
897
+ self.post_init()
898
+
899
+ def get_input_embeddings(self) -> nn.Module:
900
+ return self.embeddings.patch_embeddings
901
+
902
+ def set_input_embeddings(self, value: nn.Module):
903
+ self.embeddings.patch_embeddings = value
904
+
905
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
906
+ """
907
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
908
+ class PreTrainedModel
909
+ """
910
+ for layer, heads in heads_to_prune.items():
911
+ self.encoder.layer[layer].attention.prune_heads(heads)
912
+
913
+ @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
914
+ @add_code_sample_docstrings(
915
+ checkpoint=_CHECKPOINT_FOR_DOC,
916
+ output_type=BaseModelOutputWithPooling,
917
+ config_class=_CONFIG_CLASS_FOR_IMAGE_MODEL_DOC,
918
+ modality="vision",
919
+ expected_output=_EXPECTED_IMAGE_OUTPUT_SHAPE,
920
+ )
921
+ def forward(
922
+ self,
923
+ pixel_values: Optional[torch.Tensor] = None,
924
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
925
+ interpolate_pos_encoding: Optional[bool] = None,
926
+ attention_mask: Optional[torch.Tensor] = None,
927
+ head_mask: Optional[torch.Tensor] = None,
928
+ output_attentions: Optional[bool] = None,
929
+ output_hidden_states: Optional[bool] = None,
930
+ return_dict: Optional[bool] = None,
931
+ ) -> Union[tuple, BaseModelOutputWithPooling]:
932
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
933
+ output_hidden_states = (
934
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
935
+ )
936
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
937
+
938
+ if pixel_values is None:
939
+ raise ValueError("You have to specify pixel_values")
940
+
941
+ # Prepare head mask if needed
942
+ # 1.0 in head_mask indicates we keep the head
943
+ # attention_probs has shape bsz x n_heads x N x N
944
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
945
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
946
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
947
+
948
+ embedding_output = self.embeddings(
949
+ pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
950
+ )
951
+
952
+ encoder_outputs = self.encoder(
953
+ embedding_output,
954
+ attention_mask=attention_mask,
955
+ head_mask=head_mask,
956
+ output_attentions=output_attentions,
957
+ output_hidden_states=output_hidden_states,
958
+ return_dict=return_dict,
959
+ )
960
+ sequence_output = encoder_outputs[0]
961
+ sequence_output = self.layernorm(sequence_output)
962
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
963
+
964
+ if not return_dict:
965
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
966
+
967
+ return BaseModelOutputWithPooling(
968
+ last_hidden_state=sequence_output,
969
+ pooler_output=pooled_output,
970
+ hidden_states=encoder_outputs.hidden_states,
971
+ attentions=encoder_outputs.attentions,
972
+ )
973
+
974
+
975
+ @add_start_docstrings(
976
+ "The bare FLAVA Text Model transformer outputting raw hidden-states without any specific head on top.",
977
+ FLAVA_START_DOCSTRING.format(config="FlavaTextConfig"),
978
+ )
979
+ class FlavaTextModel(FlavaPreTrainedModel):
980
+ config_class = FlavaTextConfig
981
+ # This override allows us to load FlavaTextModel from FlavaModel/FlavaForPreTraining checkpoints.
982
+ base_model_prefix = "flava.text_model"
983
+
984
+ def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool = True):
985
+ super().__init__(config)
986
+ self.config = config
987
+
988
+ self.embeddings = FlavaTextEmbeddings(config)
989
+ self.encoder = FlavaEncoder(config)
990
+
991
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
992
+ self.pooler = FlavaPooler(config) if add_pooling_layer else None
993
+
994
+ self.post_init()
995
+
996
+ def get_input_embeddings(self) -> nn.Module:
997
+ return self.embeddings.word_embeddings
998
+
999
+ def set_input_embeddings(self, value: nn.Module):
1000
+ self.embeddings.word_embeddings = value
1001
+
1002
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
1003
+ """
1004
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
1005
+ class PreTrainedModel
1006
+ """
1007
+ for layer, heads in heads_to_prune.items():
1008
+ self.encoder.layer[layer].attention.prune_heads(heads)
1009
+
1010
+ @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
1011
+ @add_code_sample_docstrings(
1012
+ checkpoint=_CHECKPOINT_FOR_DOC,
1013
+ output_type=BaseModelOutputWithPooling,
1014
+ config_class=_CONFIG_CLASS_FOR_TEXT_MODEL_DOC,
1015
+ )
1016
+ def forward(
1017
+ self,
1018
+ input_ids: Optional[torch.Tensor] = None,
1019
+ attention_mask: Optional[torch.Tensor] = None,
1020
+ token_type_ids: Optional[torch.Tensor] = None,
1021
+ position_ids: Optional[torch.Tensor] = None,
1022
+ head_mask: Optional[torch.Tensor] = None,
1023
+ output_attentions: Optional[bool] = None,
1024
+ output_hidden_states: Optional[bool] = None,
1025
+ return_dict: Optional[bool] = None,
1026
+ ) -> Union[tuple, BaseModelOutputWithPooling]:
1027
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1028
+ output_hidden_states = (
1029
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1030
+ )
1031
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1032
+
1033
+ if input_ids is None:
1034
+ raise ValueError("You have to specify input_ids")
1035
+
1036
+ input_shape = input_ids.size()
1037
+
1038
+ if attention_mask is None:
1039
+ attention_mask = torch.ones(input_shape, device=input_ids.device)
1040
+
1041
+ # Prepare head mask if needed
1042
+ # 1.0 in head_mask indicates we keep the head
1043
+ # attention_probs has shape bsz x n_heads x N x N
1044
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1045
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1046
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1047
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
1048
+ attention_mask, input_shape, input_ids.device
1049
+ )
1050
+
1051
+ embedding_output = self.embeddings(
1052
+ input_ids=input_ids,
1053
+ token_type_ids=token_type_ids,
1054
+ position_ids=position_ids,
1055
+ )
1056
+
1057
+ encoder_outputs = self.encoder(
1058
+ embedding_output,
1059
+ attention_mask=extended_attention_mask,
1060
+ head_mask=head_mask,
1061
+ output_attentions=output_attentions,
1062
+ output_hidden_states=output_hidden_states,
1063
+ return_dict=return_dict,
1064
+ )
1065
+ sequence_output = encoder_outputs[0]
1066
+ sequence_output = self.layernorm(sequence_output)
1067
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1068
+
1069
+ if not return_dict:
1070
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1071
+
1072
+ return BaseModelOutputWithPooling(
1073
+ last_hidden_state=sequence_output,
1074
+ pooler_output=pooled_output,
1075
+ hidden_states=encoder_outputs.hidden_states,
1076
+ attentions=encoder_outputs.attentions,
1077
+ )
1078
+
1079
+
1080
+ @add_start_docstrings(
1081
+ "The bare FLAVA Multimodal Model transformer outputting raw hidden-states without any specific head on top.",
1082
+ FLAVA_START_DOCSTRING.format(config="FlavaMultimodalConfig"),
1083
+ )
1084
+ class FlavaMultimodalModel(FlavaPreTrainedModel):
1085
+ config_class = FlavaMultimodalConfig
1086
+ # This override allows us to load FlavaMultimodalModel from FlavaModel/FlavaForPreTraining checkpoints.
1087
+ base_model_prefix = "flava.multimodal_model"
1088
+ main_input_name = "hidden_states"
1089
+
1090
+ def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True):
1091
+ super().__init__(config)
1092
+ self.config = config
1093
+ self.use_cls_token = self.config.use_cls_token
1094
+ if self.use_cls_token:
1095
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
1096
+
1097
+ self.encoder = FlavaEncoder(config)
1098
+
1099
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1100
+ self.pooler = FlavaPooler(config) if add_pooling_layer else None
1101
+
1102
+ self.post_init()
1103
+
1104
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
1105
+ """
1106
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
1107
+ class PreTrainedModel
1108
+ """
1109
+ for layer, heads in heads_to_prune.items():
1110
+ self.encoder.layer[layer].attention.prune_heads(heads)
1111
+
1112
+ @add_start_docstrings_to_model_forward(
1113
+ FLAVA_MULTIMODAL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
1114
+ )
1115
+ @add_code_sample_docstrings(
1116
+ checkpoint=_CHECKPOINT_FOR_DOC,
1117
+ output_type=BaseModelOutputWithPooling,
1118
+ config_class=_CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC,
1119
+ )
1120
+ def forward(
1121
+ self,
1122
+ hidden_states: torch.Tensor,
1123
+ attention_mask: Optional[torch.Tensor] = None,
1124
+ head_mask: Optional[torch.Tensor] = None,
1125
+ output_attentions: Optional[bool] = None,
1126
+ output_hidden_states: Optional[bool] = None,
1127
+ return_dict: Optional[bool] = None,
1128
+ ) -> Union[tuple, BaseModelOutputWithPooling]:
1129
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1130
+ output_hidden_states = (
1131
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1132
+ )
1133
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1134
+
1135
+ batch_size, seq_length, _ = hidden_states.size()
1136
+
1137
+ if self.use_cls_token:
1138
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
1139
+ hidden_states = torch.cat((cls_tokens, hidden_states), dim=1)
1140
+ seq_length += 1
1141
+
1142
+ if attention_mask is None:
1143
+ attention_mask = torch.ones((batch_size, seq_length), device=hidden_states.device)
1144
+
1145
+ # Prepare head mask if needed
1146
+ # 1.0 in head_mask indicates we keep the head
1147
+ # attention_probs has shape bsz x n_heads x N x N
1148
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1149
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1150
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1151
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
1152
+ attention_mask, (batch_size, seq_length), hidden_states.device
1153
+ )
1154
+
1155
+ encoder_outputs = self.encoder(
1156
+ hidden_states,
1157
+ attention_mask=extended_attention_mask,
1158
+ head_mask=head_mask,
1159
+ output_attentions=output_attentions,
1160
+ output_hidden_states=output_hidden_states,
1161
+ return_dict=return_dict,
1162
+ )
1163
+ sequence_output = encoder_outputs[0]
1164
+ sequence_output = self.layernorm(sequence_output)
1165
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1166
+
1167
+ if not return_dict:
1168
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1169
+
1170
+ return BaseModelOutputWithPooling(
1171
+ last_hidden_state=sequence_output,
1172
+ pooler_output=pooled_output,
1173
+ hidden_states=encoder_outputs.hidden_states,
1174
+ attentions=encoder_outputs.attentions,
1175
+ )
1176
+
1177
+
1178
+ @add_start_docstrings(
1179
+ "The bare FLAVA Model transformer outputting raw hidden-states without any specific head on top.",
1180
+ FLAVA_START_DOCSTRING.format(config="FlavaConfig"),
1181
+ )
1182
+ class FlavaModel(FlavaPreTrainedModel):
1183
+ config_class = FlavaConfig
1184
+
1185
+ def __init__(self, config: FlavaConfig):
1186
+ super().__init__(config)
1187
+
1188
+ if not isinstance(config.text_config, FlavaTextConfig):
1189
+ raise ValueError(
1190
+ "config.text_config is expected to be of type FlavaTextConfig but is of type"
1191
+ f" {type(config.text_config)}."
1192
+ )
1193
+
1194
+ if not isinstance(config.image_config, FlavaImageConfig):
1195
+ raise ValueError(
1196
+ "config.image_config is expected to be of type FlavaImageConfig but is of type"
1197
+ f" {type(config.image_config)}."
1198
+ )
1199
+
1200
+ if not isinstance(config.multimodal_config, FlavaMultimodalConfig):
1201
+ raise ValueError(
1202
+ "config.multimodal_config is expected to be of type FlavaMultimodalConfig but "
1203
+ + f"is of type {type(config.multimodal_config)}."
1204
+ )
1205
+
1206
+ text_config = config.text_config
1207
+ image_config = config.image_config
1208
+ multimodal_config = config.multimodal_config
1209
+
1210
+ self.projection_dim = config.projection_dim
1211
+ self.text_hidden_size = text_config.hidden_size
1212
+ self.image_hidden_size = image_config.hidden_size
1213
+ self.mm_hidden_size = multimodal_config.hidden_size
1214
+
1215
+ self.text_model = FlavaTextModel(text_config)
1216
+ self.image_model = FlavaImageModel(image_config)
1217
+ self.multimodal_model = FlavaMultimodalModel(multimodal_config)
1218
+
1219
+ self.image_projection = nn.Linear(self.image_hidden_size, self.projection_dim)
1220
+ self.text_projection = nn.Linear(self.text_hidden_size, self.projection_dim)
1221
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
1222
+
1223
+ self.image_to_mm_projection = nn.Linear(self.image_hidden_size, self.mm_hidden_size)
1224
+ self.text_to_mm_projection = nn.Linear(self.text_hidden_size, self.mm_hidden_size)
1225
+ # Initialize weights and apply final processing
1226
+ self.post_init()
1227
+
1228
+ @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
1229
+ def get_text_features(
1230
+ self,
1231
+ input_ids: Optional[torch.Tensor] = None,
1232
+ attention_mask: Optional[torch.Tensor] = None,
1233
+ token_type_ids: Optional[torch.Tensor] = None,
1234
+ position_ids: Optional[torch.Tensor] = None,
1235
+ output_attentions: Optional[bool] = None,
1236
+ output_hidden_states: Optional[bool] = None,
1237
+ return_dict: Optional[bool] = None,
1238
+ ) -> torch.FloatTensor:
1239
+ r"""
1240
+ Returns:
1241
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1242
+ applying the projection layer to the pooled output of [`FlavaTextModel`].
1243
+
1244
+ Examples:
1245
+
1246
+ ```python
1247
+ >>> from transformers import AutoProcessor, FlavaModel
1248
+
1249
+ >>> model = FlavaModel.from_pretrained("{0}")
1250
+ >>> processor = AutoProcessor.from_pretrained("{0}")
1251
+
1252
+ >>> inputs = processor(
1253
+ ... text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt"
1254
+ ... )
1255
+ >>> text_features = model.get_text_features(**inputs)
1256
+ ```""".format(_CHECKPOINT_FOR_DOC)
1257
+ text_outputs = self.text_model(
1258
+ input_ids=input_ids,
1259
+ attention_mask=attention_mask,
1260
+ token_type_ids=token_type_ids,
1261
+ position_ids=position_ids,
1262
+ output_attentions=output_attentions,
1263
+ output_hidden_states=output_hidden_states,
1264
+ return_dict=return_dict,
1265
+ )
1266
+
1267
+ pooled_output = text_outputs[0] # last_hidden_state
1268
+ text_features = self.text_projection(pooled_output)
1269
+
1270
+ return text_features
1271
+
1272
+ @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
1273
+ def get_image_features(
1274
+ self,
1275
+ pixel_values: Optional[torch.Tensor] = None,
1276
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
1277
+ interpolate_pos_encoding: Optional[bool] = None,
1278
+ attention_mask: Optional[torch.Tensor] = None,
1279
+ head_mask: Optional[torch.Tensor] = None,
1280
+ output_attentions: Optional[bool] = None,
1281
+ output_hidden_states: Optional[bool] = None,
1282
+ return_dict: Optional[bool] = None,
1283
+ ) -> torch.FloatTensor:
1284
+ r"""
1285
+ Returns:
1286
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1287
+ applying the projection layer to the pooled output of [`FlavaImageModel`].
1288
+
1289
+ Examples:
1290
+
1291
+ ```python
1292
+ >>> from PIL import Image
1293
+ >>> import requests
1294
+ >>> from transformers import AutoProcessor, FlavaModel
1295
+
1296
+ >>> model = FlavaModel.from_pretrained("{0}")
1297
+ >>> processor = AutoProcessor.from_pretrained("{0}")
1298
+
1299
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1300
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1301
+
1302
+ >>> inputs = processor(images=image, return_tensors="pt")
1303
+
1304
+ >>> image_features = model.get_image_features(**inputs)
1305
+ ```""".format(_CHECKPOINT_FOR_DOC)
1306
+ image_outputs = self.image_model(
1307
+ pixel_values=pixel_values,
1308
+ bool_masked_pos=bool_masked_pos,
1309
+ attention_mask=attention_mask,
1310
+ head_mask=head_mask,
1311
+ output_attentions=output_attentions,
1312
+ output_hidden_states=output_hidden_states,
1313
+ interpolate_pos_encoding=interpolate_pos_encoding,
1314
+ return_dict=return_dict,
1315
+ )
1316
+
1317
+ pooled_output = image_outputs[0] # last_hidden_state
1318
+ image_features = self.image_projection(pooled_output)
1319
+
1320
+ return image_features
1321
+
1322
+ @add_start_docstrings_to_model_forward(
1323
+ FLAVA_MODEL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
1324
+ )
1325
+ @replace_return_docstrings(output_type=FlavaModelOutput, config_class=FlavaConfig)
1326
+ def forward(
1327
+ self,
1328
+ input_ids: Optional[torch.LongTensor] = None,
1329
+ pixel_values: Optional[torch.FloatTensor] = None,
1330
+ attention_mask: Optional[torch.Tensor] = None,
1331
+ token_type_ids: Optional[torch.Tensor] = None,
1332
+ bool_masked_pos: Optional[torch.Tensor] = None,
1333
+ position_ids: Optional[torch.LongTensor] = None,
1334
+ image_attention_mask: Optional[torch.Tensor] = None,
1335
+ skip_multimodal_encoder: Optional[bool] = None,
1336
+ output_attentions: Optional[bool] = None,
1337
+ output_hidden_states: bool = True,
1338
+ return_dict: Optional[bool] = None,
1339
+ ) -> Union[Tuple, FlavaModelOutput]:
1340
+ r"""
1341
+ Returns:
1342
+
1343
+ Examples:
1344
+
1345
+ ```python
1346
+ >>> from PIL import Image
1347
+ >>> import requests
1348
+ >>> from transformers import AutoProcessor, FlavaModel
1349
+
1350
+ >>> model = FlavaModel.from_pretrained("facebook/flava-full")
1351
+ >>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
1352
+
1353
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1354
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1355
+
1356
+ >>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
1357
+
1358
+ >>> outputs = model(**inputs)
1359
+
1360
+ >>> image_embeddings = outputs.image_embeddings
1361
+ >>> text_embeddings = outputs.text_embeddings
1362
+ >>> multimodal_embeddings = outputs.multimodal_embeddings
1363
+
1364
+ >>> outputs.image_embeddings.shape
1365
+ torch.Size([1, 197, 768])
1366
+
1367
+ >>> text_embeddings.shape
1368
+ torch.Size([1, 7, 768])
1369
+
1370
+ >>> multimodal_embeddings.shape
1371
+ torch.Size([1, 205, 768])
1372
+ ```
1373
+ """
1374
+
1375
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1376
+ if not output_hidden_states:
1377
+ raise ValueError("FLAVA model requires hidden states to work. Please set `output_hidden_states=True`")
1378
+ image_embeddings = None
1379
+ image_states = None
1380
+ image_mm_projection = None
1381
+ image_output = None
1382
+ if pixel_values is not None:
1383
+ image_output = self.image_model(
1384
+ pixel_values=pixel_values,
1385
+ bool_masked_pos=bool_masked_pos,
1386
+ attention_mask=image_attention_mask,
1387
+ output_attentions=output_attentions,
1388
+ output_hidden_states=output_hidden_states,
1389
+ return_dict=return_dict,
1390
+ )
1391
+ image_embeddings, image_states = image_output[0], image_output[2]
1392
+ # Note that these states don't use final layernorm in the transformer model
1393
+ image_mm_projection = self.image_to_mm_projection(image_states[-1])
1394
+
1395
+ text_embeddings = None
1396
+ text_states = None
1397
+ text_mm_projection = None
1398
+ text_output = None
1399
+ if input_ids is not None:
1400
+ text_output = self.text_model(
1401
+ input_ids=input_ids,
1402
+ attention_mask=attention_mask,
1403
+ position_ids=position_ids,
1404
+ token_type_ids=token_type_ids,
1405
+ output_attentions=output_attentions,
1406
+ output_hidden_states=output_hidden_states,
1407
+ return_dict=return_dict,
1408
+ )
1409
+
1410
+ text_embeddings, text_states = text_output[0], text_output[2]
1411
+ # Note that these states don't use final layernorm in the transformer model
1412
+ text_mm_projection = self.text_to_mm_projection(text_states[-1])
1413
+
1414
+ multimodal_embeddings = None
1415
+ multimodal_output = None
1416
+ if image_mm_projection is not None and text_mm_projection is not None and not skip_multimodal_encoder:
1417
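+ # Image tokens are never padded, so give them an all-ones mask (plus one slot for the
+ # multimodal CLS token when it is used) and concatenate it with the text attention mask.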
+ if attention_mask is not None:
1418
+ batch_size, seq_len, _ = image_mm_projection.shape
1419
+ if self.multimodal_model.use_cls_token:
1420
+ seq_len += 1
1421
+ attention_mask_image = torch.ones(batch_size, seq_len, device=image_mm_projection.device)
1422
+ attention_multimodal = torch.cat([attention_mask_image, attention_mask], dim=1)
1423
+ else:
1424
+ attention_multimodal = None
1425
+ multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1)
1426
+ multimodal_output = self.multimodal_model(
1427
+ multimodal_input, attention_mask=attention_multimodal, return_dict=return_dict
1428
+ )
1429
+ multimodal_embeddings = multimodal_output[0]
1430
+
1431
+ if not return_dict:
1432
+ return (
1433
+ image_embeddings,
1434
+ image_output,
1435
+ text_embeddings,
1436
+ text_output,
1437
+ multimodal_embeddings,
1438
+ multimodal_output,
1439
+ )
1440
+
1441
+ return FlavaModelOutput(
1442
+ image_embeddings=image_embeddings,
1443
+ image_output=image_output,
1444
+ text_embeddings=text_embeddings,
1445
+ text_output=text_output,
1446
+ multimodal_embeddings=multimodal_embeddings,
1447
+ multimodal_output=multimodal_output,
1448
+ )
1449
+
1450
+
1451
+ class FlavaImageCodebookResPath(nn.Module):
1452
+ def __init__(self, in_size: int, out_size: int, **kwargs):
1453
+ super().__init__()
1454
+ hid_size = out_size // 4
1455
+
1456
+ path = OrderedDict()
1457
+ path["relu_1"] = nn.ReLU()
1458
+ path["conv_1"] = nn.Conv2d(in_size, hid_size, kernel_size=3, padding=1)
1459
+ path["relu_2"] = nn.ReLU()
1460
+ path["conv_2"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
1461
+ path["relu_3"] = nn.ReLU()
1462
+ path["conv_3"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
1463
+ path["relu_4"] = nn.ReLU()
1464
+ path["conv_4"] = nn.Conv2d(hid_size, out_size, kernel_size=1, padding=0)
1465
+
1466
+ self.path = nn.Sequential(path)
1467
+
1468
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1469
+ return self.path(x)
1470
+
1471
+
1472
+ class FlavaImageCodebookBlock(nn.Module):
1473
+ def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs):
1474
+ super().__init__()
1475
+
1476
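+ # Scale the residual branch by 1/num_layers**2, as in the DALL-E encoder this block is modeled on.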
+ self.post_gain = 1 / (num_layers**2)
1477
+
1478
+ if in_size != out_size:
1479
+ self.id_path = nn.Conv2d(in_size, out_size, kernel_size=1, padding=0)
1480
+ else:
1481
+ self.id_path = nn.Identity()
1482
+
1483
+ self.res_path = FlavaImageCodebookResPath(in_size, out_size)
1484
+
1485
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1486
+ return self.id_path(x) + self.post_gain * self.res_path(x)
1487
+
1488
+
1489
+ class FlavaImageCodebookLayerGroup(nn.Module):
1490
+ def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool = True):
1491
+ super().__init__()
1492
+ blocks = OrderedDict()
1493
+ for i in range(num_blocks):
1494
+ if i == 0:
1495
+ blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(in_size, out_size, num_layers)
1496
+ else:
1497
+ blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(out_size, out_size, num_layers)
1498
+
1499
+ if use_pool:
1500
+ blocks["pool"] = nn.MaxPool2d(kernel_size=2)
1501
+
1502
+ self.group = nn.Sequential(blocks)
1503
+
1504
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1505
+ return self.group(x)
1506
+
1507
+
1508
+ # Inspired by DALLE Encoder in https://github.com/openai/DALL-E/blob/5be4b236bc3ade6943662354117a0e83752cc322/dall_e/encoder.py#L42
1509
+ @add_start_docstrings(
1510
+ """
1511
+ The FLAVA's image codebook model inspired from DALL-E's original encoder. Outputs raw hidden states and can be used
1512
+ to generate image tokens for an image based on DALL-E's vocab. Used to generate labels for MIM. Use
1513
+ `get_codebook_indices` to get image tokens for an image.
1514
+ """,
1515
+ FLAVA_START_DOCSTRING.format(config="FlavaImageCodebookConfig"),
1516
+ )
1517
+ class FlavaImageCodebook(FlavaPreTrainedModel):
1518
+ base_model_prefix = ""
1519
+ config_class = FlavaImageCodebookConfig
1520
+ main_input_name = "pixel_values"
1521
+ supports_gradient_checkpointing = False
1522
+
1523
+ def __init__(
1524
+ self,
1525
+ config: FlavaImageCodebookConfig,
1526
+ **kwargs: Any,
1527
+ ):
1528
+ super().__init__(config)
1529
+
1530
+ self.config = config
1531
+ self.num_groups = config.num_groups
1532
+ self.input_channels = config.input_channels
1533
+ self.num_blocks_per_group = config.num_blocks_per_group
1534
+ self.hidden_size = config.hidden_size
1535
+ self.vocab_size = config.vocab_size
1536
+
1537
+ num_layers = self.num_groups * self.num_blocks_per_group
1538
+
1539
+ output_blocks = OrderedDict()
1540
+ output_blocks["relu"] = nn.ReLU()
1541
+ output_blocks["conv"] = nn.Conv2d(8 * self.hidden_size, self.vocab_size, kernel_size=1, padding=0)
1542
+
1543
+ blocks = OrderedDict()
1544
+ blocks["input"] = nn.Conv2d(self.input_channels, 1 * self.hidden_size, kernel_size=7, padding=3)
1545
+ blocks["group_1"] = FlavaImageCodebookLayerGroup(
1546
+ self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 1 * self.hidden_size
1547
+ )
1548
+ blocks["group_2"] = FlavaImageCodebookLayerGroup(
1549
+ self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 2 * self.hidden_size
1550
+ )
1551
+ blocks["group_3"] = FlavaImageCodebookLayerGroup(
1552
+ self.num_blocks_per_group, num_layers, 2 * self.hidden_size, 4 * self.hidden_size
1553
+ )
1554
+ blocks["group_4"] = FlavaImageCodebookLayerGroup(
1555
+ self.num_blocks_per_group, num_layers, 4 * self.hidden_size, 8 * self.hidden_size, use_pool=False
1556
+ )
1557
+ blocks["output"] = nn.Sequential(output_blocks)
1558
+
1559
+ self.blocks = nn.Sequential(blocks)
1560
+
1561
+ self.post_init()
1562
+
1563
+ if self.config.freeze:
1564
+ for param in self.parameters():
1565
+ param.requires_grad = False
1566
+
1567
+ def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor:
1568
+ """
1569
+ Args:
1570
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1571
+ Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing
1572
+ `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.
1573
+
1574
+ Examples:
1575
+ ```python
1576
+ >>> from PIL import Image
1577
+ >>> import requests
1578
+ >>> from transformers import AutoImageProcessor, FlavaImageCodebook
1579
+
1580
+ >>> model = FlavaImageCodebook.from_pretrained("{0}")
1581
+ >>> image_processor = AutoImageProcessor.from_pretrained("{0}")
1582
+
1583
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1584
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1585
+
1586
+ >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")
1587
+ >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)
1588
+
1589
+ >>> outputs = model.get_codebook_indices(**inputs)
1590
+ ```
1591
+ """.format(_CHECKPOINT_FOR_CODEBOOK_DOC)
1592
+ z_logits = self.blocks(pixel_values)
1593
+ return torch.argmax(z_logits, dim=1)
1594
+
1595
+ def get_codebook_probs(self, pixel_values: torch.Tensor) -> torch.Tensor:
1596
+ z_logits = self.blocks(pixel_values)
1597
+ return nn.Softmax(dim=1)(z_logits)
1598
+
1599
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
1600
+ """
1601
+ Args:
1602
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1603
+ Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing
1604
+ `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.
1605
+
1606
+ Examples:
1607
+
1608
+ ```python
1609
+ >>> from PIL import Image
1610
+ >>> import requests
1611
+ >>> from transformers import AutoImageProcessor, FlavaImageCodebook
1612
+
1613
+ >>> model = FlavaImageCodebook.from_pretrained("{0}")
1614
+ >>> image_processor = AutoImageProcessor.from_pretrained("{0}")
1615
+
1616
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1617
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1618
+
1619
+ >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")
1620
+ >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)
1621
+
1622
+ >>> outputs = model(**inputs)
1623
+ >>> print(outputs.shape)
1624
+ (1, 196)
1625
+ ```
1626
+ """.format(_CHECKPOINT_FOR_CODEBOOK_DOC)
1627
+ if len(pixel_values.shape) != 4:
1628
+ raise ValueError(f"input shape {pixel_values.shape} is not 4d")
1629
+ if pixel_values.shape[1] != self.input_channels:
1630
+ raise ValueError(f"input has {pixel_values.shape[1]} channels but model built for {self.input_channels}")
1631
+ return self.blocks(pixel_values)
1632
+
1633
+
1634
+ class FlavaPredictionHeadTransform(nn.Module):
1635
+ def __init__(self, config):
1636
+ super().__init__()
1637
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1638
+ if isinstance(config.hidden_act, str):
1639
+ self.transform_act_fn = ACT2FN[config.hidden_act]
1640
+ else:
1641
+ self.transform_act_fn = config.hidden_act
1642
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1643
+
1644
+ def forward(self, hidden_states):
1645
+ hidden_states = self.dense(hidden_states)
1646
+ hidden_states = self.transform_act_fn(hidden_states)
1647
+ hidden_states = self.LayerNorm(hidden_states)
1648
+ return hidden_states
1649
+
1650
+
1651
+ class FlavaMaskedPredictionHead(nn.Module):
1652
+ def __init__(self, config, weight=None):
1653
+ super().__init__()
1654
+ self.config = config
1655
+ self.transform = FlavaPredictionHeadTransform(config)
1656
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1657
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1658
+ if weight is not None:
1659
+ self.decoder.weight = weight
1660
+
1661
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
1662
+ self.decoder.bias = self.bias
1663
+
1664
+ def forward(self, x):
1665
+ x = self.transform(x)
1666
+ x = self.decoder(x)
1667
+ return x
1668
+
1669
+
1670
+ class FlavaITMHead(nn.Module):
1671
+ def __init__(self, config):
1672
+ super().__init__()
1673
+ self.config = config
1674
+ self.pooler = FlavaPooler(config)
1675
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
1676
+
1677
+ def forward(self, x):
1678
+ x = self.pooler(x)
1679
+ x = self.seq_relationship(x)
1680
+ return x
1681
+
1682
+
1683
+ class FlavaGlobalContrastiveHead(nn.Module):
1684
+ def __init__(self, config):
1685
+ super().__init__()
1686
+ self.config = config
1687
+ self.global_backprop_contrastive = config.global_backprop_contrastive
1688
+
1689
+ def forward(self, image_embeddings, text_embeddings, logit_scale):
1690
+ temperature = torch.exp(logit_scale)
1691
+ if not torch.distributed.is_available() or not torch.distributed.is_initialized():
1692
+ labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device)
1693
+ image_embeddings_all = [image_embeddings]
1694
+ text_embeddings_all = [text_embeddings]
1695
+ else:
1696
+ local_batch_size = image_embeddings.size(0)
1697
+ world_size = torch.distributed.get_world_size()
1698
+
1699
+ if self.global_backprop_contrastive:
1700
+ # `torch.distributed.nn.functional.all_gather` does backprop on all active workers
1701
+ # whereas `torch.distributed.all_gather` only backpropagates on the current worker.
1702
+ image_embeddings_all = torch.distributed.nn.functional.all_gather(image_embeddings)
1703
+ text_embeddings_all = torch.distributed.nn.functional.all_gather(text_embeddings)
1704
+ else:
1705
+ image_embeddings_all = [torch.zeros_like(text_embeddings) for _ in range(world_size)]
1706
+ text_embeddings_all = [torch.zeros_like(image_embeddings) for _ in range(world_size)]
1707
+ torch.distributed.all_gather(image_embeddings_all, image_embeddings)
1708
+ torch.distributed.all_gather(text_embeddings_all, text_embeddings)
1709
+
1710
+ labels = local_batch_size * torch.distributed.get_rank() + torch.arange(
1711
+ local_batch_size, device=image_embeddings.device
1712
+ )
1713
+
1714
+ image_embeddings_all = torch.cat(image_embeddings_all)
1715
+ text_embeddings_all = torch.cat(text_embeddings_all)
1716
+
1717
+ logits_per_image = torch.matmul(image_embeddings, text_embeddings_all.transpose(0, 1)) * temperature
1718
+ logits_per_text = torch.matmul(text_embeddings, image_embeddings_all.transpose(0, 1)) * temperature
1719
+
1720
+ return logits_per_image, logits_per_text, labels
1721
+
1722
+
1723
+ @add_start_docstrings(
1724
+ """
1725
+ The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.
1726
+ """,
1727
+ FLAVA_START_DOCSTRING.format(config="FlavaConfig") + FLAVA_PRETRAINING_START_DOCSTRING_EXTRA,
1728
+ )
1729
+ class FlavaForPreTraining(FlavaPreTrainedModel):
1730
+ # Those are linked to xxx.bias
1731
+ _tied_weights_keys = [
1732
+ "mmm_text_head.decoder.bias",
1733
+ "mmm_image_head.decoder.bias",
1734
+ "mlm_head.decoder.bias",
1735
+ "mim_head.decoder.bias",
1736
+ ]
1737
+
1738
+ def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module] = None):
1739
+ super().__init__(config)
1740
+ self.flava = FlavaModel(config)
1741
+
1742
+ self.image_codebook = image_codebook
1743
+ if self.image_codebook is None and config.init_codebook:
1744
+ self.image_codebook = FlavaImageCodebook(config.image_codebook_config)
1745
+
1746
+ # Leverage text and image encoder configs to create the masked
1747
+ # head since it has the right vocab
1748
+ self.mim_head = FlavaMaskedPredictionHead(config.image_config)
1749
+ self.mlm_head = FlavaMaskedPredictionHead(config.text_config)
1750
+ self.itm_head = FlavaITMHead(config)
1751
+ self.mmm_image_head = FlavaMaskedPredictionHead(config.image_config)
1752
+ self.mmm_text_head = FlavaMaskedPredictionHead(config.text_config)
1753
+ self.global_contrastive_head = FlavaGlobalContrastiveHead(config)
1754
+
1755
+ self.image_vocab_size = config.image_config.vocab_size
1756
+ self.text_vocab_size = config.text_config.vocab_size
1757
+ self.mlm_weight = config.mlm_weight
1758
+ self.mim_weight = config.mim_weight
1759
+ self.global_contrastive_weight = config.global_contrastive_weight
1760
+ self.ce_ignore_index = config.ce_ignore_index
1761
+ self.itm_weight = config.itm_weight
1762
+ self.mmm_image_weight = config.mmm_image_weight
1763
+ self.mmm_text_weight = config.mmm_text_weight
1764
+ self.skip_unmasked_multimodal_encoder = config.skip_unmasked_multimodal_encoder
1765
+
1766
+ self.post_init()
1767
+
1768
+ def _resize_to_2d(self, x: torch.Tensor):
1769
+ if x.dim() > 2:
1770
+ x = x.view(x.size(0), -1)
1771
+ return x
1772
+
1773
+ @add_start_docstrings_to_model_forward(
1774
+ FLAVA_PRETRAINING_INPUTS_DOCSTRING.format("batch_size, text_seq_len", "batch_size, image_num_patches")
1775
+ )
1776
+ @replace_return_docstrings(output_type=FlavaForPreTrainingOutput, config_class=FlavaConfig)
1777
+ def forward(
1778
+ self,
1779
+ input_ids: Optional[torch.LongTensor] = None,
1780
+ input_ids_masked: Optional[torch.LongTensor] = None,
1781
+ pixel_values: Optional[torch.FloatTensor] = None,
1782
+ codebook_pixel_values: Optional[torch.FloatTensor] = None,
1783
+ attention_mask: Optional[torch.Tensor] = None,
1784
+ token_type_ids: Optional[torch.Tensor] = None,
1785
+ bool_masked_pos: Optional[torch.Tensor] = None,
1786
+ position_ids: Optional[torch.LongTensor] = None,
1787
+ image_attention_mask: Optional[torch.Tensor] = None,
1788
+ skip_unmasked_multimodal_encoder: Optional[bool] = None,
1789
+ mlm_labels: Optional[torch.Tensor] = None,
1790
+ mim_labels: Optional[torch.Tensor] = None,
1791
+ itm_labels: Optional[torch.Tensor] = None,
1792
+ output_attentions: Optional[bool] = None,
1793
+ output_hidden_states: bool = True,
1794
+ return_dict: Optional[bool] = None,
1795
+ return_loss: Optional[bool] = None,
1796
+ ) -> Union[Tuple[torch.Tensor], FlavaForPreTrainingOutput]:
1797
+ """
1798
+ Examples:
1799
+ ```python
1800
+ >>> from PIL import Image
1801
+ >>> import requests
1802
+ >>> from transformers import FlavaForPreTraining, AutoProcessor
1803
+
1804
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1805
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1806
+
1807
+ >>> model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
1808
+ >>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
1809
+
1810
+ >>> text = ["a photo of a cat"]
1811
+
1812
+ >>> inputs = processor(
1813
+ ... images=[image],
1814
+ ... text=text,
1815
+ ... return_masks=True,
1816
+ ... return_codebook_pixels=True,
1817
+ ... padding=True,
1818
+ ... max_length=77,
1819
+ ... return_tensors="pt",
1820
+ ... )
1821
+
1822
+
1823
+ >>> output = model(**inputs)
1824
+ ```
1825
+
1826
+ Return:
1827
+
1828
+ """
1829
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1830
+ return_loss = return_loss if return_loss is not None else self.config.return_loss
1831
+
1832
+ skip_unmasked_multimodal_encoder = (
1833
+ skip_unmasked_multimodal_encoder
1834
+ if skip_unmasked_multimodal_encoder is not None
1835
+ else self.skip_unmasked_multimodal_encoder
1836
+ )
1837
+
1838
+ if input_ids_masked is None and input_ids is not None:
1839
+ logger.warning(
1840
+ "`input_ids_masked` isn't passed which means MLM loss won't be calculated correctly. Setting it to"
1841
+ " `input_ids` so that model can work. Please pass it if this is unintentional. This is usually OKAY if"
1842
+ " you are doing inference on unmasked text..."
1843
+ )
1844
+ input_ids_masked = input_ids
1845
+
1846
+ flava_output = self.flava(
1847
+ input_ids=input_ids,
1848
+ pixel_values=pixel_values,
1849
+ attention_mask=attention_mask,
1850
+ token_type_ids=token_type_ids,
1851
+ position_ids=position_ids,
1852
+ image_attention_mask=image_attention_mask,
1853
+ # Don't need unmasked multimodal embedding for anything so skip it
1854
+ # NOTE: ITM uses masked version
1855
+ skip_multimodal_encoder=skip_unmasked_multimodal_encoder,
1856
+ output_attentions=output_attentions,
1857
+ output_hidden_states=output_hidden_states,
1858
+ # Pass true to have deterministic outputs
1859
+ return_dict=True,
1860
+ )
1861
+
1862
+ flava_masked_output = self.flava(
1863
+ input_ids=input_ids_masked,
1864
+ pixel_values=pixel_values,
1865
+ attention_mask=attention_mask,
1866
+ token_type_ids=token_type_ids,
1867
+ image_attention_mask=image_attention_mask,
1868
+ bool_masked_pos=bool_masked_pos,
1869
+ output_attentions=output_attentions,
1870
+ output_hidden_states=output_hidden_states,
1871
+ return_dict=True,
1872
+ )
1873
+
1874
+ pos_mask = None
1875
+
1876
+ image_embeddings = flava_output.image_embeddings
1877
+ text_embeddings = flava_output.text_embeddings
1878
+ image_masked_embeddings = flava_masked_output.image_embeddings
1879
+ text_masked_embeddings = flava_masked_output.text_embeddings
1880
+ multimodal_masked_embeddings = flava_masked_output.multimodal_embeddings
1881
+
1882
+ total_loss = mim_loss = mlm_loss = mmm_text_loss = mmm_image_loss = gc_loss = itm_loss = None
1883
+ mim_logits = mlm_logits = mmm_text_logits = mmm_image_logits = None
1884
+ itm_logits = logits_per_image = logits_per_text = None
1885
+
1886
+ # Calculate mim_labels if necessary from the image_codebook
1887
+ if image_masked_embeddings is not None or multimodal_masked_embeddings is not None:
1888
+ if mim_labels is None and return_loss:
1889
+ if self.image_codebook is None:
1890
+ raise RuntimeError(
1891
+ "`return_loss` is set to True but the image codebook is not initialized and no `mim_labels` "
1892
+ "have been passed. Reinstantiate the model with `init_codebook` set to True or "
1893
+ "pass in your custom `mim_labels`"
1894
+ )
1895
+ if codebook_pixel_values is None:
1896
+ raise ValueError(
1897
+ "`codebook_pixel_values` are required to generate `mim_labels` if loss is expected. "
1898
+ "Call `AutoProcessor` with `return_codebook_pixels` set to True"
1899
+ )
1900
+ mim_labels = self.image_codebook.get_codebook_indices(codebook_pixel_values)
1901
+ # Unimodal MIM Loss
1902
+ # If multimodal embeddings are present, we will calculate MMM loss
1903
+ if self.mim_weight > 0 and image_masked_embeddings is not None and multimodal_masked_embeddings is None:
1904
+ sequence_for_image = image_masked_embeddings
1905
+
1906
+ if mim_labels is not None:
1907
+ mim_labels = self._resize_to_2d(mim_labels)
1908
+ bool_masked_pos = self._resize_to_2d(bool_masked_pos)
1909
+ mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
1910
+
1911
+ sequence_for_image = sequence_for_image[:, -mim_labels.size(1) :, :]
1912
+ masked_tokens = mim_labels.ne(self.ce_ignore_index)
1913
+ mim_labels_filtered = mim_labels[masked_tokens]
1914
+ sequence_for_image = sequence_for_image[masked_tokens, :]
1915
+ mim_logits = self.mim_head(sequence_for_image)
1916
+ if return_loss:
1917
+ mim_loss = nn.functional.cross_entropy(
1918
+ mim_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
1919
+ )
1920
+ mim_loss *= self.mim_weight
1921
+ else:
1922
+ mim_logits = self.mim_head(sequence_for_image)
1923
+
1924
+ # Unimodal MLM Loss
1925
+ if self.mlm_weight > 0 and text_masked_embeddings is not None and multimodal_masked_embeddings is None:
1926
+ sequence_for_text = text_masked_embeddings
1927
+ if mlm_labels is not None:
1928
+ mlm_labels = self._resize_to_2d(mlm_labels)
1929
+ sequence_for_text = sequence_for_text[:, -mlm_labels.size(1) :, :]
1930
+ masked_tokens = mlm_labels.ne(self.ce_ignore_index)
1931
+ mlm_labels_filtered = mlm_labels[masked_tokens]
1932
+ sequence_for_text = sequence_for_text[masked_tokens, :]
1933
+ mlm_logits = self.mlm_head(sequence_for_text)
1934
+ if return_loss:
1935
+ mlm_loss = nn.functional.cross_entropy(
1936
+ mlm_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
1937
+ )
1938
+ mlm_loss *= self.mlm_weight
1939
+ else:
1940
+ mlm_logits = self.mlm_head(sequence_for_text)
1941
+
1942
+ # ITM Loss
1943
+ if self.itm_weight > 0 and multimodal_masked_embeddings is not None:
1944
+ itm_logits = self.itm_head(multimodal_masked_embeddings)
1945
+
1946
+ if itm_labels is not None:
1947
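+ # itm_labels != 0 marks matched image-text pairs; the MMM and contrastive losses below are
+ # restricted to those pairs, falling back to an all-True mask when the batch has no positives.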
+ pos_pairs = itm_labels.ne(0)
1948
+ pos_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True]))
1949
+ if return_loss:
1950
+ itm_loss = nn.functional.cross_entropy(itm_logits, itm_labels)
1951
+ itm_loss *= self.itm_weight
1952
+
1953
+ if multimodal_masked_embeddings is not None:
1954
+ multimodal_masked_embeddings = multimodal_masked_embeddings[pos_mask]
1955
+
1956
+ if mlm_labels is not None:
1957
+ mlm_labels = mlm_labels[pos_mask]
1958
+
1959
+ if mim_labels is not None:
1960
+ mim_labels = mim_labels[pos_mask]
1961
+ bool_masked_pos = bool_masked_pos[pos_mask]
1962
+
1963
+ # MMM Image Loss
1964
+ if multimodal_masked_embeddings is not None and self.mmm_image_weight > 0:
1965
+ sequence_for_image = multimodal_masked_embeddings
1966
+ end_index = image_masked_embeddings.size(1) - 1
1967
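+ # The multimodal sequence is [multimodal CLS, image CLS, image patches..., text tokens...],
+ # so start at index 2 to line the slice up with the image patches (and with mim_labels).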
+ sequence_for_image = sequence_for_image[:, 2 : 2 + end_index, :]
1968
+
1969
+ if mim_labels is not None:
1970
+ mim_labels = self._resize_to_2d(mim_labels)
1971
+ bool_masked_pos = self._resize_to_2d(bool_masked_pos)
1972
+ mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
1973
+
1974
+ masked_tokens = mim_labels.ne(self.ce_ignore_index)
1975
+ mim_labels_filtered = mim_labels[masked_tokens]
1976
+ sequence_for_image = sequence_for_image[masked_tokens, :]
1977
+ mmm_image_logits = self.mmm_image_head(sequence_for_image)
1978
+ if return_loss:
1979
+ mmm_image_loss = nn.functional.cross_entropy(
1980
+ mmm_image_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
1981
+ )
1982
+ mmm_image_loss *= self.mmm_image_weight
1983
+ else:
1984
+ mmm_image_logits = self.mmm_image_head(sequence_for_image)
1985
+
1986
+ # MMM Text Loss
1987
+ if multimodal_masked_embeddings is not None and self.mmm_text_weight > 0:
1988
+ sequence_for_text = multimodal_masked_embeddings
1989
+ sequence_for_text = sequence_for_text[:, -text_masked_embeddings.size(1) :, :]
1990
+
1991
+ if mlm_labels is not None:
1992
+ mlm_labels = self._resize_to_2d(mlm_labels)
1993
+ masked_tokens = mlm_labels.ne(self.ce_ignore_index)
1994
+ mlm_labels_filtered = mlm_labels[masked_tokens]
1995
+ sequence_for_text = sequence_for_text[masked_tokens, :]
1996
+ mmm_text_logits = self.mmm_text_head(sequence_for_text)
1997
+ if return_loss:
1998
+ mmm_text_loss = nn.functional.cross_entropy(
1999
+ mmm_text_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
2000
+ )
2001
+ mmm_text_loss *= self.mmm_text_weight
2002
+ else:
2003
+ mmm_text_logits = self.mmm_text_head(sequence_for_text)
2004
+
2005
+ # Global Contrastive Loss
2006
+ if image_embeddings is not None and text_embeddings is not None and self.global_contrastive_weight > 0:
2007
+ text_embedding = self.flava.text_projection(text_embeddings[:, 0, :])
2008
+ text_embedding = nn.functional.normalize(text_embedding, dim=-1)
2009
+
2010
+ image_embedding = self.flava.image_projection(image_embeddings[:, 0, :])
2011
+ image_embedding = nn.functional.normalize(image_embedding, dim=-1)
2012
+
2013
+ self.flava.logit_scale.data.clamp_(LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX)
2014
+
2015
+ logits_per_image, logits_per_text, gc_labels = self.global_contrastive_head(
2016
+ image_embedding, text_embedding, self.flava.logit_scale
2017
+ )
2018
+
2019
+ # Apply ITM negative mask if any
2020
+ if pos_mask is not None:
2021
+ logits_per_image = logits_per_image[pos_mask]
2022
+ logits_per_text = logits_per_text[pos_mask]
2023
+ gc_labels = gc_labels[pos_mask]
2024
+
2025
+ if return_loss:
2026
+ gc_loss_image = nn.functional.cross_entropy(logits_per_image, gc_labels)
2027
+ gc_loss_text = nn.functional.cross_entropy(logits_per_text, gc_labels)
2028
+ gc_loss = (gc_loss_image + gc_loss_text) / 2
2029
+ gc_loss *= self.global_contrastive_weight
2030
+
2031
+ flava_losses = FlavaLosses(
2032
+ mim=mim_loss,
2033
+ mlm=mlm_loss,
2034
+ itm=itm_loss,
2035
+ global_contrastive=gc_loss,
2036
+ mmm_image=mmm_image_loss,
2037
+ mmm_text=mmm_text_loss,
2038
+ )
2039
+
2040
+ if return_loss and not flava_losses.all_none():
2041
+ total_loss = sum(loss if loss is not None else 0 for loss in flava_losses.values())
2042
+
2043
+ if not return_dict:
2044
+ output = (
2045
+ image_embeddings,
2046
+ flava_output.image_output.to_tuple() if flava_output.image_output is not None else None,
2047
+ text_embeddings,
2048
+ flava_output.text_output.to_tuple() if flava_output.text_output is not None else None,
2049
+ flava_output.multimodal_embeddings,
2050
+ flava_output.multimodal_output.to_tuple() if flava_output.multimodal_output is not None else None,
2051
+ image_masked_embeddings,
2052
+ flava_masked_output.image_output.to_tuple() if flava_masked_output.image_output is not None else None,
2053
+ text_masked_embeddings,
2054
+ flava_masked_output.text_output.to_tuple() if flava_masked_output.text_output is not None else None,
2055
+ multimodal_masked_embeddings,
2056
+ flava_masked_output.multimodal_output.to_tuple()
2057
+ if flava_masked_output.multimodal_output is not None
2058
+ else None,
2059
+ mim_logits,
2060
+ mlm_logits,
2061
+ itm_logits,
2062
+ logits_per_image,
2063
+ logits_per_text,
2064
+ mmm_image_logits,
2065
+ mmm_text_logits,
2066
+ )
2067
+ if return_loss and not flava_losses.all_none():
2068
+ output = (
2069
+ total_loss,
2070
+ flava_losses,
2071
+ ) + output
2072
+
2073
+ # Filter None as transformer by default won't handle it
2074
+ return tuple(x for x in output if x is not None)
2075
+
2076
+ return FlavaForPreTrainingOutput(
2077
+ loss=total_loss,
2078
+ loss_info=flava_losses,
2079
+ image_embeddings=image_embeddings,
2080
+ image_output=flava_output.image_output,
2081
+ text_embeddings=text_embeddings,
2082
+ text_output=flava_output.text_output,
2083
+ multimodal_embeddings=flava_output.multimodal_embeddings,
2084
+ multimodal_output=flava_output.multimodal_output,
2085
+ image_masked_embeddings=image_masked_embeddings,
2086
+ image_masked_output=flava_masked_output.image_output,
2087
+ text_masked_embeddings=text_masked_embeddings,
2088
+ text_masked_output=flava_masked_output.text_output,
2089
+ multimodal_masked_embeddings=multimodal_masked_embeddings,
2090
+ multimodal_masked_output=flava_masked_output.multimodal_output,
2091
+ mim_logits=mim_logits,
2092
+ mlm_logits=mlm_logits,
2093
+ itm_logits=itm_logits,
2094
+ contrastive_logits_per_image=logits_per_image,
2095
+ contrastive_logits_per_text=logits_per_text,
2096
+ mmm_image_logits=mmm_image_logits,
2097
+ mmm_text_logits=mmm_text_logits,
2098
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/flava/processing_flava.py ADDED
@@ -0,0 +1,165 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for FLAVA
17
+ """
18
+
19
+ import warnings
20
+ from typing import List, Optional, Union
21
+
22
+ from ...image_utils import ImageInput
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
25
+ from ...utils import TensorType
26
+
27
+
28
+ class FlavaProcessor(ProcessorMixin):
29
+ r"""
30
+ Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor.
31
+
32
+ [`FlavaProcessor`] offers all the functionalities of [`FlavaImageProcessor`] and [`BertTokenizerFast`]. See the
33
+ [`~FlavaProcessor.__call__`] and [`~FlavaProcessor.decode`] for more information.
34
+
35
+ Args:
36
+ image_processor ([`FlavaImageProcessor`], *optional*): The image processor is a required input.
37
+ tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input.
38
+ """
39
+
40
+ attributes = ["image_processor", "tokenizer"]
41
+ image_processor_class = "FlavaImageProcessor"
42
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
43
+
44
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
45
+ feature_extractor = None
46
+ if "feature_extractor" in kwargs:
47
+ warnings.warn(
48
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
49
+ " instead.",
50
+ FutureWarning,
51
+ )
52
+ feature_extractor = kwargs.pop("feature_extractor")
53
+
54
+ image_processor = image_processor if image_processor is not None else feature_extractor
55
+ if image_processor is None:
56
+ raise ValueError("You need to specify an `image_processor`.")
57
+ if tokenizer is None:
58
+ raise ValueError("You need to specify a `tokenizer`.")
59
+
60
+ super().__init__(image_processor, tokenizer)
61
+ self.current_processor = self.image_processor
62
+
63
+ def __call__(
64
+ self,
65
+ images: Optional[ImageInput] = None,
66
+ text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
67
+ add_special_tokens: bool = True,
68
+ padding: Union[bool, str, PaddingStrategy] = False,
69
+ truncation: Union[bool, str, TruncationStrategy] = False,
70
+ max_length: Optional[int] = None,
71
+ stride: int = 0,
72
+ pad_to_multiple_of: Optional[int] = None,
73
+ return_image_mask: Optional[bool] = None,
74
+ return_codebook_pixels: Optional[bool] = None,
75
+ return_token_type_ids: Optional[bool] = None,
76
+ return_attention_mask: Optional[bool] = None,
77
+ return_overflowing_tokens: bool = False,
78
+ return_special_tokens_mask: bool = False,
79
+ return_offsets_mapping: bool = False,
80
+ return_length: bool = False,
81
+ verbose: bool = True,
82
+ return_tensors: Optional[Union[str, TensorType]] = None,
83
+ **kwargs,
84
+ ):
85
+ """
86
+ This method uses [`FlavaImageProcessor.__call__`] to prepare image(s) for the model, and
87
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
88
+
89
+ Please refer to the docstring of the above two methods for more information.
90
+ """
91
+
92
+ if text is None and images is None:
93
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
94
+
95
+ if text is not None:
96
+ encoding = self.tokenizer(
97
+ text=text,
98
+ add_special_tokens=add_special_tokens,
99
+ padding=padding,
100
+ truncation=truncation,
101
+ max_length=max_length,
102
+ stride=stride,
103
+ pad_to_multiple_of=pad_to_multiple_of,
104
+ return_token_type_ids=return_token_type_ids,
105
+ return_attention_mask=return_attention_mask,
106
+ return_overflowing_tokens=return_overflowing_tokens,
107
+ return_special_tokens_mask=return_special_tokens_mask,
108
+ return_offsets_mapping=return_offsets_mapping,
109
+ return_length=return_length,
110
+ verbose=verbose,
111
+ return_tensors=return_tensors,
112
+ **kwargs,
113
+ )
114
+ if images is not None:
115
+ image_features = self.image_processor(
116
+ images,
117
+ return_image_mask=return_image_mask,
118
+ return_codebook_pixels=return_codebook_pixels,
119
+ return_tensors=return_tensors,
120
+ **kwargs,
121
+ )
122
+
123
+ if text is not None and images is not None:
124
+ encoding.update(image_features)
125
+ return encoding
126
+ elif text is not None:
127
+ return encoding
128
+ else:
129
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
130
+
131
+ def batch_decode(self, *args, **kwargs):
132
+ """
133
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
134
+ refer to the docstring of this method for more information.
135
+ """
136
+ return self.tokenizer.batch_decode(*args, **kwargs)
137
+
138
+ def decode(self, *args, **kwargs):
139
+ """
140
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
141
+ the docstring of this method for more information.
142
+ """
143
+ return self.tokenizer.decode(*args, **kwargs)
144
+
145
+ @property
146
+ def model_input_names(self):
147
+ tokenizer_input_names = self.tokenizer.model_input_names
148
+ image_processor_input_names = self.image_processor.model_input_names
149
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
150
+
151
+ @property
152
+ def feature_extractor_class(self):
153
+ warnings.warn(
154
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
155
+ FutureWarning,
156
+ )
157
+ return self.image_processor_class
158
+
159
+ @property
160
+ def feature_extractor(self):
161
+ warnings.warn(
162
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
163
+ FutureWarning,
164
+ )
165
+ return self.image_processor
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__init__.py ADDED
@@ -0,0 +1,83 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_markuplm": ["MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "MarkupLMConfig"],
21
+ "feature_extraction_markuplm": ["MarkupLMFeatureExtractor"],
22
+ "processing_markuplm": ["MarkupLMProcessor"],
23
+ "tokenization_markuplm": ["MarkupLMTokenizer"],
24
+ }
25
+
26
+ try:
27
+ if not is_tokenizers_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["tokenization_markuplm_fast"] = ["MarkupLMTokenizerFast"]
33
+
34
+ try:
35
+ if not is_torch_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ _import_structure["modeling_markuplm"] = [
41
+ "MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST",
42
+ "MarkupLMForQuestionAnswering",
43
+ "MarkupLMForSequenceClassification",
44
+ "MarkupLMForTokenClassification",
45
+ "MarkupLMModel",
46
+ "MarkupLMPreTrainedModel",
47
+ ]
48
+
49
+
50
+ if TYPE_CHECKING:
51
+ from .configuration_markuplm import MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP, MarkupLMConfig
52
+ from .feature_extraction_markuplm import MarkupLMFeatureExtractor
53
+ from .processing_markuplm import MarkupLMProcessor
54
+ from .tokenization_markuplm import MarkupLMTokenizer
55
+
56
+ try:
57
+ if not is_tokenizers_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .tokenization_markuplm_fast import MarkupLMTokenizerFast
63
+
64
+ try:
65
+ if not is_torch_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .modeling_markuplm import (
71
+ MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST,
72
+ MarkupLMForQuestionAnswering,
73
+ MarkupLMForSequenceClassification,
74
+ MarkupLMForTokenClassification,
75
+ MarkupLMModel,
76
+ MarkupLMPreTrainedModel,
77
+ )
78
+
79
+
80
+ else:
81
+ import sys
82
+
83
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
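A small sketch of how the lazy `_import_structure` above behaves at runtime, assuming `transformers` is installed; which names resolve depends on the same `is_torch_available()` / `is_tokenizers_available()` guards used in the module.

```python
# Minimal sketch: attribute access on the markuplm package triggers the lazy imports above.
import importlib

markuplm = importlib.import_module("transformers.models.markuplm")

# The config is always exported; the model classes and the fast tokenizer are exposed
# only when the corresponding optional backends (torch / tokenizers) are available.
print(hasattr(markuplm, "MarkupLMConfig"))         # True
print(hasattr(markuplm, "MarkupLMModel"))          # True only if is_torch_available()
print(hasattr(markuplm, "MarkupLMTokenizerFast"))  # True only if is_tokenizers_available()
```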
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.48 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/configuration_markuplm.cpython-310.pyc ADDED
Binary file (6.32 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/feature_extraction_markuplm.cpython-310.pyc ADDED
Binary file (5.19 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/modeling_markuplm.cpython-310.pyc ADDED
Binary file (37.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/processing_markuplm.cpython-310.pyc ADDED
Binary file (5.21 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/tokenization_markuplm.cpython-310.pyc ADDED
Binary file (44.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/tokenization_markuplm_fast.cpython-310.pyc ADDED
Binary file (24.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/configuration_markuplm.py ADDED
@@ -0,0 +1,156 @@
1
+ # coding=utf-8
2
+ # Copyright 2021, The Microsoft Research Asia MarkupLM Team authors
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MarkupLM model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class MarkupLMConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`MarkupLMModel`]. It is used to instantiate a
30
+ MarkupLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the MarkupLM
32
+ [microsoft/markuplm-base](https://huggingface.co/microsoft/markuplm-base) architecture.
33
+
34
+ Configuration objects inherit from [`BertConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`BertConfig`] for more information.
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 30522):
39
+ Vocabulary size of the MarkupLM model. Defines the number of different tokens that can be represented by the
40
+ *inputs_ids* passed to the forward method of [`MarkupLMModel`].
41
+ hidden_size (`int`, *optional*, defaults to 768):
42
+ Dimensionality of the encoder layers and the pooler layer.
43
+ num_hidden_layers (`int`, *optional*, defaults to 12):
44
+ Number of hidden layers in the Transformer encoder.
45
+ num_attention_heads (`int`, *optional*, defaults to 12):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ intermediate_size (`int`, *optional*, defaults to 3072):
48
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
49
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
50
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
51
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
52
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
53
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
54
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
55
+ The dropout ratio for the attention probabilities.
56
+ max_position_embeddings (`int`, *optional*, defaults to 512):
57
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
58
+ just in case (e.g., 512 or 1024 or 2048).
59
+ type_vocab_size (`int`, *optional*, defaults to 2):
60
+ The vocabulary size of the `token_type_ids` passed into [`MarkupLMModel`].
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
64
+ The epsilon used by the layer normalization layers.
65
+ max_tree_id_unit_embeddings (`int`, *optional*, defaults to 1024):
66
+ The maximum value that the tree id unit embedding might ever use. Typically set this to something large
67
+ just in case (e.g., 1024).
68
+ max_xpath_tag_unit_embeddings (`int`, *optional*, defaults to 256):
69
+ The maximum value that the xpath tag unit embedding might ever use. Typically set this to something large
70
+ just in case (e.g., 256).
71
+ max_xpath_subs_unit_embeddings (`int`, *optional*, defaults to 1024):
72
+ The maximum value that the xpath subscript unit embedding might ever use. Typically set this to something
73
+ large just in case (e.g., 1024).
74
+ tag_pad_id (`int`, *optional*, defaults to 216):
75
+ The id of the padding token in the xpath tags.
76
+ subs_pad_id (`int`, *optional*, defaults to 1001):
77
+ The id of the padding token in the xpath subscripts.
78
+ xpath_unit_hidden_size (`int`, *optional*, defaults to 32):
79
+ The hidden size of each tree id unit. One complete tree index will have
80
+ (50*xpath_unit_hidden_size)-dim.
81
+ max_depth (`int`, *optional*, defaults to 50):
82
+ The maximum depth in xpath.
83
+
84
+ Examples:
85
+
86
+ ```python
87
+ >>> from transformers import MarkupLMModel, MarkupLMConfig
88
+
89
+ >>> # Initializing a MarkupLM microsoft/markuplm-base style configuration
90
+ >>> configuration = MarkupLMConfig()
91
+
92
+ >>> # Initializing a model from the microsoft/markuplm-base style configuration
93
+ >>> model = MarkupLMModel(configuration)
94
+
95
+ >>> # Accessing the model configuration
96
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "markuplm"
100
+
101
+ def __init__(
102
+ self,
103
+ vocab_size=30522,
104
+ hidden_size=768,
105
+ num_hidden_layers=12,
106
+ num_attention_heads=12,
107
+ intermediate_size=3072,
108
+ hidden_act="gelu",
109
+ hidden_dropout_prob=0.1,
110
+ attention_probs_dropout_prob=0.1,
111
+ max_position_embeddings=512,
112
+ type_vocab_size=2,
113
+ initializer_range=0.02,
114
+ layer_norm_eps=1e-12,
115
+ pad_token_id=0,
116
+ bos_token_id=0,
117
+ eos_token_id=2,
118
+ max_xpath_tag_unit_embeddings=256,
119
+ max_xpath_subs_unit_embeddings=1024,
120
+ tag_pad_id=216,
121
+ subs_pad_id=1001,
122
+ xpath_unit_hidden_size=32,
123
+ max_depth=50,
124
+ position_embedding_type="absolute",
125
+ use_cache=True,
126
+ classifier_dropout=None,
127
+ **kwargs,
128
+ ):
129
+ super().__init__(
130
+ pad_token_id=pad_token_id,
131
+ bos_token_id=bos_token_id,
132
+ eos_token_id=eos_token_id,
133
+ **kwargs,
134
+ )
135
+ self.vocab_size = vocab_size
136
+ self.hidden_size = hidden_size
137
+ self.num_hidden_layers = num_hidden_layers
138
+ self.num_attention_heads = num_attention_heads
139
+ self.hidden_act = hidden_act
140
+ self.intermediate_size = intermediate_size
141
+ self.hidden_dropout_prob = hidden_dropout_prob
142
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
143
+ self.max_position_embeddings = max_position_embeddings
144
+ self.type_vocab_size = type_vocab_size
145
+ self.initializer_range = initializer_range
146
+ self.layer_norm_eps = layer_norm_eps
147
+ self.position_embedding_type = position_embedding_type
148
+ self.use_cache = use_cache
149
+ self.classifier_dropout = classifier_dropout
150
+ # additional properties
151
+ self.max_depth = max_depth
152
+ self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
153
+ self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
154
+ self.tag_pad_id = tag_pad_id
155
+ self.subs_pad_id = subs_pad_id
156
+ self.xpath_unit_hidden_size = xpath_unit_hidden_size
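A short sketch complementing the docstring example above: the xpath-specific hyperparameters are stored alongside the usual BERT-style ones, and overrides are plain keyword arguments to `__init__`.

```python
from transformers import MarkupLMConfig

# Defaults mirror the microsoft/markuplm-base style configuration defined above.
config = MarkupLMConfig()
print(config.hidden_size, config.max_depth, config.xpath_unit_hidden_size)  # 768 50 32

# Overrides are plain keyword arguments handled by __init__ / PretrainedConfig.
small_config = MarkupLMConfig(hidden_size=384, num_hidden_layers=6, max_depth=32)
print(small_config.hidden_size, small_config.max_depth)  # 384 32
```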
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/feature_extraction_markuplm.py ADDED
@@ -0,0 +1,183 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for MarkupLM.
17
+ """
18
+
19
+ import html
20
+
21
+ from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
22
+ from ...utils import is_bs4_available, logging, requires_backends
23
+
24
+
25
+ if is_bs4_available():
26
+ import bs4
27
+ from bs4 import BeautifulSoup
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ class MarkupLMFeatureExtractor(FeatureExtractionMixin):
34
+ r"""
35
+ Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from HTML
36
+ strings.
37
+
38
+ This feature extractor inherits from [`~feature_extraction_utils.PreTrainedFeatureExtractor`] which contains most
39
+ of the main methods. Users should refer to this superclass for more information regarding those methods.
40
+
41
+ """
42
+
43
+ def __init__(self, **kwargs):
44
+ requires_backends(self, ["bs4"])
45
+ super().__init__(**kwargs)
46
+
47
+ def xpath_soup(self, element):
48
+ xpath_tags = []
49
+ xpath_subscripts = []
50
+ child = element if element.name else element.parent
51
+ for parent in child.parents: # type: bs4.element.Tag
52
+ siblings = parent.find_all(child.name, recursive=False)
53
+ xpath_tags.append(child.name)
54
+ xpath_subscripts.append(
55
+ 0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
56
+ )
57
+ child = parent
58
+ xpath_tags.reverse()
59
+ xpath_subscripts.reverse()
60
+ return xpath_tags, xpath_subscripts
61
+
62
+ def get_three_from_single(self, html_string):
63
+ html_code = BeautifulSoup(html_string, "html.parser")
64
+
65
+ all_doc_strings = []
66
+ string2xtag_seq = []
67
+ string2xsubs_seq = []
68
+
69
+ for element in html_code.descendants:
70
+ if isinstance(element, bs4.element.NavigableString):
71
+ if type(element.parent) != bs4.element.Tag:
72
+ continue
73
+
74
+ text_in_this_tag = html.unescape(element).strip()
75
+ if not text_in_this_tag:
76
+ continue
77
+
78
+ all_doc_strings.append(text_in_this_tag)
79
+
80
+ xpath_tags, xpath_subscripts = self.xpath_soup(element)
81
+ string2xtag_seq.append(xpath_tags)
82
+ string2xsubs_seq.append(xpath_subscripts)
83
+
84
+ if len(all_doc_strings) != len(string2xtag_seq):
85
+ raise ValueError("Number of doc strings and xtags does not correspond")
86
+ if len(all_doc_strings) != len(string2xsubs_seq):
87
+ raise ValueError("Number of doc strings and xsubs does not correspond")
88
+
89
+ return all_doc_strings, string2xtag_seq, string2xsubs_seq
90
+
91
+ def construct_xpath(self, xpath_tags, xpath_subscripts):
92
+ xpath = ""
93
+ for tagname, subs in zip(xpath_tags, xpath_subscripts):
94
+ xpath += f"/{tagname}"
95
+ if subs != 0:
96
+ xpath += f"[{subs}]"
97
+ return xpath
98
+
99
+ def __call__(self, html_strings) -> BatchFeature:
100
+ """
101
+ Main method to prepare for the model one or several HTML strings.
102
+
103
+ Args:
104
+ html_strings (`str`, `List[str]`):
105
+ The HTML string or batch of HTML strings from which to extract nodes and corresponding xpaths.
106
+
107
+ Returns:
108
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
109
+
110
+ - **nodes** -- Nodes.
111
+ - **xpaths** -- Corresponding xpaths.
112
+
113
+ Examples:
114
+
115
+ ```python
116
+ >>> from transformers import MarkupLMFeatureExtractor
117
+
118
+ >>> page_name_1 = "page1.html"
119
+ >>> page_name_2 = "page2.html"
120
+ >>> page_name_3 = "page3.html"
121
+
122
+ >>> with open(page_name_1) as f:
123
+ ... single_html_string = f.read()
124
+
125
+ >>> feature_extractor = MarkupLMFeatureExtractor()
126
+
127
+ >>> # single example
128
+ >>> encoding = feature_extractor(single_html_string)
129
+ >>> print(encoding.keys())
130
+ >>> # dict_keys(['nodes', 'xpaths'])
131
+
132
+ >>> # batched example
133
+
134
+ >>> multi_html_strings = []
135
+
136
+ >>> with open(page_name_2) as f:
137
+ ... multi_html_strings.append(f.read())
138
+ >>> with open(page_name_3) as f:
139
+ ... multi_html_strings.append(f.read())
140
+
141
+ >>> encoding = feature_extractor(multi_html_strings)
142
+ >>> print(encoding.keys())
143
+ >>> # dict_keys(['nodes', 'xpaths'])
144
+ ```"""
145
+
146
+ # Input type checking for clearer error
147
+ valid_strings = False
148
+
149
+ # Check that strings has a valid type
150
+ if isinstance(html_strings, str):
151
+ valid_strings = True
152
+ elif isinstance(html_strings, (list, tuple)):
153
+ if len(html_strings) == 0 or isinstance(html_strings[0], str):
154
+ valid_strings = True
155
+
156
+ if not valid_strings:
157
+ raise ValueError(
158
+ "HTML strings must of type `str`, `List[str]` (batch of examples), "
159
+ f"but is of type {type(html_strings)}."
160
+ )
161
+
162
+ is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
163
+
164
+ if not is_batched:
165
+ html_strings = [html_strings]
166
+
167
+ # Get nodes + xpaths
168
+ nodes = []
169
+ xpaths = []
170
+ for html_string in html_strings:
171
+ all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
172
+ nodes.append(all_doc_strings)
173
+ xpath_strings = []
174
+ for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
175
+ xpath_string = self.construct_xpath(tag_list, sub_list)
176
+ xpath_strings.append(xpath_string)
177
+ xpaths.append(xpath_strings)
178
+
179
+ # return as Dict
180
+ data = {"nodes": nodes, "xpaths": xpaths}
181
+ encoded_inputs = BatchFeature(data=data, tensor_type=None)
182
+
183
+ return encoded_inputs
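A minimal sketch of the feature extractor above on an inline HTML string (the `bs4` backend must be installed); each text node comes back with an absolute xpath built by `construct_xpath` from the tag/subscript lists collected in `xpath_soup`.

```python
# Minimal sketch: requires `beautifulsoup4` (the `bs4` backend checked in __init__ above).
from transformers import MarkupLMFeatureExtractor

html_string = """
<html>
  <body>
    <h1>My First Heading</h1>
    <p>My first paragraph.</p>
  </body>
</html>
"""

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html_string)

print(encoding["nodes"])   # expected: [['My First Heading', 'My first paragraph.']]
print(encoding["xpaths"])  # expected: [['/html/body/h1', '/html/body/p']]
```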
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/modeling_markuplm.py ADDED
@@ -0,0 +1,1316 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research Asia and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch MarkupLM model."""
16
+
17
+ import math
18
+ import os
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...file_utils import (
28
+ add_start_docstrings,
29
+ add_start_docstrings_to_model_forward,
30
+ replace_return_docstrings,
31
+ )
32
+ from ...modeling_outputs import (
33
+ BaseModelOutputWithPastAndCrossAttentions,
34
+ BaseModelOutputWithPoolingAndCrossAttentions,
35
+ MaskedLMOutput,
36
+ QuestionAnsweringModelOutput,
37
+ SequenceClassifierOutput,
38
+ TokenClassifierOutput,
39
+ )
40
+ from ...modeling_utils import (
41
+ PreTrainedModel,
42
+ apply_chunking_to_forward,
43
+ find_pruneable_heads_and_indices,
44
+ prune_linear_layer,
45
+ )
46
+ from ...utils import logging
47
+ from .configuration_markuplm import MarkupLMConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "microsoft/markuplm-base"
53
+ _CONFIG_FOR_DOC = "MarkupLMConfig"
54
+
55
+
56
+ from ..deprecated._archive_maps import MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ class XPathEmbeddings(nn.Module):
60
+ """Construct the embeddings from xpath tags and subscripts.
61
+
62
+ We drop tree-id in this version, as its info can be covered by xpath.
63
+ """
64
+
65
+ def __init__(self, config):
66
+ super(XPathEmbeddings, self).__init__()
67
+ self.max_depth = config.max_depth
68
+
69
+ self.xpath_unitseq2_embeddings = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, config.hidden_size)
70
+
71
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
72
+
73
+ self.activation = nn.ReLU()
74
+ self.xpath_unitseq2_inner = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, 4 * config.hidden_size)
75
+ self.inner2emb = nn.Linear(4 * config.hidden_size, config.hidden_size)
76
+
77
+ self.xpath_tag_sub_embeddings = nn.ModuleList(
78
+ [
79
+ nn.Embedding(config.max_xpath_tag_unit_embeddings, config.xpath_unit_hidden_size)
80
+ for _ in range(self.max_depth)
81
+ ]
82
+ )
83
+
84
+ self.xpath_subs_sub_embeddings = nn.ModuleList(
85
+ [
86
+ nn.Embedding(config.max_xpath_subs_unit_embeddings, config.xpath_unit_hidden_size)
87
+ for _ in range(self.max_depth)
88
+ ]
89
+ )
90
+
91
+ def forward(self, xpath_tags_seq=None, xpath_subs_seq=None):
92
+ xpath_tags_embeddings = []
93
+ xpath_subs_embeddings = []
94
+
95
+ for i in range(self.max_depth):
96
+ xpath_tags_embeddings.append(self.xpath_tag_sub_embeddings[i](xpath_tags_seq[:, :, i]))
97
+ xpath_subs_embeddings.append(self.xpath_subs_sub_embeddings[i](xpath_subs_seq[:, :, i]))
98
+
99
+ xpath_tags_embeddings = torch.cat(xpath_tags_embeddings, dim=-1)
100
+ xpath_subs_embeddings = torch.cat(xpath_subs_embeddings, dim=-1)
101
+
102
+ xpath_embeddings = xpath_tags_embeddings + xpath_subs_embeddings
103
+
104
+ xpath_embeddings = self.inner2emb(self.dropout(self.activation(self.xpath_unitseq2_inner(xpath_embeddings))))
105
+
106
+ return xpath_embeddings
107
+
108
+
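A hedged shape check for `XPathEmbeddings` with the default `MarkupLMConfig` (`max_depth=50`, `xpath_unit_hidden_size=32`, `hidden_size=768`): each depth contributes a 32-dim tag plus a 32-dim subscript embedding, the per-depth sums are concatenated into a 50*32 = 1600-dim vector, and the `xpath_unitseq2_inner` then `inner2emb` MLP maps that to 768.

```python
# Minimal sketch: shape check for the XPathEmbeddings module above (requires torch).
import torch

from transformers import MarkupLMConfig
from transformers.models.markuplm.modeling_markuplm import XPathEmbeddings

config = MarkupLMConfig()
xpath_embeddings = XPathEmbeddings(config)

batch_size, seq_len = 2, 8
# Tag/subscript ids are padded up to max_depth=50; the pad ids come from the config.
xpath_tags_seq = torch.full((batch_size, seq_len, config.max_depth), config.tag_pad_id, dtype=torch.long)
xpath_subs_seq = torch.full((batch_size, seq_len, config.max_depth), config.subs_pad_id, dtype=torch.long)

out = xpath_embeddings(xpath_tags_seq, xpath_subs_seq)
print(out.shape)  # torch.Size([2, 8, 768])
```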
109
+ # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
110
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
111
+ """
112
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
113
+ are ignored. This is modified from fairseq's `utils.make_positions`.
114
+
115
+ Args:
116
+ x: torch.Tensor x:
117
+
118
+ Returns: torch.Tensor
119
+ """
120
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
121
+ mask = input_ids.ne(padding_idx).int()
122
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
123
+ return incremental_indices.long() + padding_idx
124
+
125
+
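A quick worked example for `create_position_ids_from_input_ids` above, assuming `padding_idx=0`: non-padding positions are numbered starting at `padding_idx + 1`, while padding positions keep `padding_idx`.

```python
# Minimal sketch: worked example for create_position_ids_from_input_ids (padding_idx=0 here).
import torch

from transformers.models.markuplm.modeling_markuplm import create_position_ids_from_input_ids

input_ids = torch.tensor([[101, 7592, 2088, 102, 0, 0]])  # two trailing pad tokens (pad id 0)
position_ids = create_position_ids_from_input_ids(input_ids, padding_idx=0)
print(position_ids)  # tensor([[1, 2, 3, 4, 0, 0]]) -- real tokens start at padding_idx + 1
```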
126
+ class MarkupLMEmbeddings(nn.Module):
127
+ """Construct the embeddings from word, position and token_type embeddings."""
128
+
129
+ def __init__(self, config):
130
+ super(MarkupLMEmbeddings, self).__init__()
131
+ self.config = config
132
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
133
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
134
+
135
+ self.max_depth = config.max_depth
136
+
137
+ self.xpath_embeddings = XPathEmbeddings(config)
138
+
139
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
140
+
141
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
142
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
143
+
144
+ self.register_buffer(
145
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
146
+ )
147
+
148
+ self.padding_idx = config.pad_token_id
149
+ self.position_embeddings = nn.Embedding(
150
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
151
+ )
152
+
153
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings.create_position_ids_from_inputs_embeds
154
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
155
+ """
156
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
157
+
158
+ Args:
159
+ inputs_embeds: torch.Tensor
160
+
161
+ Returns: torch.Tensor
162
+ """
163
+ input_shape = inputs_embeds.size()[:-1]
164
+ sequence_length = input_shape[1]
165
+
166
+ position_ids = torch.arange(
167
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
168
+ )
169
+ return position_ids.unsqueeze(0).expand(input_shape)
170
+
171
+ def forward(
172
+ self,
173
+ input_ids=None,
174
+ xpath_tags_seq=None,
175
+ xpath_subs_seq=None,
176
+ token_type_ids=None,
177
+ position_ids=None,
178
+ inputs_embeds=None,
179
+ past_key_values_length=0,
180
+ ):
181
+ if input_ids is not None:
182
+ input_shape = input_ids.size()
183
+ else:
184
+ input_shape = inputs_embeds.size()[:-1]
185
+
186
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
187
+
188
+ if position_ids is None:
189
+ if input_ids is not None:
190
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
191
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
192
+ else:
193
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
194
+
195
+ if token_type_ids is None:
196
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
197
+
198
+ if inputs_embeds is None:
199
+ inputs_embeds = self.word_embeddings(input_ids)
200
+
201
+ # prepare xpath seq
202
+ if xpath_tags_seq is None:
203
+ xpath_tags_seq = self.config.tag_pad_id * torch.ones(
204
+ tuple(list(input_shape) + [self.max_depth]), dtype=torch.long, device=device
205
+ )
206
+ if xpath_subs_seq is None:
207
+ xpath_subs_seq = self.config.subs_pad_id * torch.ones(
208
+ tuple(list(input_shape) + [self.max_depth]), dtype=torch.long, device=device
209
+ )
210
+
211
+ words_embeddings = inputs_embeds
212
+ position_embeddings = self.position_embeddings(position_ids)
213
+
214
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
215
+
216
+ xpath_embeddings = self.xpath_embeddings(xpath_tags_seq, xpath_subs_seq)
217
+ embeddings = words_embeddings + position_embeddings + token_type_embeddings + xpath_embeddings
218
+
219
+ embeddings = self.LayerNorm(embeddings)
220
+ embeddings = self.dropout(embeddings)
221
+ return embeddings
222
+
223
+
224
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->MarkupLM
225
+ class MarkupLMSelfOutput(nn.Module):
226
+ def __init__(self, config):
227
+ super().__init__()
228
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
229
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
230
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
231
+
232
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
233
+ hidden_states = self.dense(hidden_states)
234
+ hidden_states = self.dropout(hidden_states)
235
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
236
+ return hidden_states
237
+
238
+
239
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
240
+ class MarkupLMIntermediate(nn.Module):
241
+ def __init__(self, config):
242
+ super().__init__()
243
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
244
+ if isinstance(config.hidden_act, str):
245
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
246
+ else:
247
+ self.intermediate_act_fn = config.hidden_act
248
+
249
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
250
+ hidden_states = self.dense(hidden_states)
251
+ hidden_states = self.intermediate_act_fn(hidden_states)
252
+ return hidden_states
253
+
254
+
255
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->MarkupLM
256
+ class MarkupLMOutput(nn.Module):
257
+ def __init__(self, config):
258
+ super().__init__()
259
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
260
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
261
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
262
+
263
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
264
+ hidden_states = self.dense(hidden_states)
265
+ hidden_states = self.dropout(hidden_states)
266
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
267
+ return hidden_states
268
+
269
+
270
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
271
+ class MarkupLMPooler(nn.Module):
272
+ def __init__(self, config):
273
+ super().__init__()
274
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
275
+ self.activation = nn.Tanh()
276
+
277
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
278
+ # We "pool" the model by simply taking the hidden state corresponding
279
+ # to the first token.
280
+ first_token_tensor = hidden_states[:, 0]
281
+ pooled_output = self.dense(first_token_tensor)
282
+ pooled_output = self.activation(pooled_output)
283
+ return pooled_output
284
+
285
+
286
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->MarkupLM
287
+ class MarkupLMPredictionHeadTransform(nn.Module):
288
+ def __init__(self, config):
289
+ super().__init__()
290
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
291
+ if isinstance(config.hidden_act, str):
292
+ self.transform_act_fn = ACT2FN[config.hidden_act]
293
+ else:
294
+ self.transform_act_fn = config.hidden_act
295
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
296
+
297
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
298
+ hidden_states = self.dense(hidden_states)
299
+ hidden_states = self.transform_act_fn(hidden_states)
300
+ hidden_states = self.LayerNorm(hidden_states)
301
+ return hidden_states
302
+
303
+
304
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MarkupLM
305
+ class MarkupLMLMPredictionHead(nn.Module):
306
+ def __init__(self, config):
307
+ super().__init__()
308
+ self.transform = MarkupLMPredictionHeadTransform(config)
309
+
310
+ # The output weights are the same as the input embeddings, but there is
311
+ # an output-only bias for each token.
312
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
313
+
314
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
315
+
316
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
317
+ self.decoder.bias = self.bias
318
+
319
+ def forward(self, hidden_states):
320
+ hidden_states = self.transform(hidden_states)
321
+ hidden_states = self.decoder(hidden_states)
322
+ return hidden_states
323
+
324
+
325
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->MarkupLM
326
+ class MarkupLMOnlyMLMHead(nn.Module):
327
+ def __init__(self, config):
328
+ super().__init__()
329
+ self.predictions = MarkupLMLMPredictionHead(config)
330
+
331
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
332
+ prediction_scores = self.predictions(sequence_output)
333
+ return prediction_scores
334
+
335
+
336
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->MarkupLM
337
+ class MarkupLMSelfAttention(nn.Module):
338
+ def __init__(self, config, position_embedding_type=None):
339
+ super().__init__()
340
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
341
+ raise ValueError(
342
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
343
+ f"heads ({config.num_attention_heads})"
344
+ )
345
+
346
+ self.num_attention_heads = config.num_attention_heads
347
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
348
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
349
+
350
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
351
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
352
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
353
+
354
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
355
+ self.position_embedding_type = position_embedding_type or getattr(
356
+ config, "position_embedding_type", "absolute"
357
+ )
358
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
359
+ self.max_position_embeddings = config.max_position_embeddings
360
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
361
+
362
+ self.is_decoder = config.is_decoder
363
+
364
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
365
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
366
+ x = x.view(new_x_shape)
367
+ return x.permute(0, 2, 1, 3)
368
+
369
+ def forward(
370
+ self,
371
+ hidden_states: torch.Tensor,
372
+ attention_mask: Optional[torch.FloatTensor] = None,
373
+ head_mask: Optional[torch.FloatTensor] = None,
374
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
375
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
376
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
377
+ output_attentions: Optional[bool] = False,
378
+ ) -> Tuple[torch.Tensor]:
379
+ mixed_query_layer = self.query(hidden_states)
380
+
381
+ # If this is instantiated as a cross-attention module, the keys
382
+ # and values come from an encoder; the attention mask needs to be
383
+ # such that the encoder's padding tokens are not attended to.
384
+ is_cross_attention = encoder_hidden_states is not None
385
+
386
+ if is_cross_attention and past_key_value is not None:
387
+ # reuse k,v, cross_attentions
388
+ key_layer = past_key_value[0]
389
+ value_layer = past_key_value[1]
390
+ attention_mask = encoder_attention_mask
391
+ elif is_cross_attention:
392
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
393
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
394
+ attention_mask = encoder_attention_mask
395
+ elif past_key_value is not None:
396
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
397
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
398
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
399
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
400
+ else:
401
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
402
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
403
+
404
+ query_layer = self.transpose_for_scores(mixed_query_layer)
405
+
406
+ use_cache = past_key_value is not None
407
+ if self.is_decoder:
408
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
409
+ # Further calls to cross_attention layer can then reuse all cross-attention
410
+ # key/value_states (first "if" case)
411
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
412
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
413
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
414
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
415
+ past_key_value = (key_layer, value_layer)
416
+
417
+ # Take the dot product between "query" and "key" to get the raw attention scores.
418
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
419
+
420
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
421
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
422
+ if use_cache:
423
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
424
+ -1, 1
425
+ )
426
+ else:
427
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
428
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
429
+ distance = position_ids_l - position_ids_r
430
+
431
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
432
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
433
+
434
+ if self.position_embedding_type == "relative_key":
435
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
436
+ attention_scores = attention_scores + relative_position_scores
437
+ elif self.position_embedding_type == "relative_key_query":
438
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
439
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
440
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
441
+
442
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
443
+ if attention_mask is not None:
444
+ # Apply the attention mask is (precomputed for all layers in MarkupLMModel forward() function)
445
+ attention_scores = attention_scores + attention_mask
446
+
447
+ # Normalize the attention scores to probabilities.
448
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
449
+
450
+ # This is actually dropping out entire tokens to attend to, which might
451
+ # seem a bit unusual, but is taken from the original Transformer paper.
452
+ attention_probs = self.dropout(attention_probs)
453
+
454
+ # Mask heads if we want to
455
+ if head_mask is not None:
456
+ attention_probs = attention_probs * head_mask
457
+
458
+ context_layer = torch.matmul(attention_probs, value_layer)
459
+
460
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
461
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
462
+ context_layer = context_layer.view(new_context_layer_shape)
463
+
464
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
465
+
466
+ if self.is_decoder:
467
+ outputs = outputs + (past_key_value,)
468
+ return outputs
469
+
470
+
471
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->MarkupLM
472
+ class MarkupLMAttention(nn.Module):
473
+ def __init__(self, config, position_embedding_type=None):
474
+ super().__init__()
475
+ self.self = MarkupLMSelfAttention(config, position_embedding_type=position_embedding_type)
476
+ self.output = MarkupLMSelfOutput(config)
477
+ self.pruned_heads = set()
478
+
479
+ def prune_heads(self, heads):
480
+ if len(heads) == 0:
481
+ return
482
+ heads, index = find_pruneable_heads_and_indices(
483
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
484
+ )
485
+
486
+ # Prune linear layers
487
+ self.self.query = prune_linear_layer(self.self.query, index)
488
+ self.self.key = prune_linear_layer(self.self.key, index)
489
+ self.self.value = prune_linear_layer(self.self.value, index)
490
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
491
+
492
+ # Update hyper params and store pruned heads
493
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
494
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
495
+ self.pruned_heads = self.pruned_heads.union(heads)
496
+
497
+ def forward(
498
+ self,
499
+ hidden_states: torch.Tensor,
500
+ attention_mask: Optional[torch.FloatTensor] = None,
501
+ head_mask: Optional[torch.FloatTensor] = None,
502
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
503
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
504
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
505
+ output_attentions: Optional[bool] = False,
506
+ ) -> Tuple[torch.Tensor]:
507
+ self_outputs = self.self(
508
+ hidden_states,
509
+ attention_mask,
510
+ head_mask,
511
+ encoder_hidden_states,
512
+ encoder_attention_mask,
513
+ past_key_value,
514
+ output_attentions,
515
+ )
516
+ attention_output = self.output(self_outputs[0], hidden_states)
517
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
518
+ return outputs
519
+
520
+
521
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->MarkupLM
522
+ class MarkupLMLayer(nn.Module):
523
+ def __init__(self, config):
524
+ super().__init__()
525
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
526
+ self.seq_len_dim = 1
527
+ self.attention = MarkupLMAttention(config)
528
+ self.is_decoder = config.is_decoder
529
+ self.add_cross_attention = config.add_cross_attention
530
+ if self.add_cross_attention:
531
+ if not self.is_decoder:
532
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
533
+ self.crossattention = MarkupLMAttention(config, position_embedding_type="absolute")
534
+ self.intermediate = MarkupLMIntermediate(config)
535
+ self.output = MarkupLMOutput(config)
536
+
537
+ def forward(
538
+ self,
539
+ hidden_states: torch.Tensor,
540
+ attention_mask: Optional[torch.FloatTensor] = None,
541
+ head_mask: Optional[torch.FloatTensor] = None,
542
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
543
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
544
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
545
+ output_attentions: Optional[bool] = False,
546
+ ) -> Tuple[torch.Tensor]:
547
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
548
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
549
+ self_attention_outputs = self.attention(
550
+ hidden_states,
551
+ attention_mask,
552
+ head_mask,
553
+ output_attentions=output_attentions,
554
+ past_key_value=self_attn_past_key_value,
555
+ )
556
+ attention_output = self_attention_outputs[0]
557
+
558
+ # if decoder, the last output is tuple of self-attn cache
559
+ if self.is_decoder:
560
+ outputs = self_attention_outputs[1:-1]
561
+ present_key_value = self_attention_outputs[-1]
562
+ else:
563
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
564
+
565
+ cross_attn_present_key_value = None
566
+ if self.is_decoder and encoder_hidden_states is not None:
567
+ if not hasattr(self, "crossattention"):
568
+ raise ValueError(
569
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
570
+ " by setting `config.add_cross_attention=True`"
571
+ )
572
+
573
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
574
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
575
+ cross_attention_outputs = self.crossattention(
576
+ attention_output,
577
+ attention_mask,
578
+ head_mask,
579
+ encoder_hidden_states,
580
+ encoder_attention_mask,
581
+ cross_attn_past_key_value,
582
+ output_attentions,
583
+ )
584
+ attention_output = cross_attention_outputs[0]
585
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
586
+
587
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
588
+ cross_attn_present_key_value = cross_attention_outputs[-1]
589
+ present_key_value = present_key_value + cross_attn_present_key_value
590
+
591
+ layer_output = apply_chunking_to_forward(
592
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
593
+ )
594
+ outputs = (layer_output,) + outputs
595
+
596
+ # if decoder, return the attn key/values as the last output
597
+ if self.is_decoder:
598
+ outputs = outputs + (present_key_value,)
599
+
600
+ return outputs
601
+
602
+ def feed_forward_chunk(self, attention_output):
603
+ intermediate_output = self.intermediate(attention_output)
604
+ layer_output = self.output(intermediate_output, attention_output)
605
+ return layer_output
606
+
607
+
608
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->MarkupLM
609
+ class MarkupLMEncoder(nn.Module):
610
+ def __init__(self, config):
611
+ super().__init__()
612
+ self.config = config
613
+ self.layer = nn.ModuleList([MarkupLMLayer(config) for _ in range(config.num_hidden_layers)])
614
+ self.gradient_checkpointing = False
615
+
616
+ def forward(
617
+ self,
618
+ hidden_states: torch.Tensor,
619
+ attention_mask: Optional[torch.FloatTensor] = None,
620
+ head_mask: Optional[torch.FloatTensor] = None,
621
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
622
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
623
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
624
+ use_cache: Optional[bool] = None,
625
+ output_attentions: Optional[bool] = False,
626
+ output_hidden_states: Optional[bool] = False,
627
+ return_dict: Optional[bool] = True,
628
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
629
+ all_hidden_states = () if output_hidden_states else None
630
+ all_self_attentions = () if output_attentions else None
631
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
632
+
633
+ if self.gradient_checkpointing and self.training:
634
+ if use_cache:
635
+ logger.warning_once(
636
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
637
+ )
638
+ use_cache = False
639
+
640
+ next_decoder_cache = () if use_cache else None
641
+ for i, layer_module in enumerate(self.layer):
642
+ if output_hidden_states:
643
+ all_hidden_states = all_hidden_states + (hidden_states,)
644
+
645
+ layer_head_mask = head_mask[i] if head_mask is not None else None
646
+ past_key_value = past_key_values[i] if past_key_values is not None else None
647
+
648
+ if self.gradient_checkpointing and self.training:
649
+ layer_outputs = self._gradient_checkpointing_func(
650
+ layer_module.__call__,
651
+ hidden_states,
652
+ attention_mask,
653
+ layer_head_mask,
654
+ encoder_hidden_states,
655
+ encoder_attention_mask,
656
+ past_key_value,
657
+ output_attentions,
658
+ )
659
+ else:
660
+ layer_outputs = layer_module(
661
+ hidden_states,
662
+ attention_mask,
663
+ layer_head_mask,
664
+ encoder_hidden_states,
665
+ encoder_attention_mask,
666
+ past_key_value,
667
+ output_attentions,
668
+ )
669
+
670
+ hidden_states = layer_outputs[0]
671
+ if use_cache:
672
+ next_decoder_cache += (layer_outputs[-1],)
673
+ if output_attentions:
674
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
675
+ if self.config.add_cross_attention:
676
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
677
+
678
+ if output_hidden_states:
679
+ all_hidden_states = all_hidden_states + (hidden_states,)
680
+
681
+ if not return_dict:
682
+ return tuple(
683
+ v
684
+ for v in [
685
+ hidden_states,
686
+ next_decoder_cache,
687
+ all_hidden_states,
688
+ all_self_attentions,
689
+ all_cross_attentions,
690
+ ]
691
+ if v is not None
692
+ )
693
+ return BaseModelOutputWithPastAndCrossAttentions(
694
+ last_hidden_state=hidden_states,
695
+ past_key_values=next_decoder_cache,
696
+ hidden_states=all_hidden_states,
697
+ attentions=all_self_attentions,
698
+ cross_attentions=all_cross_attentions,
699
+ )
700
+
701
+
702
+ class MarkupLMPreTrainedModel(PreTrainedModel):
703
+ """
704
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
705
+ models.
706
+ """
707
+
708
+ config_class = MarkupLMConfig
709
+ base_model_prefix = "markuplm"
710
+
711
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights with Bert->MarkupLM
712
+ def _init_weights(self, module):
713
+ """Initialize the weights"""
714
+ if isinstance(module, nn.Linear):
715
+ # Slightly different from the TF version which uses truncated_normal for initialization
716
+ # cf https://github.com/pytorch/pytorch/pull/5617
717
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
718
+ if module.bias is not None:
719
+ module.bias.data.zero_()
720
+ elif isinstance(module, nn.Embedding):
721
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
722
+ if module.padding_idx is not None:
723
+ module.weight.data[module.padding_idx].zero_()
724
+ elif isinstance(module, nn.LayerNorm):
725
+ module.bias.data.zero_()
726
+ module.weight.data.fill_(1.0)
727
+
728
+ @classmethod
729
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
730
+ return super(MarkupLMPreTrainedModel, cls).from_pretrained(
731
+ pretrained_model_name_or_path, *model_args, **kwargs
732
+ )
733
+
734
+
735
+ MARKUPLM_START_DOCSTRING = r"""
736
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
737
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
738
+ behavior.
739
+
740
+ Parameters:
741
+ config ([`MarkupLMConfig`]): Model configuration class with all the parameters of the model.
742
+ Initializing with a config file does not load the weights associated with the model, only the
743
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
744
+ """
745
+
746
+ MARKUPLM_INPUTS_DOCSTRING = r"""
747
+ Args:
748
+ input_ids (`torch.LongTensor` of shape `({0})`):
749
+ Indices of input sequence tokens in the vocabulary.
750
+
751
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
752
+ [`PreTrainedTokenizer.__call__`] for details.
753
+
754
+ [What are input IDs?](../glossary#input-ids)
755
+
756
+ xpath_tags_seq (`torch.LongTensor` of shape `({0}, config.max_depth)`, *optional*):
757
+ Tag IDs for each token in the input sequence, padded up to config.max_depth.
758
+
759
+ xpath_subs_seq (`torch.LongTensor` of shape `({0}, config.max_depth)`, *optional*):
760
+ Subscript IDs for each token in the input sequence, padded up to config.max_depth.
761
+
762
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
763
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: `1` for
764
+ tokens that are NOT MASKED, `0` for MASKED tokens.
765
+
766
+ [What are attention masks?](../glossary#attention-mask)
767
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
768
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
769
+ 1]`: `0` corresponds to a *sentence A* token, `1` corresponds to a *sentence B* token
770
+
771
+ [What are token type IDs?](../glossary#token-type-ids)
772
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
773
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
774
+ config.max_position_embeddings - 1]`.
775
+
776
+ [What are position IDs?](../glossary#position-ids)
777
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
778
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: `1`
779
+ indicates the head is **not masked**, `0` indicates the head is **masked**.
780
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
781
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
782
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
783
+ model's internal embedding lookup matrix.
784
+ output_attentions (`bool`, *optional*):
785
+ If set to `True`, the attentions tensors of all attention layers are returned. See `attentions` under
786
+ returned tensors for more detail.
787
+ output_hidden_states (`bool`, *optional*):
788
+ If set to `True`, the hidden states of all layers are returned. See `hidden_states` under returned tensors
789
+ for more detail.
790
+ return_dict (`bool`, *optional*):
791
+ If set to `True`, the model will return a [`~file_utils.ModelOutput`] instead of a plain tuple.
792
+ """
793
+
794
+
795
+ @add_start_docstrings(
796
+ "The bare MarkupLM Model transformer outputting raw hidden-states without any specific head on top.",
797
+ MARKUPLM_START_DOCSTRING,
798
+ )
799
+ class MarkupLMModel(MarkupLMPreTrainedModel):
800
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->MarkupLM
801
+ def __init__(self, config, add_pooling_layer=True):
802
+ super().__init__(config)
803
+ self.config = config
804
+
805
+ self.embeddings = MarkupLMEmbeddings(config)
806
+ self.encoder = MarkupLMEncoder(config)
807
+
808
+ self.pooler = MarkupLMPooler(config) if add_pooling_layer else None
809
+
810
+ # Initialize weights and apply final processing
811
+ self.post_init()
812
+
813
+ def get_input_embeddings(self):
814
+ return self.embeddings.word_embeddings
815
+
816
+ def set_input_embeddings(self, value):
817
+ self.embeddings.word_embeddings = value
818
+
819
+ def _prune_heads(self, heads_to_prune):
820
+ """
821
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
822
+ class PreTrainedModel
823
+ """
824
+ for layer, heads in heads_to_prune.items():
825
+ self.encoder.layer[layer].attention.prune_heads(heads)
826
+
827
+ @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
828
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
829
+ def forward(
830
+ self,
831
+ input_ids: Optional[torch.LongTensor] = None,
832
+ xpath_tags_seq: Optional[torch.LongTensor] = None,
833
+ xpath_subs_seq: Optional[torch.LongTensor] = None,
834
+ attention_mask: Optional[torch.FloatTensor] = None,
835
+ token_type_ids: Optional[torch.LongTensor] = None,
836
+ position_ids: Optional[torch.LongTensor] = None,
837
+ head_mask: Optional[torch.FloatTensor] = None,
838
+ inputs_embeds: Optional[torch.FloatTensor] = None,
839
+ output_attentions: Optional[bool] = None,
840
+ output_hidden_states: Optional[bool] = None,
841
+ return_dict: Optional[bool] = None,
842
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
843
+ r"""
844
+ Returns:
845
+
846
+ Examples:
847
+
848
+ ```python
849
+ >>> from transformers import AutoProcessor, MarkupLMModel
850
+
851
+ >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
852
+ >>> model = MarkupLMModel.from_pretrained("microsoft/markuplm-base")
853
+
854
+ >>> html_string = "<html> <head> <title>Page Title</title> </head> </html>"
855
+
856
+ >>> encoding = processor(html_string, return_tensors="pt")
857
+
858
+ >>> outputs = model(**encoding)
859
+ >>> last_hidden_states = outputs.last_hidden_state
860
+ >>> list(last_hidden_states.shape)
861
+ [1, 4, 768]
862
+ ```"""
863
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
864
+ output_hidden_states = (
865
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
866
+ )
867
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
868
+
869
+ if input_ids is not None and inputs_embeds is not None:
870
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
871
+ elif input_ids is not None:
872
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
873
+ input_shape = input_ids.size()
874
+ elif inputs_embeds is not None:
875
+ input_shape = inputs_embeds.size()[:-1]
876
+ else:
877
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
878
+
879
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
880
+
881
+ if attention_mask is None:
882
+ attention_mask = torch.ones(input_shape, device=device)
883
+
884
+ if token_type_ids is None:
885
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
886
+
887
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
888
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
889
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
890
+
891
+ if head_mask is not None:
892
+ if head_mask.dim() == 1:
893
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
894
+ head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
895
+ elif head_mask.dim() == 2:
896
+ head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
897
+ head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
898
+ else:
899
+ head_mask = [None] * self.config.num_hidden_layers
900
+
901
+ embedding_output = self.embeddings(
902
+ input_ids=input_ids,
903
+ xpath_tags_seq=xpath_tags_seq,
904
+ xpath_subs_seq=xpath_subs_seq,
905
+ position_ids=position_ids,
906
+ token_type_ids=token_type_ids,
907
+ inputs_embeds=inputs_embeds,
908
+ )
909
+ encoder_outputs = self.encoder(
910
+ embedding_output,
911
+ extended_attention_mask,
912
+ head_mask=head_mask,
913
+ output_attentions=output_attentions,
914
+ output_hidden_states=output_hidden_states,
915
+ return_dict=return_dict,
916
+ )
917
+ sequence_output = encoder_outputs[0]
918
+
919
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
920
+
921
+ if not return_dict:
922
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
923
+
924
+ return BaseModelOutputWithPoolingAndCrossAttentions(
925
+ last_hidden_state=sequence_output,
926
+ pooler_output=pooled_output,
927
+ hidden_states=encoder_outputs.hidden_states,
928
+ attentions=encoder_outputs.attentions,
929
+ cross_attentions=encoder_outputs.cross_attentions,
930
+ )
931
+
932
+ # Copied from transformers.models.bert.modeling_bert.BertModel.prepare_inputs_for_generation
933
+ def prepare_inputs_for_generation(
934
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs
935
+ ):
936
+ input_shape = input_ids.shape
937
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
938
+ if attention_mask is None:
939
+ attention_mask = input_ids.new_ones(input_shape)
940
+
941
+ # cut decoder_input_ids if past_key_values is used
942
+ if past_key_values is not None:
943
+ past_length = past_key_values[0][0].shape[2]
944
+
945
+ # Some generation methods already pass only the last input ID
946
+ if input_ids.shape[1] > past_length:
947
+ remove_prefix_length = past_length
948
+ else:
949
+ # Default to old behavior: keep only final ID
950
+ remove_prefix_length = input_ids.shape[1] - 1
951
+
952
+ input_ids = input_ids[:, remove_prefix_length:]
953
+
954
+ return {
955
+ "input_ids": input_ids,
956
+ "attention_mask": attention_mask,
957
+ "past_key_values": past_key_values,
958
+ "use_cache": use_cache,
959
+ }
960
+
961
+ # Copied from transformers.models.bert.modeling_bert.BertModel._reorder_cache
962
+ def _reorder_cache(self, past_key_values, beam_idx):
963
+ reordered_past = ()
964
+ for layer_past in past_key_values:
965
+ reordered_past += (
966
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
967
+ )
968
+ return reordered_past
969
+
970
+
971
+ @add_start_docstrings(
972
+ """
973
+ MarkupLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
974
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
975
+ """,
976
+ MARKUPLM_START_DOCSTRING,
977
+ )
978
+ class MarkupLMForQuestionAnswering(MarkupLMPreTrainedModel):
979
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with bert->markuplm, Bert->MarkupLM
980
+ def __init__(self, config):
981
+ super().__init__(config)
982
+ self.num_labels = config.num_labels
983
+
984
+ self.markuplm = MarkupLMModel(config, add_pooling_layer=False)
985
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
986
+
987
+ # Initialize weights and apply final processing
988
+ self.post_init()
989
+
990
+ @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
991
+ @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
992
+ def forward(
993
+ self,
994
+ input_ids: Optional[torch.Tensor] = None,
995
+ xpath_tags_seq: Optional[torch.Tensor] = None,
996
+ xpath_subs_seq: Optional[torch.Tensor] = None,
997
+ attention_mask: Optional[torch.Tensor] = None,
998
+ token_type_ids: Optional[torch.Tensor] = None,
999
+ position_ids: Optional[torch.Tensor] = None,
1000
+ head_mask: Optional[torch.Tensor] = None,
1001
+ inputs_embeds: Optional[torch.Tensor] = None,
1002
+ start_positions: Optional[torch.Tensor] = None,
1003
+ end_positions: Optional[torch.Tensor] = None,
1004
+ output_attentions: Optional[bool] = None,
1005
+ output_hidden_states: Optional[bool] = None,
1006
+ return_dict: Optional[bool] = None,
1007
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1008
+ r"""
1009
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1010
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1011
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1012
+ are not taken into account for computing the loss.
1013
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1014
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1015
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1016
+ are not taken into account for computing the loss.
1017
+
1018
+ Returns:
1019
+
1020
+ Examples:
1021
+
1022
+ ```python
1023
+ >>> from transformers import AutoProcessor, MarkupLMForQuestionAnswering
1024
+ >>> import torch
1025
+
1026
+ >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base-finetuned-websrc")
1027
+ >>> model = MarkupLMForQuestionAnswering.from_pretrained("microsoft/markuplm-base-finetuned-websrc")
1028
+
1029
+ >>> html_string = "<html> <head> <title>My name is Niels</title> </head> </html>"
1030
+ >>> question = "What's his name?"
1031
+
1032
+ >>> encoding = processor(html_string, questions=question, return_tensors="pt")
1033
+
1034
+ >>> with torch.no_grad():
1035
+ ... outputs = model(**encoding)
1036
+
1037
+ >>> answer_start_index = outputs.start_logits.argmax()
1038
+ >>> answer_end_index = outputs.end_logits.argmax()
1039
+
1040
+ >>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1]
1041
+ >>> processor.decode(predict_answer_tokens).strip()
1042
+ 'Niels'
1043
+ ```"""
1044
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1045
+
1046
+ outputs = self.markuplm(
1047
+ input_ids,
1048
+ xpath_tags_seq=xpath_tags_seq,
1049
+ xpath_subs_seq=xpath_subs_seq,
1050
+ attention_mask=attention_mask,
1051
+ token_type_ids=token_type_ids,
1052
+ position_ids=position_ids,
1053
+ head_mask=head_mask,
1054
+ inputs_embeds=inputs_embeds,
1055
+ output_attentions=output_attentions,
1056
+ output_hidden_states=output_hidden_states,
1057
+ return_dict=return_dict,
1058
+ )
1059
+
1060
+ sequence_output = outputs[0]
1061
+
1062
+ logits = self.qa_outputs(sequence_output)
1063
+ start_logits, end_logits = logits.split(1, dim=-1)
1064
+ start_logits = start_logits.squeeze(-1).contiguous()
1065
+ end_logits = end_logits.squeeze(-1).contiguous()
1066
+
1067
+ total_loss = None
1068
+ if start_positions is not None and end_positions is not None:
1069
+ # If we are on multi-GPU, split adds a dimension, so squeeze it
1070
+ if len(start_positions.size()) > 1:
1071
+ start_positions = start_positions.squeeze(-1)
1072
+ if len(end_positions.size()) > 1:
1073
+ end_positions = end_positions.squeeze(-1)
1074
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1075
+ ignored_index = start_logits.size(1)
1076
+ start_positions.clamp_(0, ignored_index)
1077
+ end_positions.clamp_(0, ignored_index)
1078
+
1079
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1080
+ start_loss = loss_fct(start_logits, start_positions)
1081
+ end_loss = loss_fct(end_logits, end_positions)
1082
+ total_loss = (start_loss + end_loss) / 2
1083
+
1084
+ if not return_dict:
1085
+ output = (start_logits, end_logits) + outputs[2:]
1086
+ return ((total_loss,) + output) if total_loss is not None else output
1087
+
1088
+ return QuestionAnsweringModelOutput(
1089
+ loss=total_loss,
1090
+ start_logits=start_logits,
1091
+ end_logits=end_logits,
1092
+ hidden_states=outputs.hidden_states,
1093
+ attentions=outputs.attentions,
1094
+ )
1095
+
1096
+
1097
+ @add_start_docstrings("""MarkupLM Model with a `token_classification` head on top.""", MARKUPLM_START_DOCSTRING)
1098
+ class MarkupLMForTokenClassification(MarkupLMPreTrainedModel):
1099
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with bert->markuplm, Bert->MarkupLM
1100
+ def __init__(self, config):
1101
+ super().__init__(config)
1102
+ self.num_labels = config.num_labels
1103
+
1104
+ self.markuplm = MarkupLMModel(config, add_pooling_layer=False)
1105
+ classifier_dropout = (
1106
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1107
+ )
1108
+ self.dropout = nn.Dropout(classifier_dropout)
1109
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1110
+
1111
+ # Initialize weights and apply final processing
1112
+ self.post_init()
1113
+
1114
+ @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1115
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1116
+ def forward(
1117
+ self,
1118
+ input_ids: Optional[torch.Tensor] = None,
1119
+ xpath_tags_seq: Optional[torch.Tensor] = None,
1120
+ xpath_subs_seq: Optional[torch.Tensor] = None,
1121
+ attention_mask: Optional[torch.Tensor] = None,
1122
+ token_type_ids: Optional[torch.Tensor] = None,
1123
+ position_ids: Optional[torch.Tensor] = None,
1124
+ head_mask: Optional[torch.Tensor] = None,
1125
+ inputs_embeds: Optional[torch.Tensor] = None,
1126
+ labels: Optional[torch.Tensor] = None,
1127
+ output_attentions: Optional[bool] = None,
1128
+ output_hidden_states: Optional[bool] = None,
1129
+ return_dict: Optional[bool] = None,
1130
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1131
+ r"""
1132
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1133
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1134
+
1135
+ Returns:
1136
+
1137
+ Examples:
1138
+
1139
+ ```python
1140
+ >>> from transformers import AutoProcessor, AutoModelForTokenClassification
1141
+ >>> import torch
1142
+
1143
+ >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
1144
+ >>> processor.parse_html = False
1145
+ >>> model = AutoModelForTokenClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
1146
+
1147
+ >>> nodes = ["hello", "world"]
1148
+ >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
1149
+ >>> node_labels = [1, 2]
1150
+ >>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt")
1151
+
1152
+ >>> with torch.no_grad():
1153
+ ... outputs = model(**encoding)
1154
+
1155
+ >>> loss = outputs.loss
1156
+ >>> logits = outputs.logits
1157
+ ```"""
1158
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1159
+
1160
+ outputs = self.markuplm(
1161
+ input_ids,
1162
+ xpath_tags_seq=xpath_tags_seq,
1163
+ xpath_subs_seq=xpath_subs_seq,
1164
+ attention_mask=attention_mask,
1165
+ token_type_ids=token_type_ids,
1166
+ position_ids=position_ids,
1167
+ head_mask=head_mask,
1168
+ inputs_embeds=inputs_embeds,
1169
+ output_attentions=output_attentions,
1170
+ output_hidden_states=output_hidden_states,
1171
+ return_dict=return_dict,
1172
+ )
1173
+
1174
+ sequence_output = outputs[0]
1175
+ prediction_scores = self.classifier(sequence_output) # (batch_size, seq_length, node_type_size)
1176
+
1177
+ loss = None
1178
+ if labels is not None:
1179
+ loss_fct = CrossEntropyLoss()
1180
+ loss = loss_fct(
1181
+ prediction_scores.view(-1, self.config.num_labels),
1182
+ labels.view(-1),
1183
+ )
1184
+
1185
+ if not return_dict:
1186
+ output = (prediction_scores,) + outputs[2:]
1187
+ return ((loss,) + output) if loss is not None else output
1188
+
1189
+ return TokenClassifierOutput(
1190
+ loss=loss,
1191
+ logits=prediction_scores,
1192
+ hidden_states=outputs.hidden_states,
1193
+ attentions=outputs.attentions,
1194
+ )
1195
+
1196
+
1197
+ @add_start_docstrings(
1198
+ """
1199
+ MarkupLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1200
+ pooled output) e.g. for GLUE tasks.
1201
+ """,
1202
+ MARKUPLM_START_DOCSTRING,
1203
+ )
1204
+ class MarkupLMForSequenceClassification(MarkupLMPreTrainedModel):
1205
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with bert->markuplm, Bert->MarkupLM
1206
+ def __init__(self, config):
1207
+ super().__init__(config)
1208
+ self.num_labels = config.num_labels
1209
+ self.config = config
1210
+
1211
+ self.markuplm = MarkupLMModel(config)
1212
+ classifier_dropout = (
1213
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1214
+ )
1215
+ self.dropout = nn.Dropout(classifier_dropout)
1216
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1217
+
1218
+ # Initialize weights and apply final processing
1219
+ self.post_init()
1220
+
1221
+ @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1222
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
1223
+ def forward(
1224
+ self,
1225
+ input_ids: Optional[torch.Tensor] = None,
1226
+ xpath_tags_seq: Optional[torch.Tensor] = None,
1227
+ xpath_subs_seq: Optional[torch.Tensor] = None,
1228
+ attention_mask: Optional[torch.Tensor] = None,
1229
+ token_type_ids: Optional[torch.Tensor] = None,
1230
+ position_ids: Optional[torch.Tensor] = None,
1231
+ head_mask: Optional[torch.Tensor] = None,
1232
+ inputs_embeds: Optional[torch.Tensor] = None,
1233
+ labels: Optional[torch.Tensor] = None,
1234
+ output_attentions: Optional[bool] = None,
1235
+ output_hidden_states: Optional[bool] = None,
1236
+ return_dict: Optional[bool] = None,
1237
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1238
+ r"""
1239
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1240
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1241
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1242
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1243
+
1244
+ Returns:
1245
+
1246
+ Examples:
1247
+
1248
+ ```python
1249
+ >>> from transformers import AutoProcessor, AutoModelForSequenceClassification
1250
+ >>> import torch
1251
+
1252
+ >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
1253
+ >>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
1254
+
1255
+ >>> html_string = "<html> <head> <title>Page Title</title> </head> </html>"
1256
+ >>> encoding = processor(html_string, return_tensors="pt")
1257
+
1258
+ >>> with torch.no_grad():
1259
+ ... outputs = model(**encoding)
1260
+
1261
+ >>> loss = outputs.loss
1262
+ >>> logits = outputs.logits
1263
+ ```"""
1264
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1265
+
1266
+ outputs = self.markuplm(
1267
+ input_ids,
1268
+ xpath_tags_seq=xpath_tags_seq,
1269
+ xpath_subs_seq=xpath_subs_seq,
1270
+ attention_mask=attention_mask,
1271
+ token_type_ids=token_type_ids,
1272
+ position_ids=position_ids,
1273
+ head_mask=head_mask,
1274
+ inputs_embeds=inputs_embeds,
1275
+ output_attentions=output_attentions,
1276
+ output_hidden_states=output_hidden_states,
1277
+ return_dict=return_dict,
1278
+ )
1279
+
1280
+ pooled_output = outputs[1]
1281
+
1282
+ pooled_output = self.dropout(pooled_output)
1283
+ logits = self.classifier(pooled_output)
1284
+
1285
+ loss = None
1286
+ if labels is not None:
1287
+ if self.config.problem_type is None:
1288
+ if self.num_labels == 1:
1289
+ self.config.problem_type = "regression"
1290
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1291
+ self.config.problem_type = "single_label_classification"
1292
+ else:
1293
+ self.config.problem_type = "multi_label_classification"
1294
+
1295
+ if self.config.problem_type == "regression":
1296
+ loss_fct = MSELoss()
1297
+ if self.num_labels == 1:
1298
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1299
+ else:
1300
+ loss = loss_fct(logits, labels)
1301
+ elif self.config.problem_type == "single_label_classification":
1302
+ loss_fct = CrossEntropyLoss()
1303
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1304
+ elif self.config.problem_type == "multi_label_classification":
1305
+ loss_fct = BCEWithLogitsLoss()
1306
+ loss = loss_fct(logits, labels)
1307
+ if not return_dict:
1308
+ output = (logits,) + outputs[2:]
1309
+ return ((loss,) + output) if loss is not None else output
1310
+
1311
+ return SequenceClassifierOutput(
1312
+ loss=loss,
1313
+ logits=logits,
1314
+ hidden_states=outputs.hidden_states,
1315
+ attentions=outputs.attentions,
1316
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/processing_markuplm.py ADDED
@@ -0,0 +1,146 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for MarkupLM.
17
+ """
18
+ from typing import Optional, Union
19
+
20
+ from ...file_utils import TensorType
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TruncationStrategy
23
+
24
+
25
+ class MarkupLMProcessor(ProcessorMixin):
26
+ r"""
27
+ Constructs a MarkupLM processor which combines a MarkupLM feature extractor and a MarkupLM tokenizer into a single
28
+ processor.
29
+
30
+ [`MarkupLMProcessor`] offers all the functionalities you need to prepare data for the model.
31
+
32
+ It first uses [`MarkupLMFeatureExtractor`] to extract nodes and corresponding xpaths from one or more HTML strings.
33
+ Next, these are provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which turns them into token-level
34
+ `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`.
35
+
36
+ Args:
37
+ feature_extractor (`MarkupLMFeatureExtractor`):
38
+ An instance of [`MarkupLMFeatureExtractor`]. The feature extractor is a required input.
39
+ tokenizer (`MarkupLMTokenizer` or `MarkupLMTokenizerFast`):
40
+ An instance of [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. The tokenizer is a required input.
41
+ parse_html (`bool`, *optional*, defaults to `True`):
42
+ Whether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths.
43
+ """
44
+
45
+ feature_extractor_class = "MarkupLMFeatureExtractor"
46
+ tokenizer_class = ("MarkupLMTokenizer", "MarkupLMTokenizerFast")
47
+ parse_html = True
48
+
49
+ def __call__(
50
+ self,
51
+ html_strings=None,
52
+ nodes=None,
53
+ xpaths=None,
54
+ node_labels=None,
55
+ questions=None,
56
+ add_special_tokens: bool = True,
57
+ padding: Union[bool, str, PaddingStrategy] = False,
58
+ truncation: Union[bool, str, TruncationStrategy] = None,
59
+ max_length: Optional[int] = None,
60
+ stride: int = 0,
61
+ pad_to_multiple_of: Optional[int] = None,
62
+ return_token_type_ids: Optional[bool] = None,
63
+ return_attention_mask: Optional[bool] = None,
64
+ return_overflowing_tokens: bool = False,
65
+ return_special_tokens_mask: bool = False,
66
+ return_offsets_mapping: bool = False,
67
+ return_length: bool = False,
68
+ verbose: bool = True,
69
+ return_tensors: Optional[Union[str, TensorType]] = None,
70
+ **kwargs,
71
+ ) -> BatchEncoding:
72
+ """
73
+ This method first forwards the `html_strings` argument to [`~MarkupLMFeatureExtractor.__call__`]. Next, it
74
+ passes the `nodes` and `xpaths` along with the additional arguments to [`~MarkupLMTokenizer.__call__`] and
75
+ returns the output.
76
+
77
+ Optionally, one can also provide a `text` argument which is passed along as first sequence.
78
+
79
+ Please refer to the docstring of the above two methods for more information.
80
+ """
81
+ # first, create nodes and xpaths
82
+ if self.parse_html:
83
+ if html_strings is None:
84
+ raise ValueError("Make sure to pass HTML strings in case `parse_html` is set to `True`")
85
+
86
+ if nodes is not None or xpaths is not None or node_labels is not None:
87
+ raise ValueError(
88
+ "Please don't pass nodes, xpaths nor node labels in case `parse_html` is set to `True`"
89
+ )
90
+
91
+ features = self.feature_extractor(html_strings)
92
+ nodes = features["nodes"]
93
+ xpaths = features["xpaths"]
94
+ else:
95
+ if html_strings is not None:
96
+ raise ValueError("You have passed HTML strings but `parse_html` is set to `False`.")
97
+ if nodes is None or xpaths is None:
98
+ raise ValueError("Make sure to pass nodes and xpaths in case `parse_html` is set to `False`")
99
+
100
+ # second, apply the tokenizer
101
+ if questions is not None and self.parse_html:
102
+ if isinstance(questions, str):
103
+ questions = [questions] # add batch dimension (as the feature extractor always adds a batch dimension)
104
+
105
+ encoded_inputs = self.tokenizer(
106
+ text=questions if questions is not None else nodes,
107
+ text_pair=nodes if questions is not None else None,
108
+ xpaths=xpaths,
109
+ node_labels=node_labels,
110
+ add_special_tokens=add_special_tokens,
111
+ padding=padding,
112
+ truncation=truncation,
113
+ max_length=max_length,
114
+ stride=stride,
115
+ pad_to_multiple_of=pad_to_multiple_of,
116
+ return_token_type_ids=return_token_type_ids,
117
+ return_attention_mask=return_attention_mask,
118
+ return_overflowing_tokens=return_overflowing_tokens,
119
+ return_special_tokens_mask=return_special_tokens_mask,
120
+ return_offsets_mapping=return_offsets_mapping,
121
+ return_length=return_length,
122
+ verbose=verbose,
123
+ return_tensors=return_tensors,
124
+ **kwargs,
125
+ )
126
+
127
+ return encoded_inputs
128
+
129
+ def batch_decode(self, *args, **kwargs):
130
+ """
131
+ This method forwards all its arguments to MarkupLMTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
132
+ to the docstring of this method for more information.
133
+ """
134
+ return self.tokenizer.batch_decode(*args, **kwargs)
135
+
136
+ def decode(self, *args, **kwargs):
137
+ """
138
+ This method forwards all its arguments to MarkupLMTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
139
+ docstring of this method for more information.
140
+ """
141
+ return self.tokenizer.decode(*args, **kwargs)
142
+
143
+ @property
144
+ def model_input_names(self):
145
+ tokenizer_input_names = self.tokenizer.model_input_names
146
+ return tokenizer_input_names
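For reference, a minimal usage sketch of the processor defined above. It assumes the `microsoft/markuplm-base` checkpoint; the HTML string, nodes and xpaths are illustrative, and the two calling modes follow the `parse_html` behaviour documented in `__call__`:

```python
from transformers import MarkupLMProcessor

processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base")

# parse_html=True (default): raw HTML goes through MarkupLMFeatureExtractor first
html_string = "<html> <head> <title>Page Title</title> </head> </html>"
encoding = processor(html_string, return_tensors="pt")

# parse_html=False: nodes and xpaths are handed directly to the tokenizer
processor.parse_html = False
nodes = ["hello", "world"]
xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
encoding = processor(nodes=nodes, xpaths=xpaths, return_tensors="pt")

# Expected keys: input_ids, token_type_ids, attention_mask, xpath_tags_seq, xpath_subs_seq
print(sorted(encoding.keys()))
```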
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm.py ADDED
@@ -0,0 +1,1445 @@
1
+ # coding=utf-8
2
+ # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization class for MarkupLM."""
16
+
17
+ import json
18
+ import os
19
+ from functools import lru_cache
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import regex as re
23
+
24
+ from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
25
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
26
+ from ...tokenization_utils_base import (
27
+ ENCODE_KWARGS_DOCSTRING,
28
+ BatchEncoding,
29
+ EncodedInput,
30
+ PreTokenizedInput,
31
+ TextInput,
32
+ TextInputPair,
33
+ TruncationStrategy,
34
+ )
35
+ from ...utils import logging
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
41
+
42
+
43
+ MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
44
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
45
+ Whether or not to encode the sequences with the special tokens relative to their model.
46
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
47
+ Activates and controls padding. Accepts the following values:
48
+
49
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
50
+ sequence is provided).
51
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
52
+ acceptable input length for the model if that argument is not provided.
53
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
54
+ lengths).
55
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
56
+ Activates and controls truncation. Accepts the following values:
57
+
58
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
59
+ to the maximum acceptable input length for the model if that argument is not provided. This will
60
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
61
+ sequences (or a batch of pairs) is provided.
62
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
63
+ maximum acceptable input length for the model if that argument is not provided. This will only
64
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
65
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
66
+ maximum acceptable input length for the model if that argument is not provided. This will only
67
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
68
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
69
+ greater than the model maximum admissible input size).
70
+ max_length (`int`, *optional*):
71
+ Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
72
+ `None`, this will use the predefined model maximum length if a maximum length is required by one of the
73
+ truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
74
+ truncation/padding to a maximum length will be deactivated.
75
+ stride (`int`, *optional*, defaults to 0):
76
+ If set to a number along with `max_length`, the overflowing tokens returned when
77
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
78
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
79
+ argument defines the number of overlapping tokens.
80
+ pad_to_multiple_of (`int`, *optional*):
81
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
82
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
83
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
84
+ If set, will return tensors instead of list of python integers. Acceptable values are:
85
+
86
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
87
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
88
+ - `'np'`: Return Numpy `np.ndarray` objects.
89
+ """
90
+
91
+
92
+ @lru_cache()
93
+ def bytes_to_unicode():
94
+ """
95
+ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
96
+ characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large #
97
+ of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset
98
+ you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe
99
+ vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
100
+ """
101
+ bs = (
102
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
103
+ )
104
+ cs = bs[:]
105
+ n = 0
106
+ for b in range(2**8):
107
+ if b not in bs:
108
+ bs.append(b)
109
+ cs.append(2**8 + n)
110
+ n += 1
111
+ cs = [chr(n) for n in cs]
112
+ return dict(zip(bs, cs))
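A small illustration of the table this function builds (the values follow directly from the code above): printable ASCII bytes keep their own character, while excluded bytes such as the space are shifted into the 256+ range.

```python
byte_encoder = bytes_to_unicode()
print(byte_encoder[ord("A")])  # 'A'  -> printable bytes map to themselves
print(byte_encoder[ord(" ")])  # 'Ġ'  -> byte 32 is remapped to chr(256 + 32)
```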
113
+
114
+
115
+ def get_pairs(word):
116
+ """
117
+ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
118
+ strings).
119
+ """
120
+ pairs = set()
121
+ prev_char = word[0]
122
+ for char in word[1:]:
123
+ pairs.add((prev_char, char))
124
+ prev_char = char
125
+ return pairs
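For concreteness, the adjacent-pair set extracted from a five-symbol word (the printed set ordering may differ):

```python
print(get_pairs(tuple("hello")))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
```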
126
+
127
+
128
+ class MarkupLMTokenizer(PreTrainedTokenizer):
129
+ r"""
130
+ Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE). [`MarkupLMTokenizer`] can be used to
131
+ turn HTML strings into token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and
132
+ `xpath_subs_seq`. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods.
133
+ Users should refer to this superclass for more information regarding those methods.
134
+
135
+ Args:
136
+ vocab_file (`str`):
137
+ Path to the vocabulary file.
138
+ merges_file (`str`):
139
+ Path to the merges file.
140
+ errors (`str`, *optional*, defaults to `"replace"`):
141
+ Paradigm to follow when decoding bytes to UTF-8. See
142
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
143
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
144
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
145
+
146
+ <Tip>
147
+
148
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
149
+ sequence. The token used is the `cls_token`.
150
+
151
+ </Tip>
152
+
153
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
154
+ The end of sequence token.
155
+
156
+ <Tip>
157
+
158
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
159
+ The token used is the `sep_token`.
160
+
161
+ </Tip>
162
+
163
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
164
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
165
+ sequence classification or for a text and a question for question answering. It is also used as the last
166
+ token of a sequence built with special tokens.
167
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
168
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
169
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
170
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
171
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
172
+ token instead.
173
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
174
+ The token used for padding, for example when batching sequences of different lengths.
175
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
176
+ The token used for masking values. This is the token used when training this model with masked language
177
+ modeling. This is the token which the model will try to predict.
178
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
179
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
180
+ other word. (The RoBERTa tokenizer detects the beginning of a word by the preceding space.)
181
+ """
182
+
183
+ vocab_files_names = VOCAB_FILES_NAMES
184
+
185
+ def __init__(
186
+ self,
187
+ vocab_file,
188
+ merges_file,
189
+ tags_dict,
190
+ errors="replace",
191
+ bos_token="<s>",
192
+ eos_token="</s>",
193
+ sep_token="</s>",
194
+ cls_token="<s>",
195
+ unk_token="<unk>",
196
+ pad_token="<pad>",
197
+ mask_token="<mask>",
198
+ add_prefix_space=False,
199
+ max_depth=50,
200
+ max_width=1000,
201
+ pad_width=1001,
202
+ pad_token_label=-100,
203
+ only_label_first_subword=True,
204
+ **kwargs,
205
+ ):
206
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
207
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
208
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
209
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
210
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
211
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
212
+
213
+ # Mask token behave like a normal word, i.e. include the space before it
214
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
215
+
216
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
217
+ self.encoder = json.load(vocab_handle)
218
+
219
+ self.tags_dict = tags_dict
220
+ self.decoder = {v: k for k, v in self.encoder.items()}
221
+ self.errors = errors # how to handle errors in decoding
222
+ self.byte_encoder = bytes_to_unicode()
223
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
224
+ with open(merges_file, encoding="utf-8") as merges_handle:
225
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
226
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
227
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
228
+ self.cache = {}
229
+ self.add_prefix_space = add_prefix_space
230
+
231
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
232
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
233
+
234
+ # additional properties
235
+ self.max_depth = max_depth
236
+ self.max_width = max_width
237
+ self.pad_width = pad_width
238
+ self.unk_tag_id = len(self.tags_dict)
239
+ self.pad_tag_id = self.unk_tag_id + 1
240
+ self.pad_xpath_tags_seq = [self.pad_tag_id] * self.max_depth
241
+ self.pad_xpath_subs_seq = [self.pad_width] * self.max_depth
242
+
243
+ super().__init__(
244
+ vocab_file=vocab_file,
245
+ merges_file=merges_file,
246
+ tags_dict=tags_dict,
247
+ errors=errors,
248
+ bos_token=bos_token,
249
+ eos_token=eos_token,
250
+ unk_token=unk_token,
251
+ sep_token=sep_token,
252
+ cls_token=cls_token,
253
+ pad_token=pad_token,
254
+ mask_token=mask_token,
255
+ add_prefix_space=add_prefix_space,
256
+ max_depth=max_depth,
257
+ max_width=max_width,
258
+ pad_width=pad_width,
259
+ pad_token_label=pad_token_label,
260
+ only_label_first_subword=only_label_first_subword,
261
+ **kwargs,
262
+ )
263
+
264
+ self.pad_token_label = pad_token_label
265
+ self.only_label_first_subword = only_label_first_subword
266
+
267
+ def get_xpath_seq(self, xpath):
268
+ """
269
+ Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of
270
+ tag IDs and corresponding subscripts, taking into account max depth.
271
+ """
272
+ xpath_tags_list = []
273
+ xpath_subs_list = []
274
+
275
+ xpath_units = xpath.split("/")
276
+ for unit in xpath_units:
277
+ if not unit.strip():
278
+ continue
279
+ name_subs = unit.strip().split("[")
280
+ tag_name = name_subs[0]
281
+ sub = 0 if len(name_subs) == 1 else int(name_subs[1][:-1])
282
+ xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id))
283
+ xpath_subs_list.append(min(self.max_width, sub))
284
+
285
+ xpath_tags_list = xpath_tags_list[: self.max_depth]
286
+ xpath_subs_list = xpath_subs_list[: self.max_depth]
287
+ xpath_tags_list += [self.pad_tag_id] * (self.max_depth - len(xpath_tags_list))
288
+ xpath_subs_list += [self.pad_width] * (self.max_depth - len(xpath_subs_list))
289
+
290
+ return xpath_tags_list, xpath_subs_list
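A sketch of what `get_xpath_seq` returns, assuming a hypothetical `tags_dict` mapping `html`, `body`, `div`, `li`, `span` to 0..4, an already-instantiated `tokenizer`, and the defaults `max_depth=50` and `pad_width=1001` (the real checkpoint's `tags_dict` assigns different IDs):

```python
tags, subs = tokenizer.get_xpath_seq("/html/body/div/li[1]/div/span[2]")
# With the hypothetical tags_dict above, the six xpath units parse to:
#   tag IDs:    [0, 1, 2, 3, 2, 4]   then padded with pad_tag_id up to max_depth
#   subscripts: [0, 0, 0, 1, 0, 2]   then padded with pad_width (1001) up to max_depth
assert len(tags) == len(subs) == tokenizer.max_depth
```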
291
+
292
+ @property
293
+ def vocab_size(self):
294
+ return len(self.encoder)
295
+
296
+ def get_vocab(self):
297
+ vocab = self.encoder.copy()
298
+ vocab.update(self.added_tokens_encoder)
299
+ return vocab
300
+
301
+ def bpe(self, token):
302
+ if token in self.cache:
303
+ return self.cache[token]
304
+ word = tuple(token)
305
+ pairs = get_pairs(word)
306
+
307
+ if not pairs:
308
+ return token
309
+
310
+ while True:
311
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
312
+ if bigram not in self.bpe_ranks:
313
+ break
314
+ first, second = bigram
315
+ new_word = []
316
+ i = 0
317
+ while i < len(word):
318
+ try:
319
+ j = word.index(first, i)
320
+ except ValueError:
321
+ new_word.extend(word[i:])
322
+ break
323
+ else:
324
+ new_word.extend(word[i:j])
325
+ i = j
326
+
327
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
328
+ new_word.append(first + second)
329
+ i += 2
330
+ else:
331
+ new_word.append(word[i])
332
+ i += 1
333
+ new_word = tuple(new_word)
334
+ word = new_word
335
+ if len(word) == 1:
336
+ break
337
+ else:
338
+ pairs = get_pairs(word)
339
+ word = " ".join(word)
340
+ self.cache[token] = word
341
+ return word
342
+
343
+ def _tokenize(self, text):
344
+ """Tokenize a string."""
345
+ bpe_tokens = []
346
+ for token in re.findall(self.pat, text):
347
+ token = "".join(
348
+ self.byte_encoder[b] for b in token.encode("utf-8")
349
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
350
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
351
+ return bpe_tokens
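As an illustration, assuming the RoBERTa byte-level BPE vocabulary shipped with `microsoft/markuplm-base` and an already-instantiated `tokenizer`, a leading space is carried by the `Ġ` marker produced by the byte encoder above:

```python
print(tokenizer._tokenize("Hello world"))
# expected along the lines of: ['Hello', 'Ġworld']
```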
352
+
353
+ def _convert_token_to_id(self, token):
354
+ """Converts a token (str) in an id using the vocab."""
355
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
356
+
357
+ def _convert_id_to_token(self, index):
358
+ """Converts an index (integer) in a token (str) using the vocab."""
359
+ return self.decoder.get(index)
360
+
361
+ def convert_tokens_to_string(self, tokens):
362
+ """Converts a sequence of tokens (string) in a single string."""
363
+ logger.warning(
364
+ "MarkupLM now does not support generative tasks, decoding is experimental and subject to change."
365
+ )
366
+ text = "".join(tokens)
367
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
368
+ return text
369
+
370
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
371
+ if not os.path.isdir(save_directory):
372
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
373
+ return
374
+ vocab_file = os.path.join(
375
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
376
+ )
377
+ merge_file = os.path.join(
378
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
379
+ )
380
+
381
+ # save vocab_file
382
+ with open(vocab_file, "w", encoding="utf-8") as f:
383
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
384
+
385
+ # save merge_file
386
+ index = 0
387
+ with open(merge_file, "w", encoding="utf-8") as writer:
388
+ writer.write("#version: 0.2\n")
389
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
390
+ if index != token_index:
391
+ logger.warning(
392
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
393
+ " Please check that the tokenizer is not corrupted!"
394
+ )
395
+ index = token_index
396
+ writer.write(" ".join(bpe_tokens) + "\n")
397
+ index += 1
398
+
399
+ return vocab_file, merge_file
400
+
401
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
402
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
403
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
404
+ text = " " + text
405
+ return (text, kwargs)
406
+
407
+ def build_inputs_with_special_tokens(
408
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
409
+ ) -> List[int]:
410
+ """
411
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
412
+ adding special tokens. A MarkupLM sequence has the following format:
413
+ - single sequence: `<s> X </s>`
414
+ - pair of sequences: `<s> A </s> B </s>`
415
+
416
+ Args:
417
+ token_ids_0 (`List[int]`):
418
+ List of IDs to which the special tokens will be added.
419
+ token_ids_1 (`List[int]`, *optional*):
420
+ Optional second list of IDs for sequence pairs.
421
+ Returns:
422
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
423
+ """
424
+ if token_ids_1 is None:
425
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
426
+ cls = [self.cls_token_id]
427
+ sep = [self.sep_token_id]
428
+ return cls + token_ids_0 + sep + token_ids_1 + sep
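Concretely, assuming RoBERTa-style special token IDs (`<s>` = 0, `</s>` = 2) and illustrative content IDs, the method above produces:

```python
# single sequence:  <s> X </s>
tokenizer.build_inputs_with_special_tokens([31414, 232])
# -> [0, 31414, 232, 2]

# pair of sequences:  <s> A </s> B </s>
tokenizer.build_inputs_with_special_tokens([31414], [232])
# -> [0, 31414, 2, 232, 2]
```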
429
+
430
+ def build_xpath_tags_with_special_tokens(
431
+ self, xpath_tags_0: List[int], xpath_tags_1: Optional[List[int]] = None
432
+ ) -> List[int]:
433
+ pad = [self.pad_xpath_tags_seq]
434
+ if xpath_tags_1 is None or len(xpath_tags_1) == 0:
435
+ return pad + xpath_tags_0 + pad
436
+ return pad + xpath_tags_0 + pad + xpath_tags_1 + pad
437
+
438
+ def build_xpath_subs_with_special_tokens(
439
+ self, xpath_subs_0: List[int], xpath_subs_1: Optional[List[int]] = None
440
+ ) -> List[int]:
441
+ pad = [self.pad_xpath_subs_seq]
442
+ if xpath_subs_1 is None or len(xpath_subs_1) == 0:
443
+ return pad + xpath_subs_0 + pad
444
+ return pad + xpath_subs_0 + pad + xpath_subs_1 + pad
445
+
446
+ def get_special_tokens_mask(
447
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
448
+ ) -> List[int]:
449
+ """
450
+ Args:
451
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
452
+ special tokens using the tokenizer `prepare_for_model` method.
453
+ token_ids_0 (`List[int]`):
454
+ List of IDs.
455
+ token_ids_1 (`List[int]`, *optional*):
456
+ Optional second list of IDs for sequence pairs.
457
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
458
+ Whether or not the token list is already formatted with special tokens for the model.
459
+ Returns:
460
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
461
+ """
462
+ if already_has_special_tokens:
463
+ return super().get_special_tokens_mask(
464
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
465
+ )
466
+
467
+ if token_ids_1 is None:
468
+ return [1] + ([0] * len(token_ids_0)) + [1]
469
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
470
+
471
+ def create_token_type_ids_from_sequences(
472
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
473
+ ) -> List[int]:
474
+ """
475
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
476
+ make use of token type ids, therefore a list of zeros is returned.
477
+
478
+ Args:
479
+ token_ids_0 (`List[int]`):
480
+ List of IDs.
481
+ token_ids_1 (`List[int]`, *optional*):
482
+ Optional second list of IDs for sequence pairs.
483
+ Returns:
484
+ `List[int]`: List of zeros.
485
+ """
486
+ sep = [self.sep_token_id]
487
+ cls = [self.cls_token_id]
488
+
489
+ if token_ids_1 is None:
490
+ return len(cls + token_ids_0 + sep) * [0]
491
+ return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0]
492
+
493
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
494
+ def __call__(
495
+ self,
496
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
497
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
498
+ xpaths: Union[List[List[int]], List[List[List[int]]]] = None,
499
+ node_labels: Optional[Union[List[int], List[List[int]]]] = None,
500
+ add_special_tokens: bool = True,
501
+ padding: Union[bool, str, PaddingStrategy] = False,
502
+ truncation: Union[bool, str, TruncationStrategy] = None,
503
+ max_length: Optional[int] = None,
504
+ stride: int = 0,
505
+ pad_to_multiple_of: Optional[int] = None,
506
+ return_tensors: Optional[Union[str, TensorType]] = None,
507
+ return_token_type_ids: Optional[bool] = None,
508
+ return_attention_mask: Optional[bool] = None,
509
+ return_overflowing_tokens: bool = False,
510
+ return_special_tokens_mask: bool = False,
511
+ return_offsets_mapping: bool = False,
512
+ return_length: bool = False,
513
+ verbose: bool = True,
514
+ **kwargs,
515
+ ) -> BatchEncoding:
516
+ """
517
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
518
+ sequences with node-level xpaths and optional labels.
519
+
520
+ Args:
521
+ text (`str`, `List[str]`, `List[List[str]]`):
522
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
523
+ (nodes of a single example or questions of a batch of examples) or a list of list of strings (batch of
524
+ nodes).
525
+ text_pair (`List[str]`, `List[List[str]]`):
526
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
527
+ (pretokenized string).
528
+ xpaths (`List[List[int]]`, `List[List[List[int]]]`):
529
+ Node-level xpaths.
530
+ node_labels (`List[int]`, `List[List[int]]`, *optional*):
531
+ Node-level integer labels (for token classification tasks).
532
+ """
533
+
534
+ # Input type checking for clearer error
535
+ def _is_valid_text_input(t):
536
+ if isinstance(t, str):
537
+ # Strings are fine
538
+ return True
539
+ elif isinstance(t, (list, tuple)):
540
+ # List are fine as long as they are...
541
+ if len(t) == 0:
542
+ # ... empty
543
+ return True
544
+ elif isinstance(t[0], str):
545
+ # ... list of strings
546
+ return True
547
+ elif isinstance(t[0], (list, tuple)):
548
+ # ... list with an empty list or with a list of strings
549
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
550
+ else:
551
+ return False
552
+ else:
553
+ return False
554
+
555
+ if text_pair is not None:
556
+ # in case text + text_pair are provided, text = questions, text_pair = nodes
557
+ if not _is_valid_text_input(text):
558
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
559
+ if not isinstance(text_pair, (list, tuple)):
560
+ raise ValueError(
561
+ "Nodes must be of type `List[str]` (single pretokenized example), "
562
+ "or `List[List[str]]` (batch of pretokenized examples)."
563
+ )
564
+ else:
565
+ # in case only text is provided => must be nodes
566
+ if not isinstance(text, (list, tuple)):
567
+ raise ValueError(
568
+ "Nodes must be of type `List[str]` (single pretokenized example), "
569
+ "or `List[List[str]]` (batch of pretokenized examples)."
570
+ )
571
+
572
+ if text_pair is not None:
573
+ is_batched = isinstance(text, (list, tuple))
574
+ else:
575
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
576
+
577
+ nodes = text if text_pair is None else text_pair
578
+ assert xpaths is not None, "You must provide corresponding xpaths"
579
+ if is_batched:
580
+ assert len(nodes) == len(xpaths), "You must provide nodes and xpaths for an equal amount of examples"
581
+ for nodes_example, xpaths_example in zip(nodes, xpaths):
582
+ assert len(nodes_example) == len(xpaths_example), "You must provide as many nodes as there are xpaths"
583
+ else:
584
+ assert len(nodes) == len(xpaths), "You must provide as many nodes as there are xpaths"
585
+
586
+ if is_batched:
587
+ if text_pair is not None and len(text) != len(text_pair):
588
+ raise ValueError(
589
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
590
+ f" {len(text_pair)}."
591
+ )
592
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
593
+ is_pair = bool(text_pair is not None)
594
+ return self.batch_encode_plus(
595
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
596
+ is_pair=is_pair,
597
+ xpaths=xpaths,
598
+ node_labels=node_labels,
599
+ add_special_tokens=add_special_tokens,
600
+ padding=padding,
601
+ truncation=truncation,
602
+ max_length=max_length,
603
+ stride=stride,
604
+ pad_to_multiple_of=pad_to_multiple_of,
605
+ return_tensors=return_tensors,
606
+ return_token_type_ids=return_token_type_ids,
607
+ return_attention_mask=return_attention_mask,
608
+ return_overflowing_tokens=return_overflowing_tokens,
609
+ return_special_tokens_mask=return_special_tokens_mask,
610
+ return_offsets_mapping=return_offsets_mapping,
611
+ return_length=return_length,
612
+ verbose=verbose,
613
+ **kwargs,
614
+ )
615
+ else:
616
+ return self.encode_plus(
617
+ text=text,
618
+ text_pair=text_pair,
619
+ xpaths=xpaths,
620
+ node_labels=node_labels,
621
+ add_special_tokens=add_special_tokens,
622
+ padding=padding,
623
+ truncation=truncation,
624
+ max_length=max_length,
625
+ stride=stride,
626
+ pad_to_multiple_of=pad_to_multiple_of,
627
+ return_tensors=return_tensors,
628
+ return_token_type_ids=return_token_type_ids,
629
+ return_attention_mask=return_attention_mask,
630
+ return_overflowing_tokens=return_overflowing_tokens,
631
+ return_special_tokens_mask=return_special_tokens_mask,
632
+ return_offsets_mapping=return_offsets_mapping,
633
+ return_length=return_length,
634
+ verbose=verbose,
635
+ **kwargs,
636
+ )
637
+
638
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
639
+ def batch_encode_plus(
640
+ self,
641
+ batch_text_or_text_pairs: Union[
642
+ List[TextInput],
643
+ List[TextInputPair],
644
+ List[PreTokenizedInput],
645
+ ],
646
+ is_pair: bool = None,
647
+ xpaths: Optional[List[List[List[int]]]] = None,
648
+ node_labels: Optional[Union[List[int], List[List[int]]]] = None,
649
+ add_special_tokens: bool = True,
650
+ padding: Union[bool, str, PaddingStrategy] = False,
651
+ truncation: Union[bool, str, TruncationStrategy] = None,
652
+ max_length: Optional[int] = None,
653
+ stride: int = 0,
654
+ pad_to_multiple_of: Optional[int] = None,
655
+ return_tensors: Optional[Union[str, TensorType]] = None,
656
+ return_token_type_ids: Optional[bool] = None,
657
+ return_attention_mask: Optional[bool] = None,
658
+ return_overflowing_tokens: bool = False,
659
+ return_special_tokens_mask: bool = False,
660
+ return_offsets_mapping: bool = False,
661
+ return_length: bool = False,
662
+ verbose: bool = True,
663
+ **kwargs,
664
+ ) -> BatchEncoding:
665
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
666
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
667
+ padding=padding,
668
+ truncation=truncation,
669
+ max_length=max_length,
670
+ pad_to_multiple_of=pad_to_multiple_of,
671
+ verbose=verbose,
672
+ **kwargs,
673
+ )
674
+
675
+ return self._batch_encode_plus(
676
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
677
+ is_pair=is_pair,
678
+ xpaths=xpaths,
679
+ node_labels=node_labels,
680
+ add_special_tokens=add_special_tokens,
681
+ padding_strategy=padding_strategy,
682
+ truncation_strategy=truncation_strategy,
683
+ max_length=max_length,
684
+ stride=stride,
685
+ pad_to_multiple_of=pad_to_multiple_of,
686
+ return_tensors=return_tensors,
687
+ return_token_type_ids=return_token_type_ids,
688
+ return_attention_mask=return_attention_mask,
689
+ return_overflowing_tokens=return_overflowing_tokens,
690
+ return_special_tokens_mask=return_special_tokens_mask,
691
+ return_offsets_mapping=return_offsets_mapping,
692
+ return_length=return_length,
693
+ verbose=verbose,
694
+ **kwargs,
695
+ )
696
+
697
+ def _batch_encode_plus(
698
+ self,
699
+ batch_text_or_text_pairs: Union[
700
+ List[TextInput],
701
+ List[TextInputPair],
702
+ List[PreTokenizedInput],
703
+ ],
704
+ is_pair: bool = None,
705
+ xpaths: Optional[List[List[List[int]]]] = None,
706
+ node_labels: Optional[List[List[int]]] = None,
707
+ add_special_tokens: bool = True,
708
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
709
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
710
+ max_length: Optional[int] = None,
711
+ stride: int = 0,
712
+ pad_to_multiple_of: Optional[int] = None,
713
+ return_tensors: Optional[Union[str, TensorType]] = None,
714
+ return_token_type_ids: Optional[bool] = None,
715
+ return_attention_mask: Optional[bool] = None,
716
+ return_overflowing_tokens: bool = False,
717
+ return_special_tokens_mask: bool = False,
718
+ return_offsets_mapping: bool = False,
719
+ return_length: bool = False,
720
+ verbose: bool = True,
721
+ **kwargs,
722
+ ) -> BatchEncoding:
723
+ if return_offsets_mapping:
724
+ raise NotImplementedError(
725
+ "return_offset_mapping is not available when using Python tokenizers. "
726
+ "To use this feature, change your tokenizer to one deriving from "
727
+ "transformers.PreTrainedTokenizerFast."
728
+ )
729
+
730
+ batch_outputs = self._batch_prepare_for_model(
731
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
732
+ is_pair=is_pair,
733
+ xpaths=xpaths,
734
+ node_labels=node_labels,
735
+ add_special_tokens=add_special_tokens,
736
+ padding_strategy=padding_strategy,
737
+ truncation_strategy=truncation_strategy,
738
+ max_length=max_length,
739
+ stride=stride,
740
+ pad_to_multiple_of=pad_to_multiple_of,
741
+ return_attention_mask=return_attention_mask,
742
+ return_token_type_ids=return_token_type_ids,
743
+ return_overflowing_tokens=return_overflowing_tokens,
744
+ return_special_tokens_mask=return_special_tokens_mask,
745
+ return_length=return_length,
746
+ return_tensors=return_tensors,
747
+ verbose=verbose,
748
+ )
749
+
750
+ return BatchEncoding(batch_outputs)
751
+
752
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
753
+ def _batch_prepare_for_model(
754
+ self,
755
+ batch_text_or_text_pairs,
756
+ is_pair: bool = None,
757
+ xpaths: Optional[List[List[int]]] = None,
758
+ node_labels: Optional[List[List[int]]] = None,
759
+ add_special_tokens: bool = True,
760
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
761
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
762
+ max_length: Optional[int] = None,
763
+ stride: int = 0,
764
+ pad_to_multiple_of: Optional[int] = None,
765
+ return_tensors: Optional[str] = None,
766
+ return_token_type_ids: Optional[bool] = None,
767
+ return_attention_mask: Optional[bool] = None,
768
+ return_overflowing_tokens: bool = False,
769
+ return_special_tokens_mask: bool = False,
770
+ return_length: bool = False,
771
+ verbose: bool = True,
772
+ ) -> BatchEncoding:
773
+ """
774
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
775
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
776
+ manages a moving window (with user defined stride) for overflowing tokens.
777
+
778
+ Args:
779
+ batch_ids_pairs: list of tokenized input ids or input ids pairs
780
+ """
781
+
782
+ batch_outputs = {}
783
+ for idx, example in enumerate(zip(batch_text_or_text_pairs, xpaths)):
784
+ batch_text_or_text_pair, xpaths_example = example
785
+ outputs = self.prepare_for_model(
786
+ batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
787
+ batch_text_or_text_pair[1] if is_pair else None,
788
+ xpaths_example,
789
+ node_labels=node_labels[idx] if node_labels is not None else None,
790
+ add_special_tokens=add_special_tokens,
791
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
792
+ truncation=truncation_strategy.value,
793
+ max_length=max_length,
794
+ stride=stride,
795
+ pad_to_multiple_of=None, # we pad in batch afterward
796
+ return_attention_mask=False, # we pad in batch afterward
797
+ return_token_type_ids=return_token_type_ids,
798
+ return_overflowing_tokens=return_overflowing_tokens,
799
+ return_special_tokens_mask=return_special_tokens_mask,
800
+ return_length=return_length,
801
+ return_tensors=None, # We convert the whole batch to tensors at the end
802
+ prepend_batch_axis=False,
803
+ verbose=verbose,
804
+ )
805
+
806
+ for key, value in outputs.items():
807
+ if key not in batch_outputs:
808
+ batch_outputs[key] = []
809
+ batch_outputs[key].append(value)
810
+
811
+ batch_outputs = self.pad(
812
+ batch_outputs,
813
+ padding=padding_strategy.value,
814
+ max_length=max_length,
815
+ pad_to_multiple_of=pad_to_multiple_of,
816
+ return_attention_mask=return_attention_mask,
817
+ )
818
+
819
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
820
+
821
+ return batch_outputs
822
+
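The method above encodes every example unpadded and only pads once at the batch level. A framework-free toy sketch of that accumulation pattern (values are hypothetical):

batch_outputs = {}
for example in ({"input_ids": [0, 31414, 2]}, {"input_ids": [0, 8331, 232, 2]}):  # toy per-example outputs
    for key, value in example.items():
        batch_outputs.setdefault(key, []).append(value)
# batch_outputs["input_ids"] is now a ragged list of lists; self.pad(...) then brings every
# entry to a common length and adds the attention mask in one pass.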
823
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
824
+ def encode(
825
+ self,
826
+ text: Union[TextInput, PreTokenizedInput],
827
+ text_pair: Optional[PreTokenizedInput] = None,
828
+ xpaths: Optional[List[List[int]]] = None,
829
+ node_labels: Optional[List[int]] = None,
830
+ add_special_tokens: bool = True,
831
+ padding: Union[bool, str, PaddingStrategy] = False,
832
+ truncation: Union[bool, str, TruncationStrategy] = None,
833
+ max_length: Optional[int] = None,
834
+ stride: int = 0,
835
+ pad_to_multiple_of: Optional[int] = None,
836
+ return_tensors: Optional[Union[str, TensorType]] = None,
837
+ return_token_type_ids: Optional[bool] = None,
838
+ return_attention_mask: Optional[bool] = None,
839
+ return_overflowing_tokens: bool = False,
840
+ return_special_tokens_mask: bool = False,
841
+ return_offsets_mapping: bool = False,
842
+ return_length: bool = False,
843
+ verbose: bool = True,
844
+ **kwargs,
845
+ ) -> List[int]:
846
+ encoded_inputs = self.encode_plus(
847
+ text=text,
848
+ text_pair=text_pair,
849
+ xpaths=xpaths,
850
+ node_labels=node_labels,
851
+ add_special_tokens=add_special_tokens,
852
+ padding=padding,
853
+ truncation=truncation,
854
+ max_length=max_length,
855
+ stride=stride,
856
+ pad_to_multiple_of=pad_to_multiple_of,
857
+ return_tensors=return_tensors,
858
+ return_token_type_ids=return_token_type_ids,
859
+ return_attention_mask=return_attention_mask,
860
+ return_overflowing_tokens=return_overflowing_tokens,
861
+ return_special_tokens_mask=return_special_tokens_mask,
862
+ return_offsets_mapping=return_offsets_mapping,
863
+ return_length=return_length,
864
+ verbose=verbose,
865
+ **kwargs,
866
+ )
867
+
868
+ return encoded_inputs["input_ids"]
869
+
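`encode` is a thin wrapper over `encode_plus` that keeps only the token ids. A hedged sketch, reusing the `tokenizer` from the sketch above:

ids = tokenizer.encode(["hello", "world"], xpaths=["/html/body/div", "/html/body/div"])
# `ids` is a plain Python list of token ids wrapped in the special tokens; the xpath
# sequences are computed along the way but dropped from the return value.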
870
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
871
+ def encode_plus(
872
+ self,
873
+ text: Union[TextInput, PreTokenizedInput],
874
+ text_pair: Optional[PreTokenizedInput] = None,
875
+ xpaths: Optional[List[List[int]]] = None,
876
+ node_labels: Optional[List[int]] = None,
877
+ add_special_tokens: bool = True,
878
+ padding: Union[bool, str, PaddingStrategy] = False,
879
+ truncation: Union[bool, str, TruncationStrategy] = None,
880
+ max_length: Optional[int] = None,
881
+ stride: int = 0,
882
+ pad_to_multiple_of: Optional[int] = None,
883
+ return_tensors: Optional[Union[str, TensorType]] = None,
884
+ return_token_type_ids: Optional[bool] = None,
885
+ return_attention_mask: Optional[bool] = None,
886
+ return_overflowing_tokens: bool = False,
887
+ return_special_tokens_mask: bool = False,
888
+ return_offsets_mapping: bool = False,
889
+ return_length: bool = False,
890
+ verbose: bool = True,
891
+ **kwargs,
892
+ ) -> BatchEncoding:
893
+ """
894
+ Tokenize and prepare for the model a sequence or a pair of sequences.
895
+ .. warning:: This method is deprecated, `__call__` should be used instead.
896
+
897
+ Args:
898
+ text (`str`, `List[str]`, `List[List[str]]`):
899
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
900
+ text_pair (`List[str]` or `List[int]`, *optional*):
901
+ Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a
902
+ list of list of strings (nodes of a batch of examples).
903
+ """
904
+
905
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
906
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
907
+ padding=padding,
908
+ truncation=truncation,
909
+ max_length=max_length,
910
+ pad_to_multiple_of=pad_to_multiple_of,
911
+ verbose=verbose,
912
+ **kwargs,
913
+ )
914
+
915
+ return self._encode_plus(
916
+ text=text,
917
+ xpaths=xpaths,
918
+ text_pair=text_pair,
919
+ node_labels=node_labels,
920
+ add_special_tokens=add_special_tokens,
921
+ padding_strategy=padding_strategy,
922
+ truncation_strategy=truncation_strategy,
923
+ max_length=max_length,
924
+ stride=stride,
925
+ pad_to_multiple_of=pad_to_multiple_of,
926
+ return_tensors=return_tensors,
927
+ return_token_type_ids=return_token_type_ids,
928
+ return_attention_mask=return_attention_mask,
929
+ return_overflowing_tokens=return_overflowing_tokens,
930
+ return_special_tokens_mask=return_special_tokens_mask,
931
+ return_offsets_mapping=return_offsets_mapping,
932
+ return_length=return_length,
933
+ verbose=verbose,
934
+ **kwargs,
935
+ )
936
+
937
+ def _encode_plus(
938
+ self,
939
+ text: Union[TextInput, PreTokenizedInput],
940
+ text_pair: Optional[PreTokenizedInput] = None,
941
+ xpaths: Optional[List[List[int]]] = None,
942
+ node_labels: Optional[List[int]] = None,
943
+ add_special_tokens: bool = True,
944
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
945
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
946
+ max_length: Optional[int] = None,
947
+ stride: int = 0,
948
+ pad_to_multiple_of: Optional[int] = None,
949
+ return_tensors: Optional[Union[str, TensorType]] = None,
950
+ return_token_type_ids: Optional[bool] = None,
951
+ return_attention_mask: Optional[bool] = None,
952
+ return_overflowing_tokens: bool = False,
953
+ return_special_tokens_mask: bool = False,
954
+ return_offsets_mapping: bool = False,
955
+ return_length: bool = False,
956
+ verbose: bool = True,
957
+ **kwargs,
958
+ ) -> BatchEncoding:
959
+ if return_offsets_mapping:
960
+ raise NotImplementedError(
961
+ "return_offset_mapping is not available when using Python tokenizers. "
962
+ "To use this feature, change your tokenizer to one deriving from "
963
+ "transformers.PreTrainedTokenizerFast. "
964
+ "More information on available tokenizers at "
965
+ "https://github.com/huggingface/transformers/pull/2674"
966
+ )
967
+
968
+ return self.prepare_for_model(
969
+ text=text,
970
+ text_pair=text_pair,
971
+ xpaths=xpaths,
972
+ node_labels=node_labels,
973
+ add_special_tokens=add_special_tokens,
974
+ padding=padding_strategy.value,
975
+ truncation=truncation_strategy.value,
976
+ max_length=max_length,
977
+ stride=stride,
978
+ pad_to_multiple_of=pad_to_multiple_of,
979
+ return_tensors=return_tensors,
980
+ prepend_batch_axis=True,
981
+ return_attention_mask=return_attention_mask,
982
+ return_token_type_ids=return_token_type_ids,
983
+ return_overflowing_tokens=return_overflowing_tokens,
984
+ return_special_tokens_mask=return_special_tokens_mask,
985
+ return_length=return_length,
986
+ verbose=verbose,
987
+ )
988
+
989
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
990
+ def prepare_for_model(
991
+ self,
992
+ text: Union[TextInput, PreTokenizedInput],
993
+ text_pair: Optional[PreTokenizedInput] = None,
994
+ xpaths: Optional[List[List[int]]] = None,
995
+ node_labels: Optional[List[int]] = None,
996
+ add_special_tokens: bool = True,
997
+ padding: Union[bool, str, PaddingStrategy] = False,
998
+ truncation: Union[bool, str, TruncationStrategy] = None,
999
+ max_length: Optional[int] = None,
1000
+ stride: int = 0,
1001
+ pad_to_multiple_of: Optional[int] = None,
1002
+ return_tensors: Optional[Union[str, TensorType]] = None,
1003
+ return_token_type_ids: Optional[bool] = None,
1004
+ return_attention_mask: Optional[bool] = None,
1005
+ return_overflowing_tokens: bool = False,
1006
+ return_special_tokens_mask: bool = False,
1007
+ return_offsets_mapping: bool = False,
1008
+ return_length: bool = False,
1009
+ verbose: bool = True,
1010
+ prepend_batch_axis: bool = False,
1011
+ **kwargs,
1012
+ ) -> BatchEncoding:
1013
+ """
1014
+ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
1015
+ truncates sequences if overflowing while taking into account the special tokens and manages a moving window
1016
+ (with a user-defined stride) for overflowing tokens. Please note that for *text_pair* different from `None` and
1017
+ *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
1018
+ combination of arguments will raise an error.
1019
+
1020
+ Node-level `xpaths` are turned into token-level `xpath_tags_seq` and `xpath_subs_seq`. If provided, node-level
1021
+ `node_labels` are turned into token-level `labels`. The node label is used for the first token of the node,
1022
+ while remaining tokens are labeled with -100, such that they will be ignored by the loss function.
1023
+
1024
+ Args:
1025
+ text (`str`, `List[str]`, `List[List[str]]`):
1026
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
1027
+ text_pair (`List[str]` or `List[int]`, *optional*):
1028
+ Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a
1029
+ list of list of strings (nodes of a batch of examples).
1030
+ """
1031
+
1032
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
1033
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
1034
+ padding=padding,
1035
+ truncation=truncation,
1036
+ max_length=max_length,
1037
+ pad_to_multiple_of=pad_to_multiple_of,
1038
+ verbose=verbose,
1039
+ **kwargs,
1040
+ )
1041
+
1042
+ tokens = []
1043
+ pair_tokens = []
1044
+ xpath_tags_seq = []
1045
+ xpath_subs_seq = []
1046
+ pair_xpath_tags_seq = []
1047
+ pair_xpath_subs_seq = []
1048
+ labels = []
1049
+
1050
+ if text_pair is None:
1051
+ if node_labels is None:
1052
+ # CASE 1: web page classification (training + inference) + CASE 2: token classification (inference)
1053
+ for word, xpath in zip(text, xpaths):
1054
+ if len(word) < 1: # skip empty nodes
1055
+ continue
1056
+ word_tokens = self.tokenize(word)
1057
+ tokens.extend(word_tokens)
1058
+ xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath)
1059
+ xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens))
1060
+ xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens))
1061
+ else:
1062
+ # CASE 2: token classification (training)
1063
+ for word, xpath, label in zip(text, xpaths, node_labels):
1064
+ if len(word) < 1: # skip empty nodes
1065
+ continue
1066
+ word_tokens = self.tokenize(word)
1067
+ tokens.extend(word_tokens)
1068
+ xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath)
1069
+ xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens))
1070
+ xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens))
1071
+ if self.only_label_first_subword:
1072
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
1073
+ labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
1074
+ else:
1075
+ labels.extend([label] * len(word_tokens))
1076
+ else:
1077
+ # CASE 3: web page question answering (inference)
1078
+ # text = question
1079
+ # text_pair = nodes
1080
+ tokens = self.tokenize(text)
1081
+ xpath_tags_seq = [self.pad_xpath_tags_seq for _ in range(len(tokens))]
1082
+ xpath_subs_seq = [self.pad_xpath_subs_seq for _ in range(len(tokens))]
1083
+
1084
+ for word, xpath in zip(text_pair, xpaths):
1085
+ if len(word) < 1: # skip empty nodes
1086
+ continue
1087
+ word_tokens = self.tokenize(word)
1088
+ pair_tokens.extend(word_tokens)
1089
+ xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath)
1090
+ pair_xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens))
1091
+ pair_xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens))
1092
+
1093
+ # Create ids + pair_ids
1094
+ ids = self.convert_tokens_to_ids(tokens)
1095
+ pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
1096
+
1097
+ if (
1098
+ return_overflowing_tokens
1099
+ and truncation_strategy == TruncationStrategy.LONGEST_FIRST
1100
+ and pair_ids is not None
1101
+ ):
1102
+ raise ValueError(
1103
+ "Not possible to return overflowing tokens for pair of sequences with the "
1104
+ "`longest_first`. Please select another truncation strategy than `longest_first`, "
1105
+ "for instance `only_second` or `only_first`."
1106
+ )
1107
+
1108
+ # Compute the total size of the returned encodings
1109
+ pair = bool(pair_ids is not None)
1110
+ len_ids = len(ids)
1111
+ len_pair_ids = len(pair_ids) if pair else 0
1112
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
1113
+
1114
+ # Truncation: Handle max sequence length
1115
+ overflowing_tokens = []
1116
+ overflowing_xpath_tags_seq = []
1117
+ overflowing_xpath_subs_seq = []
1118
+ overflowing_labels = []
1119
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
1120
+ (
1121
+ ids,
1122
+ xpath_tags_seq,
1123
+ xpath_subs_seq,
1124
+ pair_ids,
1125
+ pair_xpath_tags_seq,
1126
+ pair_xpath_subs_seq,
1127
+ labels,
1128
+ overflowing_tokens,
1129
+ overflowing_xpath_tags_seq,
1130
+ overflowing_xpath_subs_seq,
1131
+ overflowing_labels,
1132
+ ) = self.truncate_sequences(
1133
+ ids,
1134
+ xpath_tags_seq=xpath_tags_seq,
1135
+ xpath_subs_seq=xpath_subs_seq,
1136
+ pair_ids=pair_ids,
1137
+ pair_xpath_tags_seq=pair_xpath_tags_seq,
1138
+ pair_xpath_subs_seq=pair_xpath_subs_seq,
1139
+ labels=labels,
1140
+ num_tokens_to_remove=total_len - max_length,
1141
+ truncation_strategy=truncation_strategy,
1142
+ stride=stride,
1143
+ )
1144
+
1145
+ if return_token_type_ids and not add_special_tokens:
1146
+ raise ValueError(
1147
+ "Asking to return token_type_ids while setting add_special_tokens to False "
1148
+ "results in an undefined behavior. Please set add_special_tokens to True or "
1149
+ "set return_token_type_ids to None."
1150
+ )
1151
+
1152
+ # Load from model defaults
1153
+ if return_token_type_ids is None:
1154
+ return_token_type_ids = "token_type_ids" in self.model_input_names
1155
+ if return_attention_mask is None:
1156
+ return_attention_mask = "attention_mask" in self.model_input_names
1157
+
1158
+ encoded_inputs = {}
1159
+
1160
+ if return_overflowing_tokens:
1161
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
1162
+ encoded_inputs["overflowing_xpath_tags_seq"] = overflowing_xpath_tags_seq
1163
+ encoded_inputs["overflowing_xpath_subs_seq"] = overflowing_xpath_subs_seq
1164
+ encoded_inputs["overflowing_labels"] = overflowing_labels
1165
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
1166
+
1167
+ # Add special tokens
1168
+ if add_special_tokens:
1169
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
1170
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
1171
+ xpath_tags_ids = self.build_xpath_tags_with_special_tokens(xpath_tags_seq, pair_xpath_tags_seq)
1172
+ xpath_subs_ids = self.build_xpath_subs_with_special_tokens(xpath_subs_seq, pair_xpath_subs_seq)
1173
+ if labels:
1174
+ labels = [self.pad_token_label] + labels + [self.pad_token_label]
1175
+ else:
1176
+ sequence = ids + pair_ids if pair else ids
1177
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
1178
+ xpath_tags_ids = xpath_tags_seq + pair_xpath_tags_seq if pair else xpath_tags_seq
1179
+ xpath_subs_ids = xpath_subs_seq + pair_xpath_subs_seq if pair else xpath_subs_seq
1180
+
1181
+ # Build output dictionary
1182
+ encoded_inputs["input_ids"] = sequence
1183
+ encoded_inputs["xpath_tags_seq"] = xpath_tags_ids
1184
+ encoded_inputs["xpath_subs_seq"] = xpath_subs_ids
1185
+ if return_token_type_ids:
1186
+ encoded_inputs["token_type_ids"] = token_type_ids
1187
+ if return_special_tokens_mask:
1188
+ if add_special_tokens:
1189
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
1190
+ else:
1191
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
1192
+
1193
+ if labels:
1194
+ encoded_inputs["labels"] = labels
1195
+
1196
+ # Check lengths
1197
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
1198
+
1199
+ # Padding
1200
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
1201
+ encoded_inputs = self.pad(
1202
+ encoded_inputs,
1203
+ max_length=max_length,
1204
+ padding=padding_strategy.value,
1205
+ pad_to_multiple_of=pad_to_multiple_of,
1206
+ return_attention_mask=return_attention_mask,
1207
+ )
1208
+
1209
+ if return_length:
1210
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
1211
+
1212
+ batch_outputs = BatchEncoding(
1213
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
1214
+ )
1215
+
1216
+ return batch_outputs
1217
+
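A toy illustration of the `only_label_first_subword` rule documented above: the node label goes to the first sub-token, and every continuation sub-token gets `pad_token_label` so the loss ignores it (all values hypothetical).

word_tokens = ["Mark", "up", "LM"]  # hypothetical subword split of a single node
label, pad_token_label = 3, -100
token_labels = [label] + [pad_token_label] * (len(word_tokens) - 1)
# token_labels == [3, -100, -100]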
1218
+ def truncate_sequences(
1219
+ self,
1220
+ ids: List[int],
1221
+ xpath_tags_seq: List[List[int]],
1222
+ xpath_subs_seq: List[List[int]],
1223
+ pair_ids: Optional[List[int]] = None,
1224
+ pair_xpath_tags_seq: Optional[List[List[int]]] = None,
1225
+ pair_xpath_subs_seq: Optional[List[List[int]]] = None,
1226
+ labels: Optional[List[int]] = None,
1227
+ num_tokens_to_remove: int = 0,
1228
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
1229
+ stride: int = 0,
1230
+ ) -> Tuple[List[int], List[int], List[int]]:
1231
+ """
1232
+ Truncates a sequence pair in-place following the strategy.
1233
+ Args:
1234
+ ids (`List[int]`):
1235
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
1236
+ `convert_tokens_to_ids` methods.
1237
+ xpath_tags_seq (`List[List[int]]`):
1238
+ XPath tag IDs of the first sequence.
1239
+ xpath_subs_seq (`List[List[int]]`):
1240
+ XPath sub IDs of the first sequence.
1241
+ pair_ids (`List[int]`, *optional*):
1242
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
1243
+ and `convert_tokens_to_ids` methods.
1244
+ pair_xpath_tags_seq (`List[List[int]]`, *optional*):
1245
+ XPath tag IDs of the second sequence.
1246
+ pair_xpath_subs_seq (`List[List[int]]`, *optional*):
1247
+ XPath sub IDs of the second sequence.
1248
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
1249
+ Number of tokens to remove using the truncation strategy.
1250
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to
1251
+ `False`):
1252
+ The strategy to follow for truncation. Can be:
1253
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1254
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
1255
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
1256
+ batch of pairs) is provided.
1257
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1258
+ maximum acceptable input length for the model if that argument is not provided. This will only
1259
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1260
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
1261
+ maximum acceptable input length for the model if that argument is not provided. This will only
1262
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1263
+ - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
1264
+ than the model maximum admissible input size).
1265
+ stride (`int`, *optional*, defaults to 0):
1266
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
1267
+ sequence returned. The value of this argument defines the number of additional tokens.
1268
+ Returns:
1269
+ `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
1270
+ overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
1271
+ of sequences (or a batch of pairs) is provided.
1272
+ """
1273
+ if num_tokens_to_remove <= 0:
1274
+ return ids, xpath_tags_seq, xpath_subs_seq, pair_ids, pair_xpath_tags_seq, pair_xpath_subs_seq, [], [], []
1275
+
1276
+ if not isinstance(truncation_strategy, TruncationStrategy):
1277
+ truncation_strategy = TruncationStrategy(truncation_strategy)
1278
+
1279
+ overflowing_tokens = []
1280
+ overflowing_xpath_tags_seq = []
1281
+ overflowing_xpath_subs_seq = []
1282
+ overflowing_labels = []
1283
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
1284
+ truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
1285
+ ):
1286
+ if len(ids) > num_tokens_to_remove:
1287
+ window_len = min(len(ids), stride + num_tokens_to_remove)
1288
+ overflowing_tokens = ids[-window_len:]
1289
+ overflowing_xpath_tags_seq = xpath_tags_seq[-window_len:]
1290
+ overflowing_xpath_subs_seq = xpath_subs_seq[-window_len:]
1291
+ ids = ids[:-num_tokens_to_remove]
1292
+ xpath_tags_seq = xpath_tags_seq[:-num_tokens_to_remove]
1293
+ xpath_subs_seq = xpath_subs_seq[:-num_tokens_to_remove]
1294
+ labels = labels[:-num_tokens_to_remove]
1295
+ else:
1296
+ error_msg = (
1297
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1298
+ f"but the first sequence has a length {len(ids)}. "
1299
+ )
1300
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST:
1301
+ error_msg = (
1302
+ error_msg + "Please select another truncation strategy than "
1303
+ f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
1304
+ )
1305
+ logger.error(error_msg)
1306
+ elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
1307
+ logger.warning(
1308
+ "Be aware, overflowing tokens are not returned for the setting you have chosen,"
1309
+ f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
1310
+ "truncation strategy. So the returned list will always be empty even if some "
1311
+ "tokens have been removed."
1312
+ )
1313
+ for _ in range(num_tokens_to_remove):
1314
+ if pair_ids is None or len(ids) > len(pair_ids):
1315
+ ids = ids[:-1]
1316
+ xpath_tags_seq = xpath_tags_seq[:-1]
1317
+ xpath_subs_seq = xpath_subs_seq[:-1]
1318
+ labels = labels[:-1]
1319
+ else:
1320
+ pair_ids = pair_ids[:-1]
1321
+ pair_xpath_tags_seq = pair_xpath_tags_seq[:-1]
1322
+ pair_xpath_subs_seq = pair_xpath_subs_seq[:-1]
1323
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
1324
+ if len(pair_ids) > num_tokens_to_remove:
1325
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
1326
+ overflowing_tokens = pair_ids[-window_len:]
1327
+ overflowing_xpath_tags_seq = pair_xpath_tags_seq[-window_len:]
1328
+ overflowing_xpath_subs_seq = pair_xpath_subs_seq[-window_len:]
1329
+ pair_ids = pair_ids[:-num_tokens_to_remove]
1330
+ pair_xpath_tags_seq = pair_xpath_tags_seq[:-num_tokens_to_remove]
1331
+ pair_xpath_subs_seq = pair_xpath_subs_seq[:-num_tokens_to_remove]
1332
+ else:
1333
+ logger.error(
1334
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1335
+ f"but the second sequence has a length {len(pair_ids)}. "
1336
+ f"Please select another truncation strategy than {truncation_strategy}, "
1337
+ "for instance 'longest_first' or 'only_first'."
1338
+ )
1339
+
1340
+ return (
1341
+ ids,
1342
+ xpath_tags_seq,
1343
+ xpath_subs_seq,
1344
+ pair_ids,
1345
+ pair_xpath_tags_seq,
1346
+ pair_xpath_subs_seq,
1347
+ labels,
1348
+ overflowing_tokens,
1349
+ overflowing_xpath_tags_seq,
1350
+ overflowing_xpath_subs_seq,
1351
+ overflowing_labels,
1352
+ )
1353
+
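The overflow-window bookkeeping above, isolated with toy numbers: the overflow keeps `stride` extra tokens from the kept part so that sliding windows overlap.

ids = list(range(10))                              # 10 toy token ids
num_tokens_to_remove, stride = 4, 2
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]             # [4, 5, 6, 7, 8, 9]
ids = ids[:-num_tokens_to_remove]                  # [0, 1, 2, 3, 4, 5]
# xpath_tags_seq / xpath_subs_seq are sliced with the same indices, so they stay
# aligned with the surviving tokens.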
1354
+ def _pad(
1355
+ self,
1356
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
1357
+ max_length: Optional[int] = None,
1358
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1359
+ pad_to_multiple_of: Optional[int] = None,
1360
+ return_attention_mask: Optional[bool] = None,
1361
+ ) -> dict:
1362
+ """
1363
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch).
1364
+ Args:
1365
+ encoded_inputs:
1366
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
1367
+ max_length: maximum length of the returned list and optionally padding length (see below).
1368
+ Will truncate by taking into account the special tokens.
1369
+ padding_strategy: PaddingStrategy to use for padding.
1370
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
1371
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
1372
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
1373
+ The tokenizer padding sides are defined in self.padding_side:
1374
+ - 'left': pads on the left of the sequences
1375
+ - 'right': pads on the right of the sequences
1376
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
1377
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
1378
+ `>= 7.5` (Volta).
1379
+ return_attention_mask:
1380
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
1381
+ """
1382
+ # Load from model defaults
1383
+ if return_attention_mask is None:
1384
+ return_attention_mask = "attention_mask" in self.model_input_names
1385
+
1386
+ required_input = encoded_inputs[self.model_input_names[0]]
1387
+
1388
+ if padding_strategy == PaddingStrategy.LONGEST:
1389
+ max_length = len(required_input)
1390
+
1391
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
1392
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1393
+
1394
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
1395
+
1396
+ # Initialize attention mask if not present.
1397
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
1398
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
1399
+
1400
+ if needs_to_be_padded:
1401
+ difference = max_length - len(required_input)
1402
+ if self.padding_side == "right":
1403
+ if return_attention_mask:
1404
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
1405
+ if "token_type_ids" in encoded_inputs:
1406
+ encoded_inputs["token_type_ids"] = (
1407
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
1408
+ )
1409
+ if "xpath_tags_seq" in encoded_inputs:
1410
+ encoded_inputs["xpath_tags_seq"] = (
1411
+ encoded_inputs["xpath_tags_seq"] + [self.pad_xpath_tags_seq] * difference
1412
+ )
1413
+ if "xpath_subs_seq" in encoded_inputs:
1414
+ encoded_inputs["xpath_subs_seq"] = (
1415
+ encoded_inputs["xpath_subs_seq"] + [self.pad_xpath_subs_seq] * difference
1416
+ )
1417
+ if "labels" in encoded_inputs:
1418
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
1419
+ if "special_tokens_mask" in encoded_inputs:
1420
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
1421
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
1422
+ elif self.padding_side == "left":
1423
+ if return_attention_mask:
1424
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
1425
+ if "token_type_ids" in encoded_inputs:
1426
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
1427
+ "token_type_ids"
1428
+ ]
1429
+ if "xpath_tags_seq" in encoded_inputs:
1430
+ encoded_inputs["xpath_tags_seq"] = [self.pad_xpath_tags_seq] * difference + encoded_inputs[
1431
+ "xpath_tags_seq"
1432
+ ]
1433
+ if "xpath_subs_seq" in encoded_inputs:
1434
+ encoded_inputs["xpath_subs_seq"] = [self.pad_xpath_subs_seq] * difference + encoded_inputs[
1435
+ "xpath_subs_seq"
1436
+ ]
1437
+ if "labels" in encoded_inputs:
1438
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
1439
+ if "special_tokens_mask" in encoded_inputs:
1440
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
1441
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
1442
+ else:
1443
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
1444
+
1445
+ return encoded_inputs
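The `pad_to_multiple_of` rounding used in `_pad` above, shown with toy numbers; the rounded length is then applied to every sequence key (`input_ids`, `xpath_tags_seq`, `xpath_subs_seq`, `labels`, ...).

max_length, pad_to_multiple_of = 50, 8  # toy values
if max_length % pad_to_multiple_of != 0:
    max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
# max_length == 56, since 50 // 8 == 6 and (6 + 1) * 8 == 56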
llmeval-env/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm_fast.py ADDED
@@ -0,0 +1,918 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Fast tokenization class for MarkupLM. It overrides 2 methods of the slow tokenizer class, namely _batch_encode_plus
17
+ and _encode_plus, in which the Rust tokenizer is used.
18
+ """
19
+
20
+ import json
21
+ from functools import lru_cache
22
+ from typing import Dict, List, Optional, Tuple, Union
23
+
24
+ from tokenizers import pre_tokenizers, processors
25
+
26
+ from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
27
+ from ...tokenization_utils_base import (
28
+ ENCODE_KWARGS_DOCSTRING,
29
+ AddedToken,
30
+ BatchEncoding,
31
+ EncodedInput,
32
+ PreTokenizedInput,
33
+ TextInput,
34
+ TextInputPair,
35
+ TruncationStrategy,
36
+ )
37
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
38
+ from ...utils import logging
39
+ from .tokenization_markuplm import MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, MarkupLMTokenizer
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
45
+
46
+
47
+ @lru_cache()
48
+ def bytes_to_unicode():
49
+ """
50
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
51
+ characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large #
52
+ of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset
53
+ you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe
54
+ vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
55
+ """
56
+ bs = (
57
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
58
+ )
59
+ cs = bs[:]
60
+ n = 0
61
+ for b in range(2**8):
62
+ if b not in bs:
63
+ bs.append(b)
64
+ cs.append(2**8 + n)
65
+ n += 1
66
+ cs = [chr(n) for n in cs]
67
+ return dict(zip(bs, cs))
68
+
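A quick sanity check of the table built above (assuming `bytes_to_unicode` is imported from this module): every one of the 256 byte values maps to a distinct printable unicode character, so byte-level BPE never sees raw control bytes.

table = bytes_to_unicode()
assert len(table) == 256
assert len(set(table.values())) == 256  # the mapping is a bijection
assert table[ord("A")] == "A"           # printable ASCII maps to itself
assert table[0] == chr(256)             # byte 0 is remapped past the 8-bit range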
69
+
70
+ def get_pairs(word):
71
+ """
72
+ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
73
+ strings).
74
+ """
75
+ pairs = set()
76
+ prev_char = word[0]
77
+ for char in word[1:]:
78
+ pairs.add((prev_char, char))
79
+ prev_char = char
80
+ return pairs
81
+
82
+
83
+ class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
84
+ r"""
85
+ Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE).
86
+
87
+ [`MarkupLMTokenizerFast`] can be used to turn HTML strings into to token-level `input_ids`, `attention_mask`,
88
+ `token_type_ids`, `xpath_tags_seq` and `xpath_tags_seq`. This tokenizer inherits from [`PreTrainedTokenizer`] which
89
+ contains most of the main methods.
90
+
91
+ Users should refer to this superclass for more information regarding those methods.
92
+
93
+ Args:
94
+ vocab_file (`str`):
95
+ Path to the vocabulary file.
96
+ merges_file (`str`):
97
+ Path to the merges file.
98
+ errors (`str`, *optional*, defaults to `"replace"`):
99
+ Paradigm to follow when decoding bytes to UTF-8. See
100
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
101
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
102
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
103
+
104
+ <Tip>
105
+
106
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
107
+ sequence. The token used is the `cls_token`.
108
+
109
+ </Tip>
110
+
111
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
112
+ The end of sequence token.
113
+
114
+ <Tip>
115
+
116
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
117
+ The token used is the `sep_token`.
118
+
119
+ </Tip>
120
+
121
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
122
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
123
+ sequence classification or for a text and a question for question answering. It is also used as the last
124
+ token of a sequence built with special tokens.
125
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
126
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
127
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
128
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
129
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
130
+ token instead.
131
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
132
+ The token used for padding, for example when batching sequences of different lengths.
133
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
134
+ The token used for masking values. This is the token used when training this model with masked language
135
+ modeling. This is the token which the model will try to predict.
136
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
137
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
138
+ other word. (RoBERTa tokenizer detect beginning of words by the preceding space).
139
+ """
140
+
141
+ vocab_files_names = VOCAB_FILES_NAMES
142
+ slow_tokenizer_class = MarkupLMTokenizer
143
+
144
+ def __init__(
145
+ self,
146
+ vocab_file,
147
+ merges_file,
148
+ tags_dict,
149
+ tokenizer_file=None,
150
+ errors="replace",
151
+ bos_token="<s>",
152
+ eos_token="</s>",
153
+ sep_token="</s>",
154
+ cls_token="<s>",
155
+ unk_token="<unk>",
156
+ pad_token="<pad>",
157
+ mask_token="<mask>",
158
+ add_prefix_space=False,
159
+ max_depth=50,
160
+ max_width=1000,
161
+ pad_width=1001,
162
+ pad_token_label=-100,
163
+ only_label_first_subword=True,
164
+ trim_offsets=False,
165
+ **kwargs,
166
+ ):
167
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
168
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
169
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
170
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
171
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
172
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
173
+
174
+ # Mask token behave like a normal word, i.e. include the space before it
175
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
176
+
177
+ super().__init__(
178
+ vocab_file=vocab_file,
179
+ merges_file=merges_file,
180
+ tags_dict=tags_dict,
181
+ tokenizer_file=tokenizer_file,
182
+ errors=errors,
183
+ bos_token=bos_token,
184
+ eos_token=eos_token,
185
+ unk_token=unk_token,
186
+ sep_token=sep_token,
187
+ cls_token=cls_token,
188
+ pad_token=pad_token,
189
+ mask_token=mask_token,
190
+ add_prefix_space=add_prefix_space,
191
+ trim_offsets=trim_offsets,
192
+ max_depth=max_depth,
193
+ max_width=max_width,
194
+ pad_width=pad_width,
195
+ pad_token_label=pad_token_label,
196
+ only_label_first_subword=only_label_first_subword,
197
+ **kwargs,
198
+ )
199
+ if trim_offsets:
200
+ # Not implemented yet, because we need to chain two post processors which is not possible yet
201
+ # We need to wait for https://github.com/huggingface/tokenizers/pull/1005
202
+ # With `trim_offsets=False` we don't need to do add `processors.ByteLevel(trim_offsets=False)`
203
+ # because it's not doing anything
204
+ raise NotImplementedError(
205
+ "`trim_offsets=True` is not implemented for MarkupLMTokenizerFast. Please set it to False."
206
+ )
207
+
208
+ self.tags_dict = tags_dict
209
+
210
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
211
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
212
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
213
+ pre_tok_state["add_prefix_space"] = add_prefix_space
214
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
215
+
216
+ self.add_prefix_space = add_prefix_space
217
+
218
+ tokenizer_component = "post_processor"
219
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
220
+ if tokenizer_component_instance:
221
+ state = json.loads(tokenizer_component_instance.__getstate__())
222
+
223
+ # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
224
+ if "sep" in state:
225
+ state["sep"] = tuple(state["sep"])
226
+ if "cls" in state:
227
+ state["cls"] = tuple(state["cls"])
228
+
229
+ changes_to_apply = False
230
+
231
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
232
+ state["add_prefix_space"] = add_prefix_space
233
+ changes_to_apply = True
234
+
235
+ if changes_to_apply:
236
+ component_class = getattr(processors, state.pop("type"))
237
+ new_value = component_class(**state)
238
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
239
+
240
+ # additional properties
241
+ self.max_depth = max_depth
242
+ self.max_width = max_width
243
+ self.pad_width = pad_width
244
+ self.unk_tag_id = len(self.tags_dict)
245
+ self.pad_tag_id = self.unk_tag_id + 1
246
+ self.pad_xpath_tags_seq = [self.pad_tag_id] * self.max_depth
247
+ self.pad_xpath_subs_seq = [self.pad_width] * self.max_depth
248
+ self.pad_token_label = pad_token_label
249
+ self.only_label_first_subword = only_label_first_subword
250
+
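A minimal loading sketch for the constructor above, assuming the `microsoft/markuplm-base` checkpoint; `trim_offsets=True` is rejected, and the xpath padding sequences are derived from `tags_dict`, `max_depth` and `pad_width`.

from transformers import MarkupLMTokenizerFast

tokenizer = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base")  # assumed checkpoint name
print(tokenizer.max_depth, tokenizer.max_width)  # 50 and 1000 by default
print(len(tokenizer.pad_xpath_tags_seq))         # max_depth entries, all equal to pad_tag_id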
251
+ def get_xpath_seq(self, xpath):
252
+ """
253
+ Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of
254
+ tag IDs and corresponding subscripts, taking into account max depth.
255
+ """
256
+ xpath_tags_list = []
257
+ xpath_subs_list = []
258
+
259
+ xpath_units = xpath.split("/")
260
+ for unit in xpath_units:
261
+ if not unit.strip():
262
+ continue
263
+ name_subs = unit.strip().split("[")
264
+ tag_name = name_subs[0]
265
+ sub = 0 if len(name_subs) == 1 else int(name_subs[1][:-1])
266
+ xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id))
267
+ xpath_subs_list.append(min(self.max_width, sub))
268
+
269
+ xpath_tags_list = xpath_tags_list[: self.max_depth]
270
+ xpath_subs_list = xpath_subs_list[: self.max_depth]
271
+ xpath_tags_list += [self.pad_tag_id] * (self.max_depth - len(xpath_tags_list))
272
+ xpath_subs_list += [self.pad_width] * (self.max_depth - len(xpath_subs_list))
273
+
274
+ return xpath_tags_list, xpath_subs_list
275
+
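A worked example of the parsing above, using a hypothetical `tags_dict` (real checkpoints ship their own mapping):

tags_dict = {"html": 0, "body": 1, "div": 2, "li": 3, "span": 4}  # hypothetical mapping
xpath = "/html/body/div/li[1]/div/span[2]"
tags, subs = [], []
for unit in xpath.split("/"):
    if not unit.strip():
        continue
    name, *rest = unit.strip().split("[")
    tags.append(tags_dict.get(name, len(tags_dict)))   # unknown tags fall back to unk_tag_id
    subs.append(0 if not rest else int(rest[0][:-1]))
# tags == [0, 1, 2, 3, 2, 4] and subs == [0, 0, 0, 1, 0, 2];
# get_xpath_seq then truncates both lists to max_depth and right-pads them with
# pad_tag_id / pad_width so every xpath has the same fixed length.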
276
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
277
+ def __call__(
278
+ self,
279
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
280
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
281
+ xpaths: Union[List[List[int]], List[List[List[int]]]] = None,
282
+ node_labels: Optional[Union[List[int], List[List[int]]]] = None,
283
+ add_special_tokens: bool = True,
284
+ padding: Union[bool, str, PaddingStrategy] = False,
285
+ truncation: Union[bool, str, TruncationStrategy] = None,
286
+ max_length: Optional[int] = None,
287
+ stride: int = 0,
288
+ pad_to_multiple_of: Optional[int] = None,
289
+ return_tensors: Optional[Union[str, TensorType]] = None,
290
+ return_token_type_ids: Optional[bool] = None,
291
+ return_attention_mask: Optional[bool] = None,
292
+ return_overflowing_tokens: bool = False,
293
+ return_special_tokens_mask: bool = False,
294
+ return_offsets_mapping: bool = False,
295
+ return_length: bool = False,
296
+ verbose: bool = True,
297
+ **kwargs,
298
+ ) -> BatchEncoding:
299
+ """
300
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
301
+ sequences with nodes, xpaths and optional labels.
302
+
303
+ Args:
304
+ text (`str`, `List[str]`, `List[List[str]]`):
305
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
306
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
307
+ words).
308
+ text_pair (`List[str]`, `List[List[str]]`):
309
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
310
+ (pretokenized string).
311
+ xpaths (`List[List[int]]`, `List[List[List[int]]]`):
312
+ Node-level xpaths. Each bounding box should be normalized to be on a 0-1000 scale.
313
+ node_labels (`List[int]`, `List[List[int]]`, *optional*):
314
+ Node-level integer labels (for token classification tasks).
315
+ """
316
+
317
+ # Input type checking for clearer error
318
+ def _is_valid_text_input(t):
319
+ if isinstance(t, str):
320
+ # Strings are fine
321
+ return True
322
+ elif isinstance(t, (list, tuple)):
323
+ # List are fine as long as they are...
324
+ if len(t) == 0:
325
+ # ... empty
326
+ return True
327
+ elif isinstance(t[0], str):
328
+ # ... list of strings
329
+ return True
330
+ elif isinstance(t[0], (list, tuple)):
331
+ # ... list with an empty list or with a list of strings
332
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
333
+ else:
334
+ return False
335
+ else:
336
+ return False
337
+
338
+ if text_pair is not None:
339
+ # in case text + text_pair are provided, text = questions, text_pair = nodes
340
+ if not _is_valid_text_input(text):
341
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
342
+ if not isinstance(text_pair, (list, tuple)):
343
+ raise ValueError(
344
+ "Nodes must be of type `List[str]` (single pretokenized example), "
345
+ "or `List[List[str]]` (batch of pretokenized examples)."
346
+ )
347
+ else:
348
+ # in case only text is provided => must be nodes
349
+ if not isinstance(text, (list, tuple)):
350
+ raise ValueError(
351
+ "Nodes must be of type `List[str]` (single pretokenized example), "
352
+ "or `List[List[str]]` (batch of pretokenized examples)."
353
+ )
354
+
355
+ if text_pair is not None:
356
+ is_batched = isinstance(text, (list, tuple))
357
+ else:
358
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
359
+
360
+ nodes = text if text_pair is None else text_pair
361
+ assert xpaths is not None, "You must provide corresponding xpaths"
362
+ if is_batched:
363
+ assert len(nodes) == len(xpaths), "You must provide nodes and xpaths for an equal amount of examples"
364
+ for nodes_example, xpaths_example in zip(nodes, xpaths):
365
+ assert len(nodes_example) == len(xpaths_example), "You must provide as many nodes as there are xpaths"
366
+ else:
367
+ assert len(nodes) == len(xpaths), "You must provide as many nodes as there are xpaths"
368
+
369
+ if is_batched:
370
+ if text_pair is not None and len(text) != len(text_pair):
371
+ raise ValueError(
372
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
373
+ f" {len(text_pair)}."
374
+ )
375
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
376
+ is_pair = bool(text_pair is not None)
377
+ return self.batch_encode_plus(
378
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
379
+ is_pair=is_pair,
380
+ xpaths=xpaths,
381
+ node_labels=node_labels,
382
+ add_special_tokens=add_special_tokens,
383
+ padding=padding,
384
+ truncation=truncation,
385
+ max_length=max_length,
386
+ stride=stride,
387
+ pad_to_multiple_of=pad_to_multiple_of,
388
+ return_tensors=return_tensors,
389
+ return_token_type_ids=return_token_type_ids,
390
+ return_attention_mask=return_attention_mask,
391
+ return_overflowing_tokens=return_overflowing_tokens,
392
+ return_special_tokens_mask=return_special_tokens_mask,
393
+ return_offsets_mapping=return_offsets_mapping,
394
+ return_length=return_length,
395
+ verbose=verbose,
396
+ **kwargs,
397
+ )
398
+ else:
399
+ return self.encode_plus(
400
+ text=text,
401
+ text_pair=text_pair,
402
+ xpaths=xpaths,
403
+ node_labels=node_labels,
404
+ add_special_tokens=add_special_tokens,
405
+ padding=padding,
406
+ truncation=truncation,
407
+ max_length=max_length,
408
+ stride=stride,
409
+ pad_to_multiple_of=pad_to_multiple_of,
410
+ return_tensors=return_tensors,
411
+ return_token_type_ids=return_token_type_ids,
412
+ return_attention_mask=return_attention_mask,
413
+ return_overflowing_tokens=return_overflowing_tokens,
414
+ return_special_tokens_mask=return_special_tokens_mask,
415
+ return_offsets_mapping=return_offsets_mapping,
416
+ return_length=return_length,
417
+ verbose=verbose,
418
+ **kwargs,
419
+ )
420
+
421
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
422
+ def batch_encode_plus(
423
+ self,
424
+ batch_text_or_text_pairs: Union[
425
+ List[TextInput],
426
+ List[TextInputPair],
427
+ List[PreTokenizedInput],
428
+ ],
429
+ is_pair: bool = None,
430
+ xpaths: Optional[List[List[List[int]]]] = None,
431
+ node_labels: Optional[Union[List[int], List[List[int]]]] = None,
432
+ add_special_tokens: bool = True,
433
+ padding: Union[bool, str, PaddingStrategy] = False,
434
+ truncation: Union[bool, str, TruncationStrategy] = None,
435
+ max_length: Optional[int] = None,
436
+ stride: int = 0,
437
+ pad_to_multiple_of: Optional[int] = None,
438
+ return_tensors: Optional[Union[str, TensorType]] = None,
439
+ return_token_type_ids: Optional[bool] = None,
440
+ return_attention_mask: Optional[bool] = None,
441
+ return_overflowing_tokens: bool = False,
442
+ return_special_tokens_mask: bool = False,
443
+ return_offsets_mapping: bool = False,
444
+ return_length: bool = False,
445
+ verbose: bool = True,
446
+ **kwargs,
447
+ ) -> BatchEncoding:
448
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
449
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
450
+ padding=padding,
451
+ truncation=truncation,
452
+ max_length=max_length,
453
+ pad_to_multiple_of=pad_to_multiple_of,
454
+ verbose=verbose,
455
+ **kwargs,
456
+ )
457
+
458
+ return self._batch_encode_plus(
459
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
460
+ is_pair=is_pair,
461
+ xpaths=xpaths,
462
+ node_labels=node_labels,
463
+ add_special_tokens=add_special_tokens,
464
+ padding_strategy=padding_strategy,
465
+ truncation_strategy=truncation_strategy,
466
+ max_length=max_length,
467
+ stride=stride,
468
+ pad_to_multiple_of=pad_to_multiple_of,
469
+ return_tensors=return_tensors,
470
+ return_token_type_ids=return_token_type_ids,
471
+ return_attention_mask=return_attention_mask,
472
+ return_overflowing_tokens=return_overflowing_tokens,
473
+ return_special_tokens_mask=return_special_tokens_mask,
474
+ return_offsets_mapping=return_offsets_mapping,
475
+ return_length=return_length,
476
+ verbose=verbose,
477
+ **kwargs,
478
+ )
479
+
480
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
481
+ batched_input = [(text, pair)] if pair else [text]
482
+ encodings = self._tokenizer.encode_batch(
483
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
484
+ )
485
+
486
+ return encodings[0].tokens
487
+
488
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
489
+ def encode_plus(
490
+ self,
491
+ text: Union[TextInput, PreTokenizedInput],
492
+ text_pair: Optional[PreTokenizedInput] = None,
493
+ xpaths: Optional[List[List[int]]] = None,
494
+ node_labels: Optional[List[int]] = None,
495
+ add_special_tokens: bool = True,
496
+ padding: Union[bool, str, PaddingStrategy] = False,
497
+ truncation: Union[bool, str, TruncationStrategy] = None,
498
+ max_length: Optional[int] = None,
499
+ stride: int = 0,
500
+ pad_to_multiple_of: Optional[int] = None,
501
+ return_tensors: Optional[Union[str, TensorType]] = None,
502
+ return_token_type_ids: Optional[bool] = None,
503
+ return_attention_mask: Optional[bool] = None,
504
+ return_overflowing_tokens: bool = False,
505
+ return_special_tokens_mask: bool = False,
506
+ return_offsets_mapping: bool = False,
507
+ return_length: bool = False,
508
+ verbose: bool = True,
509
+ **kwargs,
510
+ ) -> BatchEncoding:
511
+ """
512
+ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
513
+ `__call__` should be used instead.
514
+
515
+ Args:
516
+ text (`str`, `List[str]`, `List[List[str]]`):
517
+ The first sequence to be encoded. This can be a string, a list of strings, or a list of lists of strings.
518
+ text_pair (`List[str]` or `List[int]`, *optional*):
519
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
520
+ list of lists of strings (words of a batch of examples).
521
+ """
522
+
523
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
524
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
525
+ padding=padding,
526
+ truncation=truncation,
527
+ max_length=max_length,
528
+ pad_to_multiple_of=pad_to_multiple_of,
529
+ verbose=verbose,
530
+ **kwargs,
531
+ )
532
+
533
+ return self._encode_plus(
534
+ text=text,
535
+ xpaths=xpaths,
536
+ text_pair=text_pair,
537
+ node_labels=node_labels,
538
+ add_special_tokens=add_special_tokens,
539
+ padding_strategy=padding_strategy,
540
+ truncation_strategy=truncation_strategy,
541
+ max_length=max_length,
542
+ stride=stride,
543
+ pad_to_multiple_of=pad_to_multiple_of,
544
+ return_tensors=return_tensors,
545
+ return_token_type_ids=return_token_type_ids,
546
+ return_attention_mask=return_attention_mask,
547
+ return_overflowing_tokens=return_overflowing_tokens,
548
+ return_special_tokens_mask=return_special_tokens_mask,
549
+ return_offsets_mapping=return_offsets_mapping,
550
+ return_length=return_length,
551
+ verbose=verbose,
552
+ **kwargs,
553
+ )
554
+
555
+ def _batch_encode_plus(
556
+ self,
557
+ batch_text_or_text_pairs: Union[
558
+ List[TextInput],
559
+ List[TextInputPair],
560
+ List[PreTokenizedInput],
561
+ ],
562
+ is_pair: bool = None,
563
+ xpaths: Optional[List[List[List[int]]]] = None,
564
+ node_labels: Optional[List[List[int]]] = None,
565
+ add_special_tokens: bool = True,
566
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
567
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
568
+ max_length: Optional[int] = None,
569
+ stride: int = 0,
570
+ pad_to_multiple_of: Optional[int] = None,
571
+ return_tensors: Optional[str] = None,
572
+ return_token_type_ids: Optional[bool] = None,
573
+ return_attention_mask: Optional[bool] = None,
574
+ return_overflowing_tokens: bool = False,
575
+ return_special_tokens_mask: bool = False,
576
+ return_offsets_mapping: bool = False,
577
+ return_length: bool = False,
578
+ verbose: bool = True,
579
+ ) -> BatchEncoding:
580
+ if not isinstance(batch_text_or_text_pairs, list):
581
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
582
+
583
+ # Set the truncation and padding strategy and restore the initial configuration
584
+ self.set_truncation_and_padding(
585
+ padding_strategy=padding_strategy,
586
+ truncation_strategy=truncation_strategy,
587
+ max_length=max_length,
588
+ stride=stride,
589
+ pad_to_multiple_of=pad_to_multiple_of,
590
+ )
591
+
592
+ if is_pair:
593
+ batch_text_or_text_pairs = [([text], text_pair) for text, text_pair in batch_text_or_text_pairs]
594
+
595
+ encodings = self._tokenizer.encode_batch(
596
+ batch_text_or_text_pairs,
597
+ add_special_tokens=add_special_tokens,
598
+ is_pretokenized=True, # we set this to True as MarkupLM always expects pretokenized inputs
599
+ )
600
+
601
+ # Convert encoding to dict
602
+ # `Tokens` is a tuple of (List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
603
+ # List[EncodingFast]) with nested dimensions corresponding to batch, overflows, sequence length
604
+ tokens_and_encodings = [
605
+ self._convert_encoding(
606
+ encoding=encoding,
607
+ return_token_type_ids=return_token_type_ids,
608
+ return_attention_mask=return_attention_mask,
609
+ return_overflowing_tokens=return_overflowing_tokens,
610
+ return_special_tokens_mask=return_special_tokens_mask,
611
+ return_offsets_mapping=True
612
+ if node_labels is not None
613
+ else return_offsets_mapping, # we use offsets to create the labels
614
+ return_length=return_length,
615
+ verbose=verbose,
616
+ )
617
+ for encoding in encodings
618
+ ]
619
+
620
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
621
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
622
+ # (we say ~ because the number of overflows varies per example in the batch)
623
+ #
624
+ # To match each overflowing sample with the original sample in the batch
625
+ # we add an overflow_to_sample_mapping array (see below)
626
+ sanitized_tokens = {}
627
+ for key in tokens_and_encodings[0][0].keys():
628
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
629
+ sanitized_tokens[key] = stack
630
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
631
+
632
+ # If returning overflowing tokens, we need to return a mapping
633
+ # from the batch idx to the original sample
634
+ if return_overflowing_tokens:
635
+ overflow_to_sample_mapping = []
636
+ for i, (toks, _) in enumerate(tokens_and_encodings):
637
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
638
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
639
+
640
+ for input_ids in sanitized_tokens["input_ids"]:
641
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
642
+
643
+ # create the token-level xpaths tags and subscripts
644
+ xpath_tags_seq = []
645
+ xpath_subs_seq = []
646
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
647
+ if return_overflowing_tokens:
648
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
649
+ else:
650
+ original_index = batch_index
651
+ xpath_tags_seq_example = []
652
+ xpath_subs_seq_example = []
653
+ for id, sequence_id, word_id in zip(
654
+ sanitized_tokens["input_ids"][batch_index],
655
+ sanitized_encodings[batch_index].sequence_ids,
656
+ sanitized_encodings[batch_index].word_ids,
657
+ ):
658
+ if word_id is not None:
659
+ if is_pair and sequence_id == 0:
660
+ xpath_tags_seq_example.append(self.pad_xpath_tags_seq)
661
+ xpath_subs_seq_example.append(self.pad_xpath_subs_seq)
662
+ else:
663
+ xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpaths[original_index][word_id])
664
+ xpath_tags_seq_example.extend([xpath_tags_list])
665
+ xpath_subs_seq_example.extend([xpath_subs_list])
666
+ else:
667
+ if id in [self.cls_token_id, self.sep_token_id, self.pad_token_id]:
668
+ xpath_tags_seq_example.append(self.pad_xpath_tags_seq)
669
+ xpath_subs_seq_example.append(self.pad_xpath_subs_seq)
670
+ else:
671
+ raise ValueError("Id not recognized")
672
+ xpath_tags_seq.append(xpath_tags_seq_example)
673
+ xpath_subs_seq.append(xpath_subs_seq_example)
674
+
675
+ sanitized_tokens["xpath_tags_seq"] = xpath_tags_seq
676
+ sanitized_tokens["xpath_subs_seq"] = xpath_subs_seq
677
+
678
+ # optionally, create the labels
679
+ if node_labels is not None:
680
+ labels = []
681
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
682
+ if return_overflowing_tokens:
683
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
684
+ else:
685
+ original_index = batch_index
686
+ labels_example = []
687
+ for id, offset, word_id in zip(
688
+ sanitized_tokens["input_ids"][batch_index],
689
+ sanitized_tokens["offset_mapping"][batch_index],
690
+ sanitized_encodings[batch_index].word_ids,
691
+ ):
692
+ if word_id is not None:
693
+ if self.only_label_first_subword:
694
+ if offset[0] == 0:
695
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
696
+ labels_example.append(node_labels[original_index][word_id])
697
+ else:
698
+ labels_example.append(self.pad_token_label)
699
+ else:
700
+ labels_example.append(node_labels[original_index][word_id])
701
+ else:
702
+ labels_example.append(self.pad_token_label)
703
+ labels.append(labels_example)
704
+
705
+ sanitized_tokens["labels"] = labels
706
+ # finally, remove offsets if the user didn't want them
707
+ if not return_offsets_mapping:
708
+ del sanitized_tokens["offset_mapping"]
709
+
710
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
711
+
712
+ def _encode_plus(
713
+ self,
714
+ text: Union[TextInput, PreTokenizedInput],
715
+ text_pair: Optional[PreTokenizedInput] = None,
716
+ xpaths: Optional[List[List[int]]] = None,
717
+ node_labels: Optional[List[int]] = None,
718
+ add_special_tokens: bool = True,
719
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
720
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
721
+ max_length: Optional[int] = None,
722
+ stride: int = 0,
723
+ pad_to_multiple_of: Optional[int] = None,
724
+ return_tensors: Optional[bool] = None,
725
+ return_token_type_ids: Optional[bool] = None,
726
+ return_attention_mask: Optional[bool] = None,
727
+ return_overflowing_tokens: bool = False,
728
+ return_special_tokens_mask: bool = False,
729
+ return_offsets_mapping: bool = False,
730
+ return_length: bool = False,
731
+ verbose: bool = True,
732
+ **kwargs,
733
+ ) -> BatchEncoding:
734
+ # make it a batched input
735
+ # 2 options:
736
+ # 1) only text, in which case text must be a list of str
737
+ # 2) text + text_pair, in which case text = str and text_pair a list of str
738
+ batched_input = [(text, text_pair)] if text_pair else [text]
739
+ batched_xpaths = [xpaths]
740
+ batched_node_labels = [node_labels] if node_labels is not None else None
741
+ batched_output = self._batch_encode_plus(
742
+ batched_input,
743
+ is_pair=bool(text_pair is not None),
744
+ xpaths=batched_xpaths,
745
+ node_labels=batched_node_labels,
746
+ add_special_tokens=add_special_tokens,
747
+ padding_strategy=padding_strategy,
748
+ truncation_strategy=truncation_strategy,
749
+ max_length=max_length,
750
+ stride=stride,
751
+ pad_to_multiple_of=pad_to_multiple_of,
752
+ return_tensors=return_tensors,
753
+ return_token_type_ids=return_token_type_ids,
754
+ return_attention_mask=return_attention_mask,
755
+ return_overflowing_tokens=return_overflowing_tokens,
756
+ return_special_tokens_mask=return_special_tokens_mask,
757
+ return_offsets_mapping=return_offsets_mapping,
758
+ return_length=return_length,
759
+ verbose=verbose,
760
+ **kwargs,
761
+ )
762
+
763
+ # If return_tensors is None, we can remove the leading batch axis
764
+ # Overflowing tokens are returned as a batch of output so we keep them in this case
765
+ if return_tensors is None and not return_overflowing_tokens:
766
+ batched_output = BatchEncoding(
767
+ {
768
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
769
+ for key, value in batched_output.items()
770
+ },
771
+ batched_output.encodings,
772
+ )
773
+
774
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
775
+
776
+ return batched_output
777
+
778
+ def _pad(
779
+ self,
780
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
781
+ max_length: Optional[int] = None,
782
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
783
+ pad_to_multiple_of: Optional[int] = None,
784
+ return_attention_mask: Optional[bool] = None,
785
+ ) -> dict:
786
+ """
787
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
788
+ Args:
789
+ encoded_inputs:
790
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
791
+ max_length: maximum length of the returned list and optionally padding length (see below).
792
+ Will truncate by taking into account the special tokens.
793
+ padding_strategy: PaddingStrategy to use for padding.
794
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
795
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
796
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
797
+ The tokenizer padding sides are defined in self.padding_side:
798
+ - 'left': pads on the left of the sequences
799
+ - 'right': pads on the right of the sequences
800
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
801
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
802
+ `>= 7.5` (Volta).
803
+ return_attention_mask:
804
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
805
+ """
806
+ # Load from model defaults
807
+ if return_attention_mask is None:
808
+ return_attention_mask = "attention_mask" in self.model_input_names
809
+
810
+ required_input = encoded_inputs[self.model_input_names[0]]
811
+
812
+ if padding_strategy == PaddingStrategy.LONGEST:
813
+ max_length = len(required_input)
814
+
815
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
816
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
817
+
818
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
819
+
820
+ # Initialize attention mask if not present.
821
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
822
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
823
+
824
+ if needs_to_be_padded:
825
+ difference = max_length - len(required_input)
826
+ if self.padding_side == "right":
827
+ if return_attention_mask:
828
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
829
+ if "token_type_ids" in encoded_inputs:
830
+ encoded_inputs["token_type_ids"] = (
831
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
832
+ )
833
+ if "xpath_tags_seq" in encoded_inputs:
834
+ encoded_inputs["xpath_tags_seq"] = (
835
+ encoded_inputs["xpath_tags_seq"] + [self.pad_xpath_tags_seq] * difference
836
+ )
837
+ if "xpath_subs_seq" in encoded_inputs:
838
+ encoded_inputs["xpath_subs_seq"] = (
839
+ encoded_inputs["xpath_subs_seq"] + [self.pad_xpath_subs_seq] * difference
840
+ )
841
+ if "labels" in encoded_inputs:
842
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
843
+ if "special_tokens_mask" in encoded_inputs:
844
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
845
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
846
+ elif self.padding_side == "left":
847
+ if return_attention_mask:
848
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
849
+ if "token_type_ids" in encoded_inputs:
850
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
851
+ "token_type_ids"
852
+ ]
853
+ if "xpath_tags_seq" in encoded_inputs:
854
+ encoded_inputs["xpath_tags_seq"] = [self.pad_xpath_tags_seq] * difference + encoded_inputs[
855
+ "xpath_tags_seq"
856
+ ]
857
+ if "xpath_subs_seq" in encoded_inputs:
858
+ encoded_inputs["xpath_subs_seq"] = [self.pad_xpath_subs_seq] * difference + encoded_inputs[
859
+ "xpath_subs_seq"
860
+ ]
861
+ if "labels" in encoded_inputs:
862
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
863
+ if "special_tokens_mask" in encoded_inputs:
864
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
865
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
866
+ else:
867
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
868
+
869
+ return encoded_inputs
870
+
871
+ def build_inputs_with_special_tokens(
872
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
873
+ ) -> List[int]:
874
+ """
875
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
876
+ adding special tokens. A RoBERTa sequence has the following format:
877
+ - single sequence: `<s> X </s>`
878
+ - pair of sequences: `<s> A </s></s> B </s>`
879
+
880
+ Args:
881
+ token_ids_0 (`List[int]`):
882
+ List of IDs to which the special tokens will be added.
883
+ token_ids_1 (`List[int]`, *optional*):
884
+ Optional second list of IDs for sequence pairs.
885
+ Returns:
886
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
887
+ """
888
+ if token_ids_1 is None:
889
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
890
+ cls = [self.cls_token_id]
891
+ sep = [self.sep_token_id]
892
+ return cls + token_ids_0 + sep + token_ids_1 + sep
893
+
894
+ def create_token_type_ids_from_sequences(
895
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
896
+ ) -> List[int]:
897
+ """
898
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
899
+ make use of token type ids, therefore a list of zeros is returned.
900
+
901
+ Args:
902
+ token_ids_0 (`List[int]`):
903
+ List of IDs.
904
+ token_ids_1 (`List[int]`, *optional*):
905
+ Optional second list of IDs for sequence pairs.
906
+ Returns:
907
+ `List[int]`: List of zeros.
908
+ """
909
+ sep = [self.sep_token_id]
910
+ cls = [self.cls_token_id]
911
+
912
+ if token_ids_1 is None:
913
+ return len(cls + token_ids_0 + sep) * [0]
914
+ return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0]
915
+
916
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
917
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
918
+ return tuple(files)
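Editor's note (not part of the diff): the methods added above are normally driven through `__call__`, which routes batched input to `batch_encode_plus` and single examples to `encode_plus`, and aligns every produced token with an xpath tag/subscript sequence. A minimal usage sketch, assuming the `microsoft/markuplm-base` checkpoint is available; the node and xpath strings below are illustrative only:

```python
from transformers import MarkupLMTokenizerFast

tokenizer = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base")

# nodes are pre-extracted text nodes of a web page; xpaths are their XPath strings
nodes = ["hello", "world"]
xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]

encoding = tokenizer(nodes, xpaths=xpaths, padding="max_length", max_length=16, return_tensors="pt")
# besides input_ids / attention_mask, the encoding carries xpath_tags_seq and xpath_subs_seq,
# the token-level xpath tag and subscript sequences built in _batch_encode_plus
print(encoding.keys())
```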
llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_oneformer": ["ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "OneFormerConfig"],
21
+ "processing_oneformer": ["OneFormerProcessor"],
22
+ }
23
+
24
+ try:
25
+ if not is_vision_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["image_processing_oneformer"] = ["OneFormerImageProcessor"]
31
+
32
+ try:
33
+ if not is_torch_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["modeling_oneformer"] = [
39
+ "ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
40
+ "OneFormerForUniversalSegmentation",
41
+ "OneFormerModel",
42
+ "OneFormerPreTrainedModel",
43
+ ]
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_oneformer import ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, OneFormerConfig
47
+ from .processing_oneformer import OneFormerProcessor
48
+
49
+ try:
50
+ if not is_vision_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .image_processing_oneformer import OneFormerImageProcessor
56
+ try:
57
+ if not is_torch_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .modeling_oneformer import (
63
+ ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
64
+ OneFormerForUniversalSegmentation,
65
+ OneFormerModel,
66
+ OneFormerPreTrainedModel,
67
+ )
68
+
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
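Editor's note (not part of the diff): the `_LazyModule` registration above means the torch- and vision-dependent submodules are only imported when one of the exported names is actually requested. A minimal sketch of the resulting public API, assuming torch is installed:

```python
from transformers import OneFormerConfig, OneFormerForUniversalSegmentation

config = OneFormerConfig()                         # falls back to the default Swin backbone config
model = OneFormerForUniversalSegmentation(config)  # randomly initialized weights
print(sum(p.numel() for p in model.parameters()))
```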
llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/configuration_oneformer.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/convert_to_hf_oneformer.cpython-310.pyc ADDED
Binary file (31.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/image_processing_oneformer.cpython-310.pyc ADDED
Binary file (42.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc ADDED
Binary file (105 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/processing_oneformer.cpython-310.pyc ADDED
Binary file (7.82 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/configuration_oneformer.py ADDED
@@ -0,0 +1,276 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """OneFormer model configuration"""
16
+ from typing import Dict, Optional
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+ from ..auto import CONFIG_MAPPING
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class OneFormerConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`OneFormerModel`]. It is used to instantiate a
32
+ OneFormer model according to the specified arguments, defining the model architecture. Instantiating a
33
+ configuration with the defaults will yield a similar configuration to that of the OneFormer
34
+ [shi-labs/oneformer_ade20k_swin_tiny](https://huggingface.co/shi-labs/oneformer_ade20k_swin_tiny) architecture
35
+ trained on [ADE20k-150](https://huggingface.co/datasets/scene_parse_150).
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ backbone_config (`PretrainedConfig`, *optional*, defaults to `SwinConfig`):
42
+ The configuration of the backbone model.
43
+ backbone (`str`, *optional*):
44
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
45
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
46
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
47
+ use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
48
+ Whether to use pretrained weights for the backbone.
49
+ use_timm_backbone (`bool`, *optional*, defaults to `False`):
50
+ Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
51
+ library.
52
+ backbone_kwargs (`dict`, *optional*):
53
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
54
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
55
+ ignore_value (`int`, *optional*, defaults to 255):
56
+ Values to be ignored in GT label while calculating loss.
57
+ num_queries (`int`, *optional*, defaults to 150):
58
+ Number of object queries.
59
+ no_object_weight (`float`, *optional*, defaults to 0.1):
60
+ Weight for no-object class predictions.
61
+ class_weight (`float`, *optional*, defaults to 2.0):
62
+ Weight for Classification CE loss.
63
+ mask_weight (`float`, *optional*, defaults to 5.0):
64
+ Weight for binary CE loss.
65
+ dice_weight (`float`, *optional*, defaults to 5.0):
66
+ Weight for dice loss.
67
+ contrastive_weight (`float`, *optional*, defaults to 0.5):
68
+ Weight for contrastive loss.
69
+ contrastive_temperature (`float`, *optional*, defaults to 0.07):
70
+ Initial value for scaling the contrastive logits.
71
+ train_num_points (`int`, *optional*, defaults to 12544):
72
+ Number of points to sample while calculating losses on mask predictions.
73
+ oversample_ratio (`float`, *optional*, defaults to 3.0):
74
+ Ratio to decide how many points to oversample.
75
+ importance_sample_ratio (`float`, *optional*, defaults to 0.75):
76
+ Ratio of points that are sampled via importance sampling.
77
+ init_std (`float`, *optional*, defaults to 0.02):
78
+ Standard deviation for normal initialization.
79
+ init_xavier_std (`float`, *optional*, defaults to 1.0):
80
+ Standard deviation for xavier uniform initialization.
81
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
82
+ Epsilon for layer normalization.
83
+ is_training (`bool`, *optional*, defaults to `False`):
84
+ Whether to run in training or inference mode.
85
+ use_auxiliary_loss (`bool`, *optional*, defaults to `True`):
86
+ Whether to calculate loss using intermediate predictions from transformer decoder.
87
+ output_auxiliary_logits (`bool`, *optional*, defaults to `True`):
88
+ Whether to return intermediate predictions from transformer decoder.
89
+ strides (`list`, *optional*, defaults to `[4, 8, 16, 32]`):
90
+ List containing the strides for feature maps in the encoder.
91
+ task_seq_len (`int`, *optional*, defaults to 77):
92
+ Sequence length for tokenizing text list input.
93
+ text_encoder_width (`int`, *optional*, defaults to 256):
94
+ Hidden size for text encoder.
95
+ text_encoder_context_length (`int`, *optional*, defaults to 77):
96
+ Input sequence length for text encoder.
97
+ text_encoder_num_layers (`int`, *optional*, defaults to 6):
98
+ Number of layers for transformer in text encoder.
99
+ text_encoder_vocab_size (`int`, *optional*, defaults to 49408):
100
+ Vocabulary size for tokenizer.
101
+ text_encoder_proj_layers (`int`, *optional*, defaults to 2):
102
+ Number of layers in the MLP used to project text queries.
103
+ text_encoder_n_ctx (`int`, *optional*, defaults to 16):
104
+ Number of learnable text context queries.
105
+ conv_dim (`int`, *optional*, defaults to 256):
106
+ Feature map dimension to map outputs from the backbone.
107
+ mask_dim (`int`, *optional*, defaults to 256):
108
+ Dimension for feature maps in pixel decoder.
109
+ hidden_dim (`int`, *optional*, defaults to 256):
110
+ Dimension for hidden states in transformer decoder.
111
+ encoder_feedforward_dim (`int`, *optional*, defaults to 1024):
112
+ Dimension for FFN layer in pixel decoder.
113
+ norm (`str`, *optional*, defaults to `"GN"`):
114
+ Type of normalization.
115
+ encoder_layers (`int`, *optional*, defaults to 6):
116
+ Number of layers in pixel decoder.
117
+ decoder_layers (`int`, *optional*, defaults to 10):
118
+ Number of layers in transformer decoder.
119
+ use_task_norm (`bool`, *optional*, defaults to `True`):
120
+ Whether to normalize the task token.
121
+ num_attention_heads (`int`, *optional*, defaults to 8):
122
+ Number of attention heads in transformer layers in the pixel and transformer decoders.
123
+ dropout (`float`, *optional*, defaults to 0.1):
124
+ Dropout probability for pixel and transformer decoders.
125
+ dim_feedforward (`int`, *optional*, defaults to 2048):
126
+ Dimension for FFN layer in transformer decoder.
127
+ pre_norm (`bool`, *optional*, defaults to `False`):
128
+ Whether to normalize hidden states before attention layers in transformer decoder.
129
+ enforce_input_proj (`bool`, *optional*, defaults to `False`):
130
+ Whether to project hidden states in transformer decoder.
131
+ query_dec_layers (`int`, *optional*, defaults to 2):
132
+ Number of layers in query transformer.
133
+ common_stride (`int`, *optional*, defaults to 4):
134
+ Common stride used for features in pixel decoder.
135
+
136
+ Examples:
137
+ ```python
138
+ >>> from transformers import OneFormerConfig, OneFormerModel
139
+
140
+ >>> # Initializing a OneFormer shi-labs/oneformer_ade20k_swin_tiny configuration
141
+ >>> configuration = OneFormerConfig()
142
+ >>> # Initializing a model (with random weights) from the shi-labs/oneformer_ade20k_swin_tiny style configuration
143
+ >>> model = OneFormerModel(configuration)
144
+ >>> # Accessing the model configuration
145
+ >>> configuration = model.config
146
+ ```
147
+ """
148
+
149
+ model_type = "oneformer"
150
+ attribute_map = {"hidden_size": "hidden_dim"}
151
+
152
+ def __init__(
153
+ self,
154
+ backbone_config: Optional[Dict] = None,
155
+ backbone: Optional[str] = None,
156
+ use_pretrained_backbone: bool = False,
157
+ use_timm_backbone: bool = False,
158
+ backbone_kwargs: Optional[Dict] = None,
159
+ ignore_value: int = 255,
160
+ num_queries: int = 150,
161
+ no_object_weight: float = 0.1,
162
+ class_weight: float = 2.0,
163
+ mask_weight: float = 5.0,
164
+ dice_weight: float = 5.0,
165
+ contrastive_weight: float = 0.5,
166
+ contrastive_temperature: float = 0.07,
167
+ train_num_points: int = 12544,
168
+ oversample_ratio: float = 3.0,
169
+ importance_sample_ratio: float = 0.75,
170
+ init_std: float = 0.02,
171
+ init_xavier_std: float = 1.0,
172
+ layer_norm_eps: float = 1e-05,
173
+ is_training: bool = False,
174
+ use_auxiliary_loss: bool = True,
175
+ output_auxiliary_logits: bool = True,
176
+ strides: Optional[list] = [4, 8, 16, 32],
177
+ task_seq_len: int = 77,
178
+ text_encoder_width: int = 256,
179
+ text_encoder_context_length: int = 77,
180
+ text_encoder_num_layers: int = 6,
181
+ text_encoder_vocab_size: int = 49408,
182
+ text_encoder_proj_layers: int = 2,
183
+ text_encoder_n_ctx: int = 16,
184
+ conv_dim: int = 256,
185
+ mask_dim: int = 256,
186
+ hidden_dim: int = 256,
187
+ encoder_feedforward_dim: int = 1024,
188
+ norm: str = "GN",
189
+ encoder_layers: int = 6,
190
+ decoder_layers: int = 10,
191
+ use_task_norm: bool = True,
192
+ num_attention_heads: int = 8,
193
+ dropout: float = 0.1,
194
+ dim_feedforward: int = 2048,
195
+ pre_norm: bool = False,
196
+ enforce_input_proj: bool = False,
197
+ query_dec_layers: int = 2,
198
+ common_stride: int = 4,
199
+ **kwargs,
200
+ ):
201
+ if use_pretrained_backbone:
202
+ raise ValueError("Pretrained backbones are not supported yet.")
203
+
204
+ if backbone_config is not None and backbone is not None:
205
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
206
+
207
+ if backbone_config is None and backbone is None:
208
+ logger.info("`backbone_config` is unset. Initializing the config with the default `Swin` backbone.")
209
+ backbone_config = CONFIG_MAPPING["swin"](
210
+ image_size=224,
211
+ in_channels=3,
212
+ patch_size=4,
213
+ embed_dim=96,
214
+ depths=[2, 2, 6, 2],
215
+ num_heads=[3, 6, 12, 24],
216
+ window_size=7,
217
+ drop_path_rate=0.3,
218
+ use_absolute_embeddings=False,
219
+ out_features=["stage1", "stage2", "stage3", "stage4"],
220
+ )
221
+ elif isinstance(backbone_config, dict):
222
+ backbone_model_type = backbone_config.get("model_type")
223
+ config_class = CONFIG_MAPPING[backbone_model_type]
224
+ backbone_config = config_class.from_dict(backbone_config)
225
+
226
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
227
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
228
+
229
+ self.backbone_config = backbone_config
230
+ self.backbone = backbone
231
+ self.use_pretrained_backbone = use_pretrained_backbone
232
+ self.use_timm_backbone = use_timm_backbone
233
+ self.backbone_kwargs = backbone_kwargs
234
+ self.ignore_value = ignore_value
235
+ self.num_queries = num_queries
236
+ self.no_object_weight = no_object_weight
237
+ self.class_weight = class_weight
238
+ self.mask_weight = mask_weight
239
+ self.dice_weight = dice_weight
240
+ self.contrastive_weight = contrastive_weight
241
+ self.contrastive_temperature = contrastive_temperature
242
+ self.train_num_points = train_num_points
243
+ self.oversample_ratio = oversample_ratio
244
+ self.importance_sample_ratio = importance_sample_ratio
245
+ self.init_std = init_std
246
+ self.init_xavier_std = init_xavier_std
247
+ self.layer_norm_eps = layer_norm_eps
248
+ self.is_training = is_training
249
+ self.use_auxiliary_loss = use_auxiliary_loss
250
+ self.output_auxiliary_logits = output_auxiliary_logits
251
+ self.strides = strides
252
+ self.task_seq_len = task_seq_len
253
+ self.text_encoder_width = text_encoder_width
254
+ self.text_encoder_context_length = text_encoder_context_length
255
+ self.text_encoder_num_layers = text_encoder_num_layers
256
+ self.text_encoder_vocab_size = text_encoder_vocab_size
257
+ self.text_encoder_proj_layers = text_encoder_proj_layers
258
+ self.text_encoder_n_ctx = text_encoder_n_ctx
259
+ self.conv_dim = conv_dim
260
+ self.mask_dim = mask_dim
261
+ self.hidden_dim = hidden_dim
262
+ self.encoder_feedforward_dim = encoder_feedforward_dim
263
+ self.norm = norm
264
+ self.encoder_layers = encoder_layers
265
+ self.decoder_layers = decoder_layers
266
+ self.use_task_norm = use_task_norm
267
+ self.num_attention_heads = num_attention_heads
268
+ self.dropout = dropout
269
+ self.dim_feedforward = dim_feedforward
270
+ self.pre_norm = pre_norm
271
+ self.enforce_input_proj = enforce_input_proj
272
+ self.query_dec_layers = query_dec_layers
273
+ self.common_stride = common_stride
274
+ self.num_hidden_layers = decoder_layers
275
+
276
+ super().__init__(**kwargs)
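Editor's note (not part of the diff): a short sketch of how the constructor above resolves the backbone. Passing an explicit `SwinConfig` skips the default branch, while leaving `backbone_config=None` triggers the Swin-Tiny defaults shown in `__init__`; the values below simply mirror those defaults:

```python
from transformers import OneFormerConfig, SwinConfig

backbone_config = SwinConfig(
    image_size=224,
    patch_size=4,
    embed_dim=96,
    depths=[2, 2, 6, 2],
    num_heads=[3, 6, 12, 24],
    window_size=7,
    drop_path_rate=0.3,
    out_features=["stage1", "stage2", "stage3", "stage4"],
)
config = OneFormerConfig(backbone_config=backbone_config, num_queries=150)
print(config.hidden_dim, config.num_hidden_layers)  # 256 10 with the defaults above
```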
llmeval-env/lib/python3.10/site-packages/transformers/models/oneformer/convert_to_hf_oneformer.py ADDED
@@ -0,0 +1,1191 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Convert OneFormer checkpoints from the original repository. URL: https://github.com/SHI-Labs/OneFormer"""
17
+
18
+ import os
19
+ import sys
20
+ from argparse import ArgumentParser
21
+ from dataclasses import dataclass
22
+ from pathlib import Path
23
+ from pprint import pformat
24
+ from typing import Any, Dict, Iterator, List, Set, Tuple
25
+
26
+ import requests
27
+ import torch
28
+ import torchvision.transforms as T
29
+ from PIL import Image
30
+ from torch import Tensor, nn
31
+
32
+
33
+ try:
34
+ from detectron2.checkpoint import DetectionCheckpointer
35
+ from detectron2.config import get_cfg
36
+ from detectron2.data import MetadataCatalog
37
+ from detectron2.projects.deeplab import add_deeplab_config
38
+ except ImportError:
39
+ pass
40
+ from transformers import CLIPTokenizer, DinatConfig, SwinConfig
41
+ from transformers.models.oneformer.image_processing_oneformer import OneFormerImageProcessor
42
+ from transformers.models.oneformer.modeling_oneformer import (
43
+ OneFormerConfig,
44
+ OneFormerForUniversalSegmentation,
45
+ OneFormerForUniversalSegmentationOutput,
46
+ OneFormerModel,
47
+ OneFormerModelOutput,
48
+ )
49
+ from transformers.models.oneformer.processing_oneformer import OneFormerProcessor
50
+ from transformers.utils import logging
51
+
52
+
53
+ StateDict = Dict[str, Tensor]
54
+
55
+ logging.set_verbosity_info()
56
+ logger = logging.get_logger()
57
+
58
+ torch.manual_seed(0)
59
+
60
+
61
+ class TrackedStateDict:
62
+ def __init__(self, to_track: Dict):
63
+ """This class "tracks" a python dictionary by keeping track of which item is accessed.
64
+
65
+ Args:
66
+ to_track (Dict): The dictionary we wish to track
67
+ """
68
+ self.to_track = to_track
69
+ self._seen: Set[str] = set()
70
+
71
+ def __getitem__(self, key: str) -> Any:
72
+ return self.to_track[key]
73
+
74
+ def __setitem__(self, key: str, item: Any):
75
+ self._seen.add(key)
76
+ self.to_track[key] = item
77
+
78
+ def diff(self) -> List[str]:
79
+ """This method returns a set difference between the keys in the tracked state dict and the one we have access so far.
80
+ This is an effective way to check whether we have updated all the keys
81
+
82
+ Returns:
83
+ List[str]: List of keys not yet updated
84
+ """
85
+ return set(self.to_track.keys()) - self._seen
86
+
87
+ def copy(self) -> Dict:
88
+ # proxy the call to the internal dictionary
89
+ return self.to_track.copy()
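Editor's note (not part of the diff): `TrackedStateDict` records every key written through `__setitem__`, so `diff()` reports destination keys the conversion never filled in. A hypothetical sketch of the intended usage; `hf_model` and `converted_tensor` are placeholder names, not identifiers from this script:

```python
# hypothetical usage: hf_model / converted_tensor are placeholders
tracked = TrackedStateDict(hf_model.state_dict())
tracked["pixel_level_module.encoder.embeddings.norm.weight"] = converted_tensor
missing_keys = tracked.diff()  # destination keys that were never assigned
print(len(missing_keys))
```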
90
+
91
+
92
+ # Image to verify the result
93
+ def prepare_img():
94
+ url = "https://praeclarumjj3.github.io/files/coco.jpeg"
95
+ img_data = requests.get(url, stream=True).raw
96
+ im = Image.open(img_data)
97
+ return im
98
+
99
+
100
+ @dataclass
101
+ class Args:
102
+ """Fake command line arguments needed by oneformer/detectron2 implementation"""
103
+
104
+ config_file: str
105
+
106
+
107
+ def setup_cfg(args: Args):
108
+ # load config from file and command-line arguments
109
+ cfg = get_cfg()
110
+ add_deeplab_config(cfg)
111
+ add_common_config(cfg)
112
+ add_oneformer_config(cfg)
113
+ add_swin_config(cfg)
114
+ add_dinat_config(cfg)
115
+ cfg.merge_from_file(args.config_file)
116
+ cfg.freeze()
117
+ return cfg
118
+
119
+
120
+ class OriginalOneFormerConfigToOursConverter:
121
+ def __call__(self, original_config: object, is_swin: bool) -> OneFormerConfig:
122
+ model = original_config.MODEL
123
+
124
+ dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0])
125
+ id2label = dict(enumerate(dataset_catalog.stuff_classes))
126
+ label2id = {label: idx for idx, label in id2label.items()}
127
+
128
+ if is_swin:
129
+ if model.SWIN.EMBED_DIM == 96:
130
+ backbone_config = SwinConfig.from_pretrained(
131
+ "microsoft/swin-tiny-patch4-window7-224",
132
+ drop_path_rate=model.SWIN.DROP_PATH_RATE,
133
+ out_features=["stage1", "stage2", "stage3", "stage4"],
134
+ )
135
+ elif model.SWIN.EMBED_DIM == 192:
136
+ backbone_config = SwinConfig.from_pretrained(
137
+ "microsoft/swin-large-patch4-window12-384",
138
+ drop_path_rate=model.SWIN.DROP_PATH_RATE,
139
+ out_features=["stage1", "stage2", "stage3", "stage4"],
140
+ )
141
+ else:
142
+ raise ValueError(f"embed dim {model.SWIN.EMBED_DIM} not supported for Swin!")
143
+ else:
144
+ backbone_config = DinatConfig.from_pretrained(
145
+ "shi-labs/dinat-large-11x11-in22k-in1k-384",
146
+ dilations=model.DiNAT.DILATIONS,
147
+ kernel_size=model.DiNAT.KERNEL_SIZE,
148
+ out_features=["stage1", "stage2", "stage3", "stage4"],
149
+ )
150
+
151
+ config: OneFormerConfig = OneFormerConfig(
152
+ backbone_config=backbone_config,
153
+ output_attentions=True,
154
+ output_hidden_states=True,
155
+ return_dict=True,
156
+ ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE,
157
+ num_classes=model.SEM_SEG_HEAD.NUM_CLASSES,
158
+ num_queries=model.ONE_FORMER.NUM_OBJECT_QUERIES,
159
+ no_object_weight=model.ONE_FORMER.NO_OBJECT_WEIGHT,
160
+ class_weight=model.ONE_FORMER.CLASS_WEIGHT,
161
+ mask_weight=model.ONE_FORMER.MASK_WEIGHT,
162
+ dice_weight=model.ONE_FORMER.DICE_WEIGHT,
163
+ contrastive_weight=model.ONE_FORMER.CONTRASTIVE_WEIGHT,
164
+ contrastive_temperature=model.ONE_FORMER.CONTRASTIVE_TEMPERATURE,
165
+ train_num_points=model.ONE_FORMER.TRAIN_NUM_POINTS,
166
+ oversample_ratio=model.ONE_FORMER.OVERSAMPLE_RATIO,
167
+ importance_sample_ratio=model.ONE_FORMER.IMPORTANCE_SAMPLE_RATIO,
168
+ init_std=0.02,
169
+ init_xavier_std=1.0,
170
+ layer_norm_eps=1e-05,
171
+ is_training=False,
172
+ use_auxiliary_loss=model.ONE_FORMER.DEEP_SUPERVISION,
173
+ output_auxiliary_logits=True,
174
+ strides=[4, 8, 16, 32],
175
+ task_seq_len=original_config.INPUT.TASK_SEQ_LEN,
176
+ max_seq_len=original_config.INPUT.MAX_SEQ_LEN,
177
+ text_encoder_width=model.TEXT_ENCODER.WIDTH,
178
+ text_encoder_context_length=model.TEXT_ENCODER.CONTEXT_LENGTH,
179
+ text_encoder_num_layers=model.TEXT_ENCODER.NUM_LAYERS,
180
+ text_encoder_vocab_size=model.TEXT_ENCODER.VOCAB_SIZE,
181
+ text_encoder_proj_layers=model.TEXT_ENCODER.PROJ_NUM_LAYERS,
182
+ text_encoder_n_ctx=model.TEXT_ENCODER.N_CTX,
183
+ conv_dim=model.SEM_SEG_HEAD.CONVS_DIM,
184
+ mask_dim=model.SEM_SEG_HEAD.MASK_DIM,
185
+ hidden_dim=model.ONE_FORMER.HIDDEN_DIM,
186
+ norm=model.SEM_SEG_HEAD.NORM,
187
+ encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS,
188
+ encoder_feedforward_dim=1024,
189
+ decoder_layers=model.ONE_FORMER.DEC_LAYERS,
190
+ use_task_norm=model.ONE_FORMER.USE_TASK_NORM,
191
+ num_attention_heads=model.ONE_FORMER.NHEADS,
192
+ dropout=model.ONE_FORMER.DROPOUT,
193
+ dim_feedforward=model.ONE_FORMER.DIM_FEEDFORWARD,
194
+ pre_norm=model.ONE_FORMER.PRE_NORM,
195
+ enforce_input_proj=model.ONE_FORMER.ENFORCE_INPUT_PROJ,
196
+ query_dec_layers=model.ONE_FORMER.CLASS_DEC_LAYERS,
197
+ common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE,
198
+ id2label=id2label,
199
+ label2id=label2id,
200
+ )
201
+
202
+ return config
203
+
204
+
205
+ class OriginalOneFormerConfigToProcessorConverter:
206
+ def __call__(self, original_config: object, model_repo: str) -> OneFormerProcessor:
207
+ model = original_config.MODEL
208
+ model_input = original_config.INPUT
209
+ dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0])
210
+
211
+ if "ade20k" in model_repo:
212
+ class_info_file = "ade20k_panoptic.json"
213
+ elif "coco" in model_repo:
214
+ class_info_file = "coco_panoptic.json"
215
+ elif "cityscapes" in model_repo:
216
+ class_info_file = "cityscapes_panoptic.json"
217
+ else:
218
+ raise ValueError("Invalid Dataset!")
219
+
220
+ image_processor = OneFormerImageProcessor(
221
+ image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(),
222
+ image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(),
223
+ size=model_input.MIN_SIZE_TEST,
224
+ max_size=model_input.MAX_SIZE_TEST,
225
+ num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
226
+ ignore_index=dataset_catalog.ignore_label,
227
+ class_info_file=class_info_file,
228
+ )
229
+
230
+ tokenizer = CLIPTokenizer.from_pretrained(model_repo)
231
+
232
+ return OneFormerProcessor(
233
+ image_processor=image_processor,
234
+ tokenizer=tokenizer,
235
+ task_seq_length=original_config.INPUT.TASK_SEQ_LEN,
236
+ max_seq_length=original_config.INPUT.MAX_SEQ_LEN,
237
+ )
238
+
239
+
240
+ class OriginalOneFormerCheckpointToOursConverter:
241
+ def __init__(self, original_model: nn.Module, config: OneFormerConfig):
242
+ self.original_model = original_model
243
+ self.config = config
244
+
245
+ def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
246
+ for src_key, dst_key in renamed_keys:
247
+ dst_state_dict[dst_key] = src_state_dict.pop(src_key)
248
+
249
+ # Swin Backbone
250
+ def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
251
+ dst_prefix: str = "pixel_level_module.encoder"
252
+ src_prefix: str = "backbone"
253
+
254
+ renamed_keys = [
255
+ (
256
+ f"{src_prefix}.patch_embed.proj.weight",
257
+ f"{dst_prefix}.embeddings.patch_embeddings.projection.weight",
258
+ ),
259
+ (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.embeddings.patch_embeddings.projection.bias"),
260
+ (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.embeddings.norm.weight"),
261
+ (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.embeddings.norm.bias"),
262
+ ]
263
+ num_layers = len(config.backbone_config.depths)
264
+ for layer_idx in range(num_layers):
265
+ for block_idx in range(config.backbone_config.depths[layer_idx]):
266
+ renamed_keys.extend(
267
+ [ # src, dst
268
+ (
269
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight",
270
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight",
271
+ ),
272
+ (
273
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias",
274
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias",
275
+ ),
276
+ (
277
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table",
278
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table",
279
+ ),
280
+ ]
281
+ )
282
+ # now we need to handle the attentions
283
+ # read in weights + bias of input projection layer of cross-attention
284
+
285
+ src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
286
+ src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
287
+
288
+ size = src_att_weight.shape[0]
289
+ offset = size // 3
290
+ dst_state_dict[
291
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight"
292
+ ] = src_att_weight[:offset, :]
293
+ dst_state_dict[
294
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias"
295
+ ] = src_att_bias[:offset]
296
+
297
+ dst_state_dict[
298
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight"
299
+ ] = src_att_weight[offset : offset * 2, :]
300
+ dst_state_dict[
301
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias"
302
+ ] = src_att_bias[offset : offset * 2]
303
+
304
+ dst_state_dict[
305
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight"
306
+ ] = src_att_weight[-offset:, :]
307
+ dst_state_dict[
308
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias"
309
+ ] = src_att_bias[-offset:]
310
+
311
+ # let's pop them
312
+ src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
313
+ src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
314
+ # proj
315
+ renamed_keys.extend(
316
+ [
317
+ (
318
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight",
319
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight",
320
+ ),
321
+ (
322
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias",
323
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias",
324
+ ),
325
+ ]
326
+ )
327
+
328
+ # second norm
329
+ renamed_keys.extend(
330
+ [
331
+ (
332
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight",
333
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight",
334
+ ),
335
+ (
336
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias",
337
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias",
338
+ ),
339
+ ]
340
+ )
341
+
342
+ # mlp
343
+ renamed_keys.extend(
344
+ [
345
+ (
346
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight",
347
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight",
348
+ ),
349
+ (
350
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias",
351
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias",
352
+ ),
353
+ (
354
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight",
355
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight",
356
+ ),
357
+ (
358
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias",
359
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias",
360
+ ),
361
+ ]
362
+ )
363
+
364
+ renamed_keys.extend(
365
+ [
366
+ (
367
+ f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index",
368
+ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index",
369
+ )
370
+ ]
371
+ )
372
+
373
+ if layer_idx < num_layers - 1:
374
+ # patch merging
375
+ renamed_keys.extend(
376
+ [
377
+ (
378
+ f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight",
379
+ f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight",
380
+ ),
381
+ (
382
+ f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight",
383
+ f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight",
384
+ ),
385
+ (
386
+ f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias",
387
+ f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias",
388
+ ),
389
+ ]
390
+ )
391
+
392
+ # hidden states norms
393
+ renamed_keys.extend(
394
+ [
395
+ (
396
+ f"{src_prefix}.norm{layer_idx}.weight",
397
+ f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight",
398
+ ),
399
+ (
400
+ f"{src_prefix}.norm{layer_idx}.bias",
401
+ f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias",
402
+ ),
403
+ ]
404
+ )
405
+
406
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
407
+
408
+ # Dinat Backbone
409
+ def replace_dinat_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
410
+ dst_prefix: str = "pixel_level_module.encoder"
411
+ src_prefix: str = "backbone"
412
+
413
+ def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
414
+ return [
415
+ (f"{src_prefix}.weight", f"{dst_prefix}.weight"),
416
+ (f"{src_prefix}.bias", f"{dst_prefix}.bias"),
417
+ ]
418
+
419
+ renamed_keys = rename_keys_for_weight_bias(f"{src_prefix}.patch_embed.norm", f"{dst_prefix}.embeddings.norm")
420
+
421
+ for i in range(2):
422
+ renamed_keys.extend(
423
+ rename_keys_for_weight_bias(
424
+ f"{src_prefix}.patch_embed.proj.{i}",
425
+ f"{dst_prefix}.embeddings.patch_embeddings.projection.{i}",
426
+ )
427
+ )
428
+
429
+ num_layers = len(config.backbone_config.depths)
430
+ for layer_idx in range(num_layers):
431
+ for block_idx in range(config.backbone_config.depths[layer_idx]):
432
+ renamed_keys.extend(
433
+ rename_keys_for_weight_bias(
434
+ f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm1",
435
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_before",
436
+ )
437
+ )
438
+
439
+ renamed_keys.extend(
440
+ rename_keys_for_weight_bias(
441
+ f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm2",
442
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_after",
443
+ )
444
+ )
445
+
446
+ renamed_keys.extend(
447
+ [ # src, dst
448
+ (
449
+ f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.rpb",
450
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.rpb",
451
+ ),
452
+ ]
453
+ )
454
+ # now we need to handle the attentions
455
+ # read in weights + bias of input projection layer of cross-attention
456
+
457
+ src_att_weight = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
458
+ src_att_bias = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
459
+
460
+ size = src_att_weight.shape[0]
461
+ offset = size // 3
462
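+ # the fused qkv projection stacks query, key and value along dim 0,
+ # so each third of the rows maps to one of the separate HF query/key/value projections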
+ dst_state_dict[
463
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.weight"
464
+ ] = src_att_weight[:offset, :]
465
+ dst_state_dict[
466
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.bias"
467
+ ] = src_att_bias[:offset]
468
+
469
+ dst_state_dict[
470
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.weight"
471
+ ] = src_att_weight[offset : offset * 2, :]
472
+ dst_state_dict[
473
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.bias"
474
+ ] = src_att_bias[offset : offset * 2]
475
+
476
+ dst_state_dict[
477
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.weight"
478
+ ] = src_att_weight[-offset:, :]
479
+ dst_state_dict[
480
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.bias"
481
+ ] = src_att_bias[-offset:]
482
+
483
+ # let's pop them
484
+ src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
485
+ src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
486
+ # proj
487
+
488
+ renamed_keys.extend(
489
+ rename_keys_for_weight_bias(
490
+ f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.proj",
491
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.output.dense",
492
+ )
493
+ )
494
+
495
+ # mlp
496
+ renamed_keys.extend(
497
+ rename_keys_for_weight_bias(
498
+ f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc1",
499
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.intermediate.dense",
500
+ )
501
+ )
502
+
503
+ renamed_keys.extend(
504
+ rename_keys_for_weight_bias(
505
+ f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc2",
506
+ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.output.dense",
507
+ )
508
+ )
509
+
510
+ if layer_idx < num_layers - 1:
511
+ # patch merging
512
+ renamed_keys.extend(
513
+ [
514
+ (
515
+ f"{src_prefix}.levels.{layer_idx}.downsample.reduction.weight",
516
+ f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.reduction.weight",
517
+ ),
518
+ (
519
+ f"{src_prefix}.levels.{layer_idx}.downsample.norm.weight",
520
+ f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.weight",
521
+ ),
522
+ (
523
+ f"{src_prefix}.levels.{layer_idx}.downsample.norm.bias",
524
+ f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.bias",
525
+ ),
526
+ ]
527
+ )
528
+
529
+ # hidden states norms
530
+ renamed_keys.extend(
531
+ [
532
+ (
533
+ f"{src_prefix}.norm{layer_idx}.weight",
534
+ f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight",
535
+ ),
536
+ (
537
+ f"{src_prefix}.norm{layer_idx}.bias",
538
+ f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias",
539
+ ),
540
+ ]
541
+ )
542
+
543
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
544
+
545
+ # Backbone + Pixel Decoder
546
+ def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict, is_swin: bool):
547
+ dst_prefix: str = "pixel_level_module.decoder"
548
+ src_prefix: str = "sem_seg_head.pixel_decoder"
549
+
550
+ if is_swin:
551
+ self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config)
552
+ else:
553
+ self.replace_dinat_backbone(dst_state_dict, src_state_dict, self.config)
554
+
555
+ def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
556
+ return [
557
+ (f"{src_prefix}.weight", f"{dst_prefix}.weight"),
558
+ (f"{src_prefix}.bias", f"{dst_prefix}.bias"),
559
+ ]
560
+
561
+ def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
562
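+ # the pixel decoder encoder uses multi-scale deformable attention,
+ # hence the sampling_offsets / attention_weights / value_proj / output_proj parameters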
+ self_attn_keys = []
563
+ self_attn_keys.extend(
564
+ rename_keys_for_weight_bias(f"{src_prefix}.attention_weights", f"{dst_prefix}.attention_weights")
565
+ )
566
+ self_attn_keys.extend(
567
+ rename_keys_for_weight_bias(f"{src_prefix}.output_proj", f"{dst_prefix}.output_proj")
568
+ )
569
+ self_attn_keys.extend(
570
+ rename_keys_for_weight_bias(f"{src_prefix}.sampling_offsets", f"{dst_prefix}.sampling_offsets")
571
+ )
572
+ self_attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.value_proj", f"{dst_prefix}.value_proj"))
573
+
574
+ return self_attn_keys
575
+
576
+ def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str):
577
+ encoder_keys = []
578
+ encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.fc1"))
579
+ encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.fc2"))
580
+ encoder_keys.extend(
581
+ rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.self_attn_layer_norm")
582
+ )
583
+ encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.final_layer_norm"))
584
+ encoder_keys.extend(rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn"))
585
+
586
+ return encoder_keys
587
+
588
+ # convolution layer for final features
589
+ renamed_keys = [
590
+ (f"{src_prefix}.adapter_1.weight", f"{dst_prefix}.adapter_1.0.weight"),
591
+ (f"{src_prefix}.adapter_1.norm.weight", f"{dst_prefix}.adapter_1.1.weight"),
592
+ (f"{src_prefix}.adapter_1.norm.bias", f"{dst_prefix}.adapter_1.1.bias"),
593
+ ]
594
+
595
+ renamed_keys.extend(
596
+ [
597
+ (f"{src_prefix}.layer_1.weight", f"{dst_prefix}.layer_1.0.weight"),
598
+ (f"{src_prefix}.layer_1.norm.weight", f"{dst_prefix}.layer_1.1.weight"),
599
+ (f"{src_prefix}.layer_1.norm.bias", f"{dst_prefix}.layer_1.1.bias"),
600
+ ]
601
+ )
602
+
603
+ # proj layers
604
+ for i in range(3):
605
+ for j in range(2):
606
+ renamed_keys.extend(
607
+ [
608
+ (f"{src_prefix}.input_proj.{i}.{j}.weight", f"{dst_prefix}.input_projections.{i}.{j}.weight"),
609
+ (f"{src_prefix}.input_proj.{i}.{j}.bias", f"{dst_prefix}.input_projections.{i}.{j}.bias"),
610
+ ]
611
+ )
612
+
613
+ renamed_keys.extend([(f"{src_prefix}.transformer.level_embed", f"{dst_prefix}.level_embed")])
614
+
615
+ # layers
616
+ for layer_idx in range(self.config.encoder_layers):
617
+ renamed_keys.extend(
618
+ rename_keys_for_encoder_layer(
619
+ f"{src_prefix}.transformer.encoder.layers.{layer_idx}", f"{dst_prefix}.encoder.layers.{layer_idx}"
620
+ )
621
+ )
622
+
623
+ # proj
624
+ renamed_keys.extend(
625
+ [
626
+ (f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"),
627
+ (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"),
628
+ ]
629
+ )
630
+
631
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
632
+
633
+ # Transformer Decoder
634
+ def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
635
+ dst_prefix: str = "transformer_module.decoder.layers"
636
+ src_prefix: str = "sem_seg_head.predictor"
637
+ for i in range(self.config.decoder_layers - 1):
638
+ # read in weights + bias of input projection layer of self-attention
639
+ in_proj_weight = src_state_dict.pop(
640
+ f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight"
641
+ )
642
+ in_proj_bias = src_state_dict.pop(
643
+ f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias"
644
+ )
645
+ # next, add query, keys and values (in that order) to the state dict
646
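+ # the fused in_proj tensors are split at multiples of 256, the decoder hidden size assumed by this script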
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
647
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.bias"] = in_proj_bias[:256]
648
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
649
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.bias"] = in_proj_bias[256:512]
650
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
651
+ dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.bias"] = in_proj_bias[-256:]
652
+
653
+ def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
654
+ dst_prefix: str = "transformer_module"
655
+ src_prefix: str = "sem_seg_head.predictor"
656
+
657
+ def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
658
+ return [
659
+ (f"{src_prefix}.weight", f"{dst_prefix}.weight"),
660
+ (f"{src_prefix}.bias", f"{dst_prefix}.bias"),
661
+ ]
662
+
663
+ def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
664
+ attn_keys = [
665
+ (f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"),
666
+ (f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"),
667
+ ]
668
+ attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj"))
669
+
670
+ return attn_keys
671
+
672
+ def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
673
+ attn_keys = []
674
+ attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj"))
675
+
676
+ return attn_keys
677
+
678
+ def rename_keys_for_query_transformer_layer(src_prefix: str, dst_prefix: str):
679
+ query_transformer_layer_keys = []
680
+
681
+ query_transformer_layer_keys.extend(
682
+ rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1")
683
+ )
684
+ query_transformer_layer_keys.extend(
685
+ rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2")
686
+ )
687
+ query_transformer_layer_keys.extend(
688
+ rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.norm1")
689
+ )
690
+ query_transformer_layer_keys.extend(
691
+ rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.norm2")
692
+ )
693
+ query_transformer_layer_keys.extend(
694
+ rename_keys_for_weight_bias(f"{src_prefix}.norm3", f"{dst_prefix}.norm3")
695
+ )
696
+
697
+ query_transformer_layer_keys.extend(
698
+ rename_keys_for_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")
699
+ )
700
+
701
+ query_transformer_layer_keys.extend(
702
+ rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn")
703
+ )
704
+
705
+ return query_transformer_layer_keys
706
+
707
+ def rename_keys_for_cross_attn_layer(src_prefix: str, dst_prefix: str):
708
+ cross_attn_layer_keys = []
709
+
710
+ cross_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm"))
711
+ cross_attn_layer_keys.extend(
712
+ rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn")
713
+ )
714
+
715
+ return cross_attn_layer_keys
716
+
717
+ def rename_keys_for_self_attn_layer(src_prefix: str, dst_prefix: str):
718
+ self_attn_layer_keys = []
719
+
720
+ self_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm"))
721
+ self_attn_layer_keys.extend(
722
+ rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")
723
+ )
724
+
725
+ return self_attn_layer_keys
726
+
727
+ def rename_keys_for_ffn_layer(src_prefix: str, dst_prefix: str):
728
+ ffn_layer_keys = []
729
+
730
+ ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1"))
731
+ ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2"))
732
+ ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm"))
733
+
734
+ return ffn_layer_keys
735
+
736
+ def rename_keys_for_transformer_decoder_layer(src_prefix: str, dst_prefix: str, idx: int):
737
+ transformer_decoder_layer_keys = []
738
+
739
+ transformer_decoder_layer_keys.extend(
740
+ rename_keys_for_cross_attn_layer(
741
+ f"{src_prefix}.transformer_cross_attention_layers.{idx}", f"{dst_prefix}.{idx}.cross_attn"
742
+ )
743
+ )
744
+
745
+ transformer_decoder_layer_keys.extend(
746
+ rename_keys_for_self_attn_layer(
747
+ f"{src_prefix}.transformer_self_attention_layers.{idx}", f"{dst_prefix}.{idx}.self_attn"
748
+ )
749
+ )
750
+
751
+ transformer_decoder_layer_keys.extend(
752
+ rename_keys_for_ffn_layer(f"{src_prefix}.transformer_ffn_layers.{idx}", f"{dst_prefix}.{idx}.ffn")
753
+ )
754
+
755
+ return transformer_decoder_layer_keys
756
+
757
+ # positional embedding for object queries
758
+ renamed_keys = [
759
+ (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"),
760
+ (f"{src_prefix}.level_embed.weight", f"{dst_prefix}.level_embed.weight"),
761
+ ]
762
+
763
+ # norm
764
+ renamed_keys.extend(
765
+ rename_keys_for_weight_bias(f"{src_prefix}.decoder_norm", f"{dst_prefix}.decoder.decoder_norm")
766
+ )
767
+
768
+ # proj
769
+ renamed_keys.extend(
770
+ rename_keys_for_weight_bias(
771
+ f"{src_prefix}.class_input_proj", f"{dst_prefix}.decoder.query_input_projection"
772
+ )
773
+ )
774
+
775
+ renamed_keys.extend(
776
+ rename_keys_for_weight_bias(f"{src_prefix}.class_embed", f"{dst_prefix}.decoder.class_embed")
777
+ )
778
+
779
+ for i in range(3):
780
+ renamed_keys.extend(
781
+ rename_keys_for_weight_bias(
782
+ f"{src_prefix}.mask_embed.layers.{i}", f"{dst_prefix}.decoder.mask_embed.layers.{i}.0"
783
+ )
784
+ )
785
+
786
+ # norm
787
+ renamed_keys.extend(
788
+ rename_keys_for_weight_bias(
789
+ f"{src_prefix}.class_transformer.decoder.norm", f"{dst_prefix}.decoder.query_transformer.decoder.norm"
790
+ )
791
+ )
792
+
793
+ # transformer to update queries with task tokens
794
+ for i in range(self.config.query_dec_layers):
795
+ renamed_keys.extend(
796
+ rename_keys_for_query_transformer_layer(
797
+ f"{src_prefix}.class_transformer.decoder.layers.{i}",
798
+ f"{dst_prefix}.decoder.query_transformer.decoder.layers.{i}",
799
+ )
800
+ )
801
+
802
+ # decoder layers
803
+ for i in range(self.config.decoder_layers - 1):
804
+ renamed_keys.extend(
805
+ rename_keys_for_transformer_decoder_layer(
806
+ f"{src_prefix}",
807
+ f"{dst_prefix}.decoder.layers",
808
+ i,
809
+ )
810
+ )
811
+
812
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
813
+ self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict)
814
+
815
+ def replace_task_mlp(self, dst_state_dict: StateDict, src_state_dict: StateDict):
816
+ dst_prefix: str = "task_encoder"
817
+ src_prefix: str = "task_mlp"
818
+
819
+ def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
820
+ return [
821
+ (f"{src_prefix}.weight", f"{dst_prefix}.weight"),
822
+ (f"{src_prefix}.bias", f"{dst_prefix}.bias"),
823
+ ]
824
+
825
+ renamed_keys = []
826
+
827
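+ # the task encoder is a two-layer MLP; the trailing ".0" in the destination keys targets the linear inside each HF layer block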
+ for i in range(2):
828
+ renamed_keys.extend(
829
+ rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.task_mlp.layers.{i}.0")
830
+ )
831
+
832
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
833
+
834
+ def replace_text_projector(self, dst_state_dict: StateDict, src_state_dict: StateDict):
835
+ dst_prefix: str = "text_mapper.text_projector"
836
+ src_prefix: str = "text_projector"
837
+
838
+ def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
839
+ return [
840
+ (f"{src_prefix}.weight", f"{dst_prefix}.weight"),
841
+ (f"{src_prefix}.bias", f"{dst_prefix}.bias"),
842
+ ]
843
+
844
+ renamed_keys = []
845
+
846
+ for i in range(self.config.text_encoder_config["text_encoder_proj_layers"]):
847
+ renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.{i}.0"))
848
+
849
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
850
+
851
+ def replace_text_mapper(self, dst_state_dict: StateDict, src_state_dict: StateDict):
852
+ dst_prefix: str = "text_mapper.text_encoder"
853
+ src_prefix: str = "text_encoder"
854
+
855
+ self.replace_text_projector(dst_state_dict, src_state_dict)
856
+
857
+ def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
858
+ return [
859
+ (f"{src_prefix}.weight", f"{dst_prefix}.weight"),
860
+ (f"{src_prefix}.bias", f"{dst_prefix}.bias"),
861
+ ]
862
+
863
+ def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
864
+ attn_keys = [
865
+ (f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"),
866
+ (f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"),
867
+ ]
868
+ attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj"))
869
+
870
+ return attn_keys
871
+
872
+ def rename_keys_for_layer(src_prefix: str, dst_prefix: str):
873
+ resblock_keys = []
874
+
875
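+ # map the CLIP-style residual block names (c_fc, c_proj, ln_1, ln_2, attn) to the HF layer names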
+ resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_fc", f"{dst_prefix}.mlp.fc1"))
876
+ resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_proj", f"{dst_prefix}.mlp.fc2"))
877
+ resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_1", f"{dst_prefix}.layer_norm1"))
878
+ resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_2", f"{dst_prefix}.layer_norm2"))
879
+ resblock_keys.extend(rename_keys_for_attn(f"{src_prefix}.attn", f"{dst_prefix}.self_attn"))
880
+
881
+ return resblock_keys
882
+
883
+ renamed_keys = [
884
+ ("prompt_ctx.weight", "text_mapper.prompt_ctx.weight"),
885
+ ]
886
+
887
+ renamed_keys.extend(
888
+ [
889
+ (f"{src_prefix}.positional_embedding", f"{dst_prefix}.positional_embedding"),
890
+ (f"{src_prefix}.token_embedding.weight", f"{dst_prefix}.token_embedding.weight"),
891
+ ]
892
+ )
893
+
894
+ renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_final", f"{dst_prefix}.ln_final"))
895
+
896
+ for i in range(self.config.text_encoder_config["text_encoder_num_layers"]):
897
+ renamed_keys.extend(
898
+ rename_keys_for_layer(
899
+ f"{src_prefix}.transformer.resblocks.{i}", f"{dst_prefix}.transformer.layers.{i}"
900
+ )
901
+ )
902
+
903
+ self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
904
+
905
+ def convert(self, oneformer: OneFormerModel, is_swin: bool) -> OneFormerModel:
906
+ dst_state_dict = TrackedStateDict(oneformer.state_dict())
907
+ src_state_dict = self.original_model.state_dict()
908
+
909
+ self.replace_pixel_module(dst_state_dict, src_state_dict, is_swin)
910
+ self.replace_transformer_module(dst_state_dict, src_state_dict)
911
+ self.replace_task_mlp(dst_state_dict, src_state_dict)
912
+ if self.config.is_training:
913
+ self.replace_text_mapper(dst_state_dict, src_state_dict)
914
+
915
+ logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}")
916
+ logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}")
917
+ logger.info("🙌 Done")
918
+
919
+ oneformer.load_state_dict(dst_state_dict)
920
+
921
+ return oneformer
922
+
923
+ @staticmethod
924
+ def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[Path, Path]]:
+ checkpoints: List[Path] = list(checkpoints_dir.glob("**/*.pth"))
926
+
927
+ for checkpoint in checkpoints:
928
+ logger.info(f"💪 Converting {checkpoint.stem}")
929
+ # find associated config file
930
+ config: Path = config_dir / f"{checkpoint.stem}.yaml"
931
+
932
+ yield config, checkpoint
933
+
934
+
935
+ def post_process_sem_seg_output(outputs: OneFormerForUniversalSegmentationOutput, target_size: Tuple[int, int]):
936
+ # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1]
937
+ class_queries_logits = outputs.class_queries_logits
938
+ # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH]
939
+ masks_queries_logits = outputs.masks_queries_logits
940
+ if target_size is not None:
941
+ masks_queries_logits = torch.nn.functional.interpolate(
942
+ masks_queries_logits,
943
+ size=target_size,
944
+ mode="bilinear",
945
+ align_corners=False,
946
+ )
947
+ # remove the null class `[..., :-1]`
948
+ masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
949
+ # mask probs has shape [BATCH, QUERIES, HEIGHT, WIDTH]
950
+ masks_probs = masks_queries_logits.sigmoid()
951
+ # now we want to sum over the queries,
952
+ # $ out_{c,h,w} = \sum_q p_{q,c} * m_{q,h,w} $
953
+ # where $ softmax(p) \in R^{q, c} $ is the mask classes
954
+ # and $ sigmoid(m) \in R^{q, h, w}$ is the mask probabilities
955
+ # b(atch)q(uery)c(lasses), b(atch)q(uery)h(eight)w(idth)
956
+ segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
957
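+ # segmentation has shape [BATCH, CLASSES, HEIGHT, WIDTH]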
+
958
+ return segmentation
959
+
960
+
961
+ def test(
962
+ original_model,
963
+ our_model: OneFormerForUniversalSegmentation,
964
+ processor: OneFormerProcessor,
965
+ model_repo: str,
966
+ ):
967
+ def _preprocess_text(text_list=None, max_length=77):
968
+ if text_list is None:
969
+ raise ValueError("tokens cannot be None.")
970
+
971
+ tokens = tokenizer(text_list, padding="max_length", max_length=max_length, truncation=True)
972
+
973
+ attention_masks, input_ids = tokens["attention_mask"], tokens["input_ids"]
974
+
975
+ token_inputs = []
976
+ for attn_mask, input_id in zip(attention_masks, input_ids):
977
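+ # zero out the padded positions by multiplying the input ids with the attention mask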
+ token = torch.tensor(attn_mask) * torch.tensor(input_id)
978
+ token_inputs.append(token.unsqueeze(0))
979
+
980
+ token_inputs = torch.cat(token_inputs, dim=0)
981
+ return token_inputs
982
+
983
+ with torch.no_grad():
984
+ tokenizer = CLIPTokenizer.from_pretrained(model_repo)
985
+ original_model = original_model.eval()
986
+ our_model = our_model.eval()
987
+
988
+ im = prepare_img()
989
+
990
+ tr = T.Compose(
991
+ [
992
+ T.Resize((640, 640)),
993
+ T.ToTensor(),
994
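+ # the mean/std are given in the 0-255 range, so rescale them to match the 0-1 ToTensor output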
+ T.Normalize(
995
+ mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0,
996
+ std=torch.tensor([58.395, 57.120, 57.375]) / 255.0,
997
+ ),
998
+ ],
999
+ )
1000
+
1001
+ x = tr(im).unsqueeze(0)
1002
+
1003
+ task_input = ["the task is semantic"]
1004
+ task_token = _preprocess_text(task_input, max_length=processor.task_seq_length)
1005
+
1006
+ original_model_backbone_features = original_model.backbone(x.clone())
1007
+
1008
+ our_model_output: OneFormerModelOutput = our_model.model(x.clone(), task_token, output_hidden_states=True)
1009
+
1010
+ for original_model_feature, our_model_feature in zip(
1011
+ original_model_backbone_features.values(), our_model_output.encoder_hidden_states
1012
+ ):
1013
+ assert torch.allclose(
1014
+ original_model_feature, our_model_feature, atol=3e-3
1015
+ ), "The backbone features are not the same."
1016
+ mask_features, _, multi_scale_features, _, _ = original_model.sem_seg_head.pixel_decoder.forward_features(
1017
+ original_model_backbone_features
1018
+ )
1019
+
1020
+ original_pixel_decoder_features = []
1021
+ original_pixel_decoder_features.append(mask_features)
1022
+ for i in range(len(multi_scale_features)):
1023
+ original_pixel_decoder_features.append(multi_scale_features[i])
1024
+
1025
+ for original_model_feature, our_model_feature in zip(
1026
+ original_pixel_decoder_features, our_model_output.pixel_decoder_hidden_states
1027
+ ):
1028
+ assert torch.allclose(
1029
+ original_model_feature, our_model_feature, atol=3e-4
1030
+ ), "The pixel decoder feature are not the same"
1031
+
1032
+ tr_complete = T.Compose(
1033
+ [
1034
+ T.Resize((640, 640)),
1035
+ T.ToTensor(),
1036
+ ],
1037
+ )
1038
+
1039
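+ # the original model consumes pixel values in the 0-255 range, hence the rescaling and integer rounding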
+ y = (tr_complete(im) * 255.0).to(torch.int).float()
1040
+
1041
+ # let's test the full model
1042
+ original_model_out = original_model([{"image": y.clone(), "task": "The task is semantic"}])
1043
+
1044
+ original_segmentation = original_model_out[0]["sem_seg"]
1045
+
1046
+ our_model_out: OneFormerForUniversalSegmentationOutput = our_model(
1047
+ x.clone(), task_token, output_hidden_states=True
1048
+ )
1049
+
1050
+ our_segmentation = post_process_sem_seg_output(our_model_out, target_size=(640, 640))[0]
1051
+
1052
+ assert torch.allclose(
1053
+ original_segmentation, our_segmentation, atol=1e-3
1054
+ ), "The segmentation image is not the same."
1055
+
1056
+ logger.info("✅ Test passed!")
1057
+
1058
+
1059
+ def get_name(checkpoint_file: Path):
1060
+ model_name_raw: str = checkpoint_file.stem
1061
+
1062
+ backbone = "swin" if "swin" in model_name_raw else "dinat"
1063
+ dataset = ""
1064
+ if "coco" in model_name_raw:
1065
+ dataset = "coco"
1066
+ elif "ade20k" in model_name_raw:
1067
+ dataset = "ade20k"
1068
+ elif "cityscapes" in model_name_raw:
1069
+ dataset = "cityscapes"
1070
+ else:
1071
+ raise ValueError(
1072
+ f"{model_name_raw} must be wrong since we didn't find 'coco' or 'ade20k' or 'cityscapes' in it "
1073
+ )
1074
+
1075
+ backbone_types = ["tiny", "large"]
1076
+
1077
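+ # pick whichever backbone size ("tiny" or "large") appears in the checkpoint name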
+ backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0]
1078
+
1079
+ model_name = f"oneformer_{dataset}_{backbone}_{backbone_type}"
1080
+
1081
+ return model_name
1082
+
1083
+
1084
+ if __name__ == "__main__":
1085
+ parser = ArgumentParser(
1086
+ description=(
1087
+ "Command line to convert the original oneformer models (with swin backbone) to transformers"
1088
+ " implementation."
1089
+ )
1090
+ )
1091
+
1092
+ parser.add_argument(
1093
+ "--checkpoints_dir",
1094
+ type=Path,
1095
+ help=(
1096
+ "A directory containing the model's checkpoints. The directory has to have the following structure:"
1097
+ " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pth; where <CONFIG_NAME> name must follow the"
1098
+ " following nomenclature nomenclature: oneformer_<DATASET_NAME>_<BACKBONE>_<BACKBONE_TYPE>"
1099
+ ),
1100
+ )
1101
+ parser.add_argument(
1102
+ "--configs_dir",
1103
+ type=Path,
1104
+ help=(
1105
+ "A directory containing the model's configs, see detectron2 doc. The directory has to have the following"
1106
+ " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.yaml; where <CONFIG_NAME> name must follow the"
1107
+ " following nomenclature nomenclature: oneformer_<DATASET_NAME>_<BACKBONE>_<BACKBONE_TYPE>"
1108
+ ),
1109
+ )
1110
+ parser.add_argument(
1111
+ "--pytorch_dump_folder_path",
1112
+ required=True,
1113
+ type=Path,
1114
+ help="Path to the folder to output PyTorch models.",
1115
+ )
1116
+ parser.add_argument(
1117
+ "--oneformer_dir",
1118
+ required=True,
1119
+ type=Path,
1120
+ help=(
1121
+ "A path to OneFormer's original implementation directory. You can download from here: "
1122
+ "https://github.com/SHI-Labs/OneFormer"
1123
+ ),
1124
+ )
1125
+
1126
+ args = parser.parse_args()
1127
+
1128
+ checkpoints_dir: Path = args.checkpoints_dir
1129
+ config_dir: Path = args.configs_dir
1130
+ save_directory: Path = args.pytorch_dump_folder_path
1131
+ oneformer_dir: Path = args.oneformer_dir
1132
+ # append the parent of the OneFormer directory to sys.path
1133
+ sys.path.append(str(oneformer_dir.parent))
1134
+ # and import what's needed
1135
+ from OneFormer.oneformer import add_common_config, add_dinat_config, add_oneformer_config, add_swin_config
1136
+ from OneFormer.oneformer.oneformer_model import OneFormer as OriginalOneFormer
1137
+
1138
+ if not save_directory.exists():
1139
+ save_directory.mkdir(parents=True)
1140
+
1141
+ for config_file, checkpoint_file in OriginalOneFormerCheckpointToOursConverter.using_dirs(
1142
+ checkpoints_dir, config_dir
1143
+ ):
1144
+ processor = OriginalOneFormerConfigToProcessorConverter()(
1145
+ setup_cfg(Args(config_file=config_file)), os.path.join("shi-labs", config_file.stem)
1146
+ )
1147
+
1148
+ original_config = setup_cfg(Args(config_file=config_file))
1149
+ oneformer_kwargs = OriginalOneFormer.from_config(original_config)
1150
+
1151
+ original_model = OriginalOneFormer(**oneformer_kwargs).eval()
1152
+
1153
+ DetectionCheckpointer(original_model).load(str(checkpoint_file))
1154
+
1155
+ is_swin = "swin" in config_file.stem
1156
+
1157
+ config: OneFormerConfig = OriginalOneFormerConfigToOursConverter()(original_config, is_swin)
1158
+
1159
+ oneformer = OneFormerModel(config=config).eval()
1160
+
1161
+ converter = OriginalOneFormerCheckpointToOursConverter(original_model, config)
1162
+
1163
+ oneformer = converter.convert(oneformer, is_swin)
1164
+
1165
+ oneformer_for_universal_segmentation = OneFormerForUniversalSegmentation(config=config).eval()
1166
+
1167
+ oneformer_for_universal_segmentation.model = oneformer
1168
+
1169
+ test(
1170
+ original_model,
1171
+ oneformer_for_universal_segmentation,
1172
+ processor,
1173
+ os.path.join("shi-labs", config_file.stem),
1174
+ )
1175
+
1176
+ model_name = get_name(checkpoint_file)
1177
+ logger.info(f"🪄 Saving {model_name}")
1178
+
1179
+ processor.save_pretrained(save_directory / model_name)
1180
+ oneformer_for_universal_segmentation.save_pretrained(save_directory / model_name)
1181
+
1182
+ processor.push_to_hub(
1183
+ repo_id=os.path.join("shi-labs", config_file.stem),
1184
+ commit_message="Add configs",
1185
+ use_temp_dir=True,
1186
+ )
1187
+ oneformer_for_universal_segmentation.push_to_hub(
1188
+ repo_id=os.path.join("shi-labs", config_file.stem),
1189
+ commit_message="Add model",
1190
+ use_temp_dir=True,
1191
+ )