applied-ai-018 committed
Commit 2839303 · verified · 1 Parent(s): efc8380

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/align/__init__.py +73 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/configuration_align.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/convert_align_tf_to_hf.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/modeling_align.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/processing_align.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/align/configuration_align.py +383 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/align/convert_align_tf_to_hf.py +389 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/align/modeling_align.py +1633 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/align/processing_align.py +121 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__init__.py +74 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/configuration_donut_swin.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/configuration_donut_swin.py +135 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/convert_donut_to_pytorch.py +234 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/modeling_donut_swin.py +955 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/donut/processing_donut.py +196 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__init__.py +65 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/__init__.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/configuration_encodec.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/convert_encodec_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/feature_extraction_encodec.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/modeling_encodec.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/configuration_encodec.py +193 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py +365 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/feature_extraction_encodec.py +206 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/modeling_encodec.py +810 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__init__.py +71 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/configuration_mobilevitv2.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/modeling_mobilevitv2.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/configuration_mobilevitv2.py +168 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py +326 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/modeling_mobilevitv2.py +1030 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__init__.py +100 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/configuration_owlvit.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/convert_owlvit_original_flax_to_hf.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/feature_extraction_owlvit.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/image_processing_owlvit.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/modeling_owlvit.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/processing_owlvit.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/configuration_owlvit.py +383 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py +406 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/feature_extraction_owlvit.py +33 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/image_processing_owlvit.py +611 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/modeling_owlvit.py +1685 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/processing_owlvit.py +224 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/persimmon/__init__.py +62 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/persimmon/__pycache__/__init__.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/align/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_align": [
25
+ "ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP",
26
+ "AlignConfig",
27
+ "AlignTextConfig",
28
+ "AlignVisionConfig",
29
+ ],
30
+ "processing_align": ["AlignProcessor"],
31
+ }
32
+
33
+ try:
34
+ if not is_torch_available():
35
+ raise OptionalDependencyNotAvailable()
36
+ except OptionalDependencyNotAvailable:
37
+ pass
38
+ else:
39
+ _import_structure["modeling_align"] = [
40
+ "ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST",
41
+ "AlignModel",
42
+ "AlignPreTrainedModel",
43
+ "AlignTextModel",
44
+ "AlignVisionModel",
45
+ ]
46
+
47
+ if TYPE_CHECKING:
48
+ from .configuration_align import (
49
+ ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP,
50
+ AlignConfig,
51
+ AlignTextConfig,
52
+ AlignVisionConfig,
53
+ )
54
+ from .processing_align import AlignProcessor
55
+
56
+ try:
57
+ if not is_torch_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .modeling_align import (
63
+ ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST,
64
+ AlignModel,
65
+ AlignPreTrainedModel,
66
+ AlignTextModel,
67
+ AlignVisionModel,
68
+ )
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
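The `__init__.py` above registers ALIGN with transformers' lazy-import machinery: each submodule is only loaded when one of its symbols is first accessed, and the torch-dependent classes are simply left out of `_import_structure` when PyTorch is unavailable. A minimal usage sketch of that behaviour (assuming a transformers installation with torch present):

```python
# Attribute access on the lazily-wrapped package triggers the real imports.
from transformers.models import align

config_cls = align.AlignConfig   # loads configuration_align on first access
model_cls = align.AlignModel     # loads modeling_align (requires torch)
print(config_cls.__name__, model_cls.__name__)
```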
llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.05 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/configuration_align.cpython-310.pyc ADDED
Binary file (16.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/convert_align_tf_to_hf.cpython-310.pyc ADDED
Binary file (10.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/modeling_align.cpython-310.pyc ADDED
Binary file (50.4 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/align/__pycache__/processing_align.cpython-310.pyc ADDED
Binary file (5.69 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/align/configuration_align.py ADDED
@@ -0,0 +1,383 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ALIGN model configuration"""
16
+
17
+ import os
18
+ from typing import TYPE_CHECKING, List, Union
19
+
20
+
21
+ if TYPE_CHECKING:
22
+ pass
23
+
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
32
+
33
+
34
+ class AlignTextConfig(PretrainedConfig):
35
+ r"""
36
+ This is the configuration class to store the configuration of an [`AlignTextModel`]. It is used to instantiate an
37
+ ALIGN text encoder according to the specified arguments, defining the model architecture. Instantiating a
38
+ configuration with the defaults will yield a similar configuration to that of the text encoder of the ALIGN
39
+ [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values here are
40
+ copied from BERT.
41
+
42
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
43
+ documentation from [`PretrainedConfig`] for more information.
44
+
45
+ Args:
46
+ vocab_size (`int`, *optional*, defaults to 30522):
47
+ Vocabulary size of the Align Text model. Defines the number of different tokens that can be represented by
48
+ the `inputs_ids` passed when calling [`AlignTextModel`].
49
+ hidden_size (`int`, *optional*, defaults to 768):
50
+ Dimensionality of the encoder layers and the pooler layer.
51
+ num_hidden_layers (`int`, *optional*, defaults to 12):
52
+ Number of hidden layers in the Transformer encoder.
53
+ num_attention_heads (`int`, *optional*, defaults to 12):
54
+ Number of attention heads for each attention layer in the Transformer encoder.
55
+ intermediate_size (`int`, *optional*, defaults to 3072):
56
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
57
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
58
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
59
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
63
+ The dropout ratio for the attention probabilities.
64
+ max_position_embeddings (`int`, *optional*, defaults to 512):
65
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
66
+ just in case (e.g., 512 or 1024 or 2048).
67
+ type_vocab_size (`int`, *optional*, defaults to 2):
68
+ The vocabulary size of the `token_type_ids` passed when calling [`AlignTextModel`].
69
+ initializer_range (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
72
+ The epsilon used by the layer normalization layers.
73
+ pad_token_id (`int`, *optional*, defaults to 0):
74
+ Padding token id.
75
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
76
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
77
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
78
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
79
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
80
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
81
+ use_cache (`bool`, *optional*, defaults to `True`):
82
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
83
+ relevant if `config.is_decoder=True`.
84
+
85
+ Example:
86
+
87
+ ```python
88
+ >>> from transformers import AlignTextConfig, AlignTextModel
89
+
90
+ >>> # Initializing a AlignTextConfig with kakaobrain/align-base style configuration
91
+ >>> configuration = AlignTextConfig()
92
+
93
+ >>> # Initializing a AlignTextModel (with random weights) from the kakaobrain/align-base style configuration
94
+ >>> model = AlignTextModel(configuration)
95
+
96
+ >>> # Accessing the model configuration
97
+ >>> configuration = model.config
98
+ ```"""
99
+
100
+ model_type = "align_text_model"
101
+
102
+ def __init__(
103
+ self,
104
+ vocab_size=30522,
105
+ hidden_size=768,
106
+ num_hidden_layers=12,
107
+ num_attention_heads=12,
108
+ intermediate_size=3072,
109
+ hidden_act="gelu",
110
+ hidden_dropout_prob=0.1,
111
+ attention_probs_dropout_prob=0.1,
112
+ max_position_embeddings=512,
113
+ type_vocab_size=2,
114
+ initializer_range=0.02,
115
+ layer_norm_eps=1e-12,
116
+ pad_token_id=0,
117
+ position_embedding_type="absolute",
118
+ use_cache=True,
119
+ **kwargs,
120
+ ):
121
+ super().__init__(**kwargs)
122
+
123
+ self.vocab_size = vocab_size
124
+ self.hidden_size = hidden_size
125
+ self.num_hidden_layers = num_hidden_layers
126
+ self.num_attention_heads = num_attention_heads
127
+ self.hidden_act = hidden_act
128
+ self.intermediate_size = intermediate_size
129
+ self.hidden_dropout_prob = hidden_dropout_prob
130
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
131
+ self.max_position_embeddings = max_position_embeddings
132
+ self.type_vocab_size = type_vocab_size
133
+ self.initializer_range = initializer_range
134
+ self.layer_norm_eps = layer_norm_eps
135
+ self.position_embedding_type = position_embedding_type
136
+ self.use_cache = use_cache
137
+ self.pad_token_id = pad_token_id
138
+
139
+ @classmethod
140
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
141
+ cls._set_token_in_kwargs(kwargs)
142
+
143
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
144
+
145
+ # get the text config dict if we are loading from AlignConfig
146
+ if config_dict.get("model_type") == "align":
147
+ config_dict = config_dict["text_config"]
148
+
149
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
150
+ logger.warning(
151
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
152
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
153
+ )
154
+
155
+ return cls.from_dict(config_dict, **kwargs)
156
+
157
+
158
+ class AlignVisionConfig(PretrainedConfig):
159
+ r"""
160
+ This is the configuration class to store the configuration of an [`AlignVisionModel`]. It is used to instantiate an
161
+ ALIGN vision encoder according to the specified arguments, defining the model architecture. Instantiating a
162
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the ALIGN
163
+ [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values are copied
164
+ from EfficientNet (efficientnet-b7)
165
+
166
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
167
+ documentation from [`PretrainedConfig`] for more information.
168
+
169
+ Args:
170
+ num_channels (`int`, *optional*, defaults to 3):
171
+ The number of input channels.
172
+ image_size (`int`, *optional*, defaults to 600):
173
+ The input image size.
174
+ width_coefficient (`float`, *optional*, defaults to 2.0):
175
+ Scaling coefficient for network width at each stage.
176
+ depth_coefficient (`float`, *optional*, defaults to 3.1):
177
+ Scaling coefficient for network depth at each stage.
178
+ depth_divisor (`int`, *optional*, defaults to 8):
179
+ A unit of network width.
180
+ kernel_sizes (`List[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
181
+ List of kernel sizes to be used in each block.
182
+ in_channels (`List[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
183
+ List of input channel sizes to be used in each block for convolutional layers.
184
+ out_channels (`List[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
185
+ List of output channel sizes to be used in each block for convolutional layers.
186
+ depthwise_padding (`List[int]`, *optional*, defaults to `[]`):
187
+ List of block indices with square padding.
188
+ strides (`List[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
189
+ List of stride sizes to be used in each block for convolutional layers.
190
+ num_block_repeats (`List[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
191
+ List of the number of times each block is to be repeated.
192
+ expand_ratios (`List[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
193
+ List of scaling coefficient of each block.
194
+ squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
195
+ Squeeze expansion ratio.
196
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
197
+ The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
198
+ `"selu"`, `"gelu_new"`, `"silu"` and `"mish"` are supported.
199
+ hidden_dim (`int`, *optional*, defaults to 2560):
200
+ The hidden dimension of the layer before the classification head.
201
+ pooling_type (`str` or `function`, *optional*, defaults to `"mean"`):
202
+ Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`,
203
+ `"max"`]
204
+ initializer_range (`float`, *optional*, defaults to 0.02):
205
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
206
+ batch_norm_eps (`float`, *optional*, defaults to 1e-3):
207
+ The epsilon used by the batch normalization layers.
208
+ batch_norm_momentum (`float`, *optional*, defaults to 0.99):
209
+ The momentum used by the batch normalization layers.
210
+ drop_connect_rate (`float`, *optional*, defaults to 0.2):
211
+ The drop rate for skip connections.
212
+
213
+ Example:
214
+
215
+ ```python
216
+ >>> from transformers import AlignVisionConfig, AlignVisionModel
217
+
218
+ >>> # Initializing a AlignVisionConfig with kakaobrain/align-base style configuration
219
+ >>> configuration = AlignVisionConfig()
220
+
221
+ >>> # Initializing a AlignVisionModel (with random weights) from the kakaobrain/align-base style configuration
222
+ >>> model = AlignVisionModel(configuration)
223
+
224
+ >>> # Accessing the model configuration
225
+ >>> configuration = model.config
226
+ ```"""
227
+
228
+ model_type = "align_vision_model"
229
+
230
+ def __init__(
231
+ self,
232
+ num_channels: int = 3,
233
+ image_size: int = 600,
234
+ width_coefficient: float = 2.0,
235
+ depth_coefficient: float = 3.1,
236
+ depth_divisor: int = 8,
237
+ kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
238
+ in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
239
+ out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
240
+ depthwise_padding: List[int] = [],
241
+ strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
242
+ num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
243
+ expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
244
+ squeeze_expansion_ratio: float = 0.25,
245
+ hidden_act: str = "swish",
246
+ hidden_dim: int = 2560,
247
+ pooling_type: str = "mean",
248
+ initializer_range: float = 0.02,
249
+ batch_norm_eps: float = 0.001,
250
+ batch_norm_momentum: float = 0.99,
251
+ drop_connect_rate: float = 0.2,
252
+ **kwargs,
253
+ ):
254
+ super().__init__(**kwargs)
255
+
256
+ self.num_channels = num_channels
257
+ self.image_size = image_size
258
+ self.width_coefficient = width_coefficient
259
+ self.depth_coefficient = depth_coefficient
260
+ self.depth_divisor = depth_divisor
261
+ self.kernel_sizes = kernel_sizes
262
+ self.in_channels = in_channels
263
+ self.out_channels = out_channels
264
+ self.depthwise_padding = depthwise_padding
265
+ self.strides = strides
266
+ self.num_block_repeats = num_block_repeats
267
+ self.expand_ratios = expand_ratios
268
+ self.squeeze_expansion_ratio = squeeze_expansion_ratio
269
+ self.hidden_act = hidden_act
270
+ self.hidden_dim = hidden_dim
271
+ self.pooling_type = pooling_type
272
+ self.initializer_range = initializer_range
273
+ self.batch_norm_eps = batch_norm_eps
274
+ self.batch_norm_momentum = batch_norm_momentum
275
+ self.drop_connect_rate = drop_connect_rate
276
+ self.num_hidden_layers = sum(num_block_repeats) * 4
277
+
278
+ @classmethod
279
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
280
+ cls._set_token_in_kwargs(kwargs)
281
+
282
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
283
+
284
+ # get the vision config dict if we are loading from AlignConfig
285
+ if config_dict.get("model_type") == "align":
286
+ config_dict = config_dict["vision_config"]
287
+
288
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
289
+ logger.warning(
290
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
291
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
292
+ )
293
+
294
+ return cls.from_dict(config_dict, **kwargs)
295
+
296
+
297
+ class AlignConfig(PretrainedConfig):
298
+ r"""
299
+ [`AlignConfig`] is the configuration class to store the configuration of a [`AlignModel`]. It is used to
300
+ instantiate an ALIGN model according to the specified arguments, defining the text model and vision model configs.
301
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the ALIGN
302
+ [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture.
303
+
304
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
305
+ documentation from [`PretrainedConfig`] for more information.
306
+
307
+ Args:
308
+ text_config (`dict`, *optional*):
309
+ Dictionary of configuration options used to initialize [`AlignTextConfig`].
310
+ vision_config (`dict`, *optional*):
311
+ Dictionary of configuration options used to initialize [`AlignVisionConfig`].
312
+ projection_dim (`int`, *optional*, defaults to 640):
313
+ Dimensionality of text and vision projection layers.
314
+ temperature_init_value (`float`, *optional*, defaults to 1.0):
315
+ The initial value of the *temperature* parameter. Default is used as per the original ALIGN implementation.
316
+ initializer_range (`float`, *optional*, defaults to 0.02):
317
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
318
+ kwargs (*optional*):
319
+ Dictionary of keyword arguments.
320
+
321
+ Example:
322
+
323
+ ```python
324
+ >>> from transformers import AlignConfig, AlignModel
325
+
326
+ >>> # Initializing a AlignConfig with kakaobrain/align-base style configuration
327
+ >>> configuration = AlignConfig()
328
+
329
+ >>> # Initializing a AlignModel (with random weights) from the kakaobrain/align-base style configuration
330
+ >>> model = AlignModel(configuration)
331
+
332
+ >>> # Accessing the model configuration
333
+ >>> configuration = model.config
334
+
335
+ >>> # We can also initialize a AlignConfig from a AlignTextConfig and a AlignVisionConfig
336
+ >>> from transformers import AlignTextConfig, AlignVisionConfig
337
+
338
+ >>> # Initializing ALIGN Text and Vision configurations
339
+ >>> config_text = AlignTextConfig()
340
+ >>> config_vision = AlignVisionConfig()
341
+
342
+ >>> config = AlignConfig.from_text_vision_configs(config_text, config_vision)
343
+ ```"""
344
+
345
+ model_type = "align"
346
+
347
+ def __init__(
348
+ self,
349
+ text_config=None,
350
+ vision_config=None,
351
+ projection_dim=640,
352
+ temperature_init_value=1.0,
353
+ initializer_range=0.02,
354
+ **kwargs,
355
+ ):
356
+ super().__init__(**kwargs)
357
+
358
+ if text_config is None:
359
+ text_config = {}
360
+ logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
361
+
362
+ if vision_config is None:
363
+ vision_config = {}
364
+ logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
365
+
366
+ self.text_config = AlignTextConfig(**text_config)
367
+ self.vision_config = AlignVisionConfig(**vision_config)
368
+
369
+ self.projection_dim = projection_dim
370
+ self.temperature_init_value = temperature_init_value
371
+ self.initializer_range = initializer_range
372
+
373
+ @classmethod
374
+ def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
375
+ r"""
376
+ Instantiate a [`AlignConfig`] (or a derived class) from align text model configuration and align vision model
377
+ configuration.
378
+
379
+ Returns:
380
+ [`AlignConfig`]: An instance of a configuration object
381
+ """
382
+
383
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
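Note that `AlignTextConfig.from_pretrained` and `AlignVisionConfig.from_pretrained` both detect a composite checkpoint (`model_type == "align"`) and keep only their own sub-dictionary. A short sketch of that behaviour, assuming the `kakaobrain/align-base` checkpoint referenced in the docstrings is reachable:

```python
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

# The sub-config classes pull text_config / vision_config out of the
# composite "align" config dict, so all three loads stay consistent.
full = AlignConfig.from_pretrained("kakaobrain/align-base")
text = AlignTextConfig.from_pretrained("kakaobrain/align-base")
vision = AlignVisionConfig.from_pretrained("kakaobrain/align-base")

assert text.hidden_size == full.text_config.hidden_size
assert vision.image_size == full.vision_config.image_size
```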
llmeval-env/lib/python3.10/site-packages/transformers/models/align/convert_align_tf_to_hf.py ADDED
@@ -0,0 +1,389 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ALIGN checkpoints from the original repository."""
16
+
17
+ import argparse
18
+ import os
19
+
20
+ import align
21
+ import numpy as np
22
+ import requests
23
+ import tensorflow as tf
24
+ import torch
25
+ from PIL import Image
26
+ from tokenizer import Tokenizer
27
+
28
+ from transformers import (
29
+ AlignConfig,
30
+ AlignModel,
31
+ AlignProcessor,
32
+ BertConfig,
33
+ BertTokenizer,
34
+ EfficientNetConfig,
35
+ EfficientNetImageProcessor,
36
+ )
37
+ from transformers.utils import logging
38
+
39
+
40
+ logging.set_verbosity_info()
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ def preprocess(image):
45
+ image = tf.image.resize(image, (346, 346))
46
+ image = tf.image.crop_to_bounding_box(image, (346 - 289) // 2, (346 - 289) // 2, 289, 289)
47
+ return image
48
+
49
+
50
+ def get_align_config():
51
+ vision_config = EfficientNetConfig.from_pretrained("google/efficientnet-b7")
52
+ vision_config.image_size = 289
53
+ vision_config.hidden_dim = 640
54
+ vision_config.id2label = {"0": "LABEL_0", "1": "LABEL_1"}
55
+ vision_config.label2id = {"LABEL_0": 0, "LABEL_1": 1}
56
+ vision_config.depthwise_padding = []
57
+
58
+ text_config = BertConfig()
59
+ config = AlignConfig.from_text_vision_configs(
60
+ text_config=text_config, vision_config=vision_config, projection_dim=640
61
+ )
62
+ return config
63
+
64
+
65
+ # We will verify our results on an image of cute cats
66
+ def prepare_img():
67
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
68
+ im = Image.open(requests.get(url, stream=True).raw)
69
+ return im
70
+
71
+
72
+ def get_processor():
73
+ image_processor = EfficientNetImageProcessor(
74
+ do_center_crop=True,
75
+ rescale_factor=1 / 127.5,
76
+ rescale_offset=True,
77
+ do_normalize=False,
78
+ include_top=False,
79
+ resample=Image.BILINEAR,
80
+ )
81
+ tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
82
+ tokenizer.model_max_length = 64
83
+ processor = AlignProcessor(image_processor=image_processor, tokenizer=tokenizer)
84
+ return processor
85
+
86
+
87
+ # here we list all keys to be renamed (original name on the left, our name on the right)
88
+ def rename_keys(original_param_names):
89
+ # EfficientNet image encoder
90
+ block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
91
+ block_names = list(set(block_names))
92
+ block_names = sorted(block_names)
93
+ num_blocks = len(block_names)
94
+ block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
95
+
96
+ rename_keys = []
97
+ rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
98
+ rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
99
+ rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
100
+ rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
101
+ rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))
102
+
103
+ for b in block_names:
104
+ hf_b = block_name_mapping[b]
105
+ rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
106
+ rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
107
+ rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
108
+ rename_keys.append(
109
+ (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
110
+ )
111
+ rename_keys.append(
112
+ (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
113
+ )
114
+ rename_keys.append(
115
+ (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
116
+ )
117
+ rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
118
+ rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
119
+ rename_keys.append(
120
+ (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
121
+ )
122
+ rename_keys.append(
123
+ (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
124
+ )
125
+
126
+ rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
127
+ rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
128
+ rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
129
+ rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
130
+ rename_keys.append(
131
+ (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
132
+ )
133
+ rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
134
+ rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
135
+ rename_keys.append(
136
+ (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
137
+ )
138
+ rename_keys.append(
139
+ (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
140
+ )
141
+
142
+ key_mapping = {}
143
+ for item in rename_keys:
144
+ if item[0] in original_param_names:
145
+ key_mapping[item[0]] = "vision_model." + item[1]
146
+
147
+ # BERT text encoder
148
+ rename_keys = []
149
+ old = "tf_bert_model/bert"
150
+ new = "text_model"
151
+ for i in range(12):
152
+ rename_keys.append(
153
+ (
154
+ f"{old}/encoder/layer_._{i}/attention/self/query/kernel:0",
155
+ f"{new}.encoder.layer.{i}.attention.self.query.weight",
156
+ )
157
+ )
158
+ rename_keys.append(
159
+ (
160
+ f"{old}/encoder/layer_._{i}/attention/self/query/bias:0",
161
+ f"{new}.encoder.layer.{i}.attention.self.query.bias",
162
+ )
163
+ )
164
+ rename_keys.append(
165
+ (
166
+ f"{old}/encoder/layer_._{i}/attention/self/key/kernel:0",
167
+ f"{new}.encoder.layer.{i}.attention.self.key.weight",
168
+ )
169
+ )
170
+ rename_keys.append(
171
+ (
172
+ f"{old}/encoder/layer_._{i}/attention/self/key/bias:0",
173
+ f"{new}.encoder.layer.{i}.attention.self.key.bias",
174
+ )
175
+ )
176
+ rename_keys.append(
177
+ (
178
+ f"{old}/encoder/layer_._{i}/attention/self/value/kernel:0",
179
+ f"{new}.encoder.layer.{i}.attention.self.value.weight",
180
+ )
181
+ )
182
+ rename_keys.append(
183
+ (
184
+ f"{old}/encoder/layer_._{i}/attention/self/value/bias:0",
185
+ f"{new}.encoder.layer.{i}.attention.self.value.bias",
186
+ )
187
+ )
188
+ rename_keys.append(
189
+ (
190
+ f"{old}/encoder/layer_._{i}/attention/output/dense/kernel:0",
191
+ f"{new}.encoder.layer.{i}.attention.output.dense.weight",
192
+ )
193
+ )
194
+ rename_keys.append(
195
+ (
196
+ f"{old}/encoder/layer_._{i}/attention/output/dense/bias:0",
197
+ f"{new}.encoder.layer.{i}.attention.output.dense.bias",
198
+ )
199
+ )
200
+ rename_keys.append(
201
+ (
202
+ f"{old}/encoder/layer_._{i}/attention/output/LayerNorm/gamma:0",
203
+ f"{new}.encoder.layer.{i}.attention.output.LayerNorm.weight",
204
+ )
205
+ )
206
+ rename_keys.append(
207
+ (
208
+ f"{old}/encoder/layer_._{i}/attention/output/LayerNorm/beta:0",
209
+ f"{new}.encoder.layer.{i}.attention.output.LayerNorm.bias",
210
+ )
211
+ )
212
+ rename_keys.append(
213
+ (
214
+ f"{old}/encoder/layer_._{i}/intermediate/dense/kernel:0",
215
+ f"{new}.encoder.layer.{i}.intermediate.dense.weight",
216
+ )
217
+ )
218
+ rename_keys.append(
219
+ (
220
+ f"{old}/encoder/layer_._{i}/intermediate/dense/bias:0",
221
+ f"{new}.encoder.layer.{i}.intermediate.dense.bias",
222
+ )
223
+ )
224
+ rename_keys.append(
225
+ (f"{old}/encoder/layer_._{i}/output/dense/kernel:0", f"{new}.encoder.layer.{i}.output.dense.weight")
226
+ )
227
+ rename_keys.append(
228
+ (f"{old}/encoder/layer_._{i}/output/dense/bias:0", f"{new}.encoder.layer.{i}.output.dense.bias")
229
+ )
230
+ rename_keys.append(
231
+ (f"{old}/encoder/layer_._{i}/output/LayerNorm/gamma:0", f"{new}.encoder.layer.{i}.output.LayerNorm.weight")
232
+ )
233
+ rename_keys.append(
234
+ (f"{old}/encoder/layer_._{i}/output/LayerNorm/beta:0", f"{new}.encoder.layer.{i}.output.LayerNorm.bias")
235
+ )
236
+
237
+ rename_keys.append((f"{old}/embeddings/word_embeddings/weight:0", f"{new}.embeddings.word_embeddings.weight"))
238
+ rename_keys.append(
239
+ (f"{old}/embeddings/position_embeddings/embeddings:0", f"{new}.embeddings.position_embeddings.weight")
240
+ )
241
+ rename_keys.append(
242
+ (f"{old}/embeddings/token_type_embeddings/embeddings:0", f"{new}.embeddings.token_type_embeddings.weight")
243
+ )
244
+ rename_keys.append((f"{old}/embeddings/LayerNorm/gamma:0", f"{new}.embeddings.LayerNorm.weight"))
245
+ rename_keys.append((f"{old}/embeddings/LayerNorm/beta:0", f"{new}.embeddings.LayerNorm.bias"))
246
+
247
+ rename_keys.append((f"{old}/pooler/dense/kernel:0", f"{new}.pooler.dense.weight"))
248
+ rename_keys.append((f"{old}/pooler/dense/bias:0", f"{new}.pooler.dense.bias"))
249
+ rename_keys.append(("dense/kernel:0", "text_projection.weight"))
250
+ rename_keys.append(("dense/bias:0", "text_projection.bias"))
251
+ rename_keys.append(("dense/bias:0", "text_projection.bias"))
252
+ rename_keys.append(("temperature:0", "temperature"))
253
+
254
+ for item in rename_keys:
255
+ if item[0] in original_param_names:
256
+ key_mapping[item[0]] = item[1]
257
+ return key_mapping
258
+
259
+
260
+ def replace_params(hf_params, tf_params, key_mapping):
261
+ list(hf_params.keys())
262
+
263
+ for key, value in tf_params.items():
264
+ if key not in key_mapping:
265
+ continue
266
+
267
+ hf_key = key_mapping[key]
268
+ if "_conv" in key and "kernel" in key:
269
+ new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
270
+ elif "embeddings" in key:
271
+ new_hf_value = torch.from_numpy(value)
272
+ elif "depthwise_kernel" in key:
273
+ new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
274
+ elif "kernel" in key:
275
+ new_hf_value = torch.from_numpy(np.transpose(value))
276
+ elif "temperature" in key:
277
+ new_hf_value = value
278
+ elif "bn/gamma" in key or "bn/beta" in key:
279
+ new_hf_value = torch.from_numpy(np.transpose(value)).squeeze()
280
+ else:
281
+ new_hf_value = torch.from_numpy(value)
282
+
283
+ # Replace HF parameters with original TF model parameters
284
+ hf_params[hf_key].copy_(new_hf_value)
285
+
286
+
287
+ @torch.no_grad()
288
+ def convert_align_checkpoint(checkpoint_path, pytorch_dump_folder_path, save_model, push_to_hub):
289
+ """
290
+ Copy/paste/tweak model's weights to our ALIGN structure.
291
+ """
292
+ # Load original model
293
+ seq_length = 64
294
+ tok = Tokenizer(seq_length)
295
+ original_model = align.Align("efficientnet-b7", "bert-base", 640, seq_length, tok.get_vocab_size())
296
+ original_model.compile()
297
+ original_model.load_weights(checkpoint_path)
298
+
299
+ tf_params = original_model.trainable_variables
300
+ tf_non_train_params = original_model.non_trainable_variables
301
+ tf_params = {param.name: param.numpy() for param in tf_params}
302
+ for param in tf_non_train_params:
303
+ tf_params[param.name] = param.numpy()
304
+ tf_param_names = list(tf_params.keys())
305
+
306
+ # Load HuggingFace model
307
+ config = get_align_config()
308
+ hf_model = AlignModel(config).eval()
309
+ hf_params = hf_model.state_dict()
310
+
311
+ # Create src-to-dst parameter name mapping dictionary
312
+ print("Converting parameters...")
313
+ key_mapping = rename_keys(tf_param_names)
314
+ replace_params(hf_params, tf_params, key_mapping)
315
+
316
+ # Initialize processor
317
+ processor = get_processor()
318
+ inputs = processor(
319
+ images=prepare_img(), text="A picture of a cat", padding="max_length", max_length=64, return_tensors="pt"
320
+ )
321
+
322
+ # HF model inference
323
+ hf_model.eval()
324
+ with torch.no_grad():
325
+ outputs = hf_model(**inputs)
326
+
327
+ hf_image_features = outputs.image_embeds.detach().numpy()
328
+ hf_text_features = outputs.text_embeds.detach().numpy()
329
+
330
+ # Original model inference
331
+ original_model.trainable = False
332
+ tf_image_processor = EfficientNetImageProcessor(
333
+ do_center_crop=True,
334
+ do_rescale=False,
335
+ do_normalize=False,
336
+ include_top=False,
337
+ resample=Image.BILINEAR,
338
+ )
339
+ image = tf_image_processor(images=prepare_img(), return_tensors="tf", data_format="channels_last")["pixel_values"]
340
+ text = tok(tf.constant(["A picture of a cat"]))
341
+
342
+ image_features = original_model.image_encoder(image, training=False)
343
+ text_features = original_model.text_encoder(text, training=False)
344
+
345
+ image_features = tf.nn.l2_normalize(image_features, axis=-1)
346
+ text_features = tf.nn.l2_normalize(text_features, axis=-1)
347
+
348
+ # Check whether original and HF model outputs match -> np.allclose
349
+ if not np.allclose(image_features, hf_image_features, atol=1e-3):
350
+ raise ValueError("The predicted image features are not the same.")
351
+ if not np.allclose(text_features, hf_text_features, atol=1e-3):
352
+ raise ValueError("The predicted text features are not the same.")
353
+ print("Model outputs match!")
354
+
355
+ if save_model:
356
+ # Create folder to save model
357
+ if not os.path.isdir(pytorch_dump_folder_path):
358
+ os.mkdir(pytorch_dump_folder_path)
359
+ # Save converted model and image processor
360
+ hf_model.save_pretrained(pytorch_dump_folder_path)
361
+ processor.save_pretrained(pytorch_dump_folder_path)
362
+
363
+ if push_to_hub:
364
+ # Push model and image processor to hub
365
+ print("Pushing converted ALIGN to the hub...")
366
+ processor.push_to_hub("align-base")
367
+ hf_model.push_to_hub("align-base")
368
+
369
+
370
+ if __name__ == "__main__":
371
+ parser = argparse.ArgumentParser()
372
+ # Required parameters
373
+ parser.add_argument(
374
+ "--checkpoint_path",
375
+ default="./weights/model-weights",
376
+ type=str,
377
+ help="Path to the pretrained TF ALIGN checkpoint.",
378
+ )
379
+ parser.add_argument(
380
+ "--pytorch_dump_folder_path",
381
+ default="hf_model",
382
+ type=str,
383
+ help="Path to the output PyTorch model directory.",
384
+ )
385
+ parser.add_argument("--save_model", action="store_true", help="Save model to local")
386
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
387
+
388
+ args = parser.parse_args()
389
+ convert_align_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
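Given the argparse definition above, a typical conversion run would look like the following sketch, written as a direct Python call mirroring the `__main__` block; the checkpoint path is a placeholder, and the original repository's `align` and `tokenizer` modules must be importable for the script to run at all:

```python
# Hypothetical driver call; arguments mirror the script's CLI defaults.
from convert_align_tf_to_hf import convert_align_checkpoint

convert_align_checkpoint(
    checkpoint_path="./weights/model-weights",  # --checkpoint_path default
    pytorch_dump_folder_path="hf_model",        # --pytorch_dump_folder_path default
    save_model=True,                            # equivalent to passing --save_model
    push_to_hub=False,
)
```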
llmeval-env/lib/python3.10/site-packages/transformers/models/align/modeling_align.py ADDED
@@ -0,0 +1,1633 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Google Research Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ALIGN model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import Any, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithNoAttention,
28
+ BaseModelOutputWithPastAndCrossAttentions,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ BaseModelOutputWithPoolingAndNoAttention,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
34
+ from ...utils import (
35
+ ModelOutput,
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ logging,
39
+ replace_return_docstrings,
40
+ )
41
+ from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _CHECKPOINT_FOR_DOC = "kakaobrain/align-base"
47
+ _CONFIG_FOR_DOC = "AlignConfig"
48
+
49
+
50
+ from ..deprecated._archive_maps import ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
51
+
52
+
53
+ ALIGN_START_DOCSTRING = r"""
54
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
55
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
56
+ etc.)
57
+
58
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
59
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
60
+ and behavior.
61
+
62
+ Parameters:
63
+ config ([`AlignConfig`]): Model configuration class with all the parameters of the model.
64
+ Initializing with a config file does not load the weights associated with the model, only the
65
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
66
+ """
67
+
68
+ ALIGN_TEXT_INPUTS_DOCSTRING = r"""
69
+ Args:
70
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
71
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
72
+ it.
73
+
74
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
75
+ [`PreTrainedTokenizer.__call__`] for details.
76
+
77
+ [What are input IDs?](../glossary#input-ids)
78
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
79
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
80
+
81
+ - 1 for tokens that are **not masked**,
82
+ - 0 for tokens that are **masked**.
83
+
84
+ [What are attention masks?](../glossary#attention-mask)
85
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
86
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
87
+ config.max_position_embeddings - 1]`.
88
+
89
+ [What are position IDs?](../glossary#position-ids)
90
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
91
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
92
+ 1]`:
93
+
94
+ - 0 corresponds to a *sentence A* token,
95
+ - 1 corresponds to a *sentence B* token.
96
+
97
+ [What are token type IDs?](../glossary#token-type-ids)
98
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
99
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
100
+
101
+ - 1 indicates the head is **not masked**,
102
+ - 0 indicates the head is **masked**.
103
+
104
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
105
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
106
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
107
+ model's internal embedding lookup matrix.
108
+ output_attentions (`bool`, *optional*):
109
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
110
+ tensors for more detail.
111
+ output_hidden_states (`bool`, *optional*):
112
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
113
+ more detail.
114
+ return_dict (`bool`, *optional*):
115
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
116
+ """
117
+
118
+ ALIGN_VISION_INPUTS_DOCSTRING = r"""
119
+ Args:
120
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
121
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
122
+ [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
123
+ output_hidden_states (`bool`, *optional*):
124
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
125
+ more detail.
126
+ return_dict (`bool`, *optional*):
127
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
128
+ """
129
+
130
+ ALIGN_INPUTS_DOCSTRING = r"""
131
+ Args:
132
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
133
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
134
+ it.
135
+
136
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
137
+ [`PreTrainedTokenizer.__call__`] for details.
138
+
139
+ [What are input IDs?](../glossary#input-ids)
140
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
141
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
142
+
143
+ - 1 for tokens that are **not masked**,
144
+ - 0 for tokens that are **masked**.
145
+
146
+ [What are attention masks?](../glossary#attention-mask)
147
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
148
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
149
+ config.max_position_embeddings - 1]`.
150
+
151
+ [What are position IDs?](../glossary#position-ids)
152
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
153
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
154
+ 1]`:
155
+
156
+ - 0 corresponds to a *sentence A* token,
157
+ - 1 corresponds to a *sentence B* token.
158
+
159
+ [What are token type IDs?](../glossary#token-type-ids)
160
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
161
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
162
+
163
+ - 1 indicates the head is **not masked**,
164
+ - 0 indicates the head is **masked**.
165
+
166
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
167
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
168
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
169
+ model's internal embedding lookup matrix.
170
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
171
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
172
+ [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
173
+ return_loss (`bool`, *optional*):
174
+ Whether or not to return the contrastive loss.
175
+ output_attentions (`bool`, *optional*):
176
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
177
+ tensors for more detail.
178
+ output_hidden_states (`bool`, *optional*):
179
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
180
+ more detail.
181
+ return_dict (`bool`, *optional*):
182
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
183
+ """
184
+
185
+
186
+ @dataclass
187
+ class AlignVisionModelOutput(ModelOutput):
188
+ """
189
+ Base class for vision model's outputs that also contains image embeddings obtained by pooling the last hidden states.
190
+
191
+ Args:
192
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
193
+ The image embeddings obtained by applying the projection layer to the pooler_output.
194
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
195
+ Sequence of hidden-states at the output of the last layer of the model.
196
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
197
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
198
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
199
+
200
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
201
+ """
202
+
203
+ image_embeds: Optional[torch.FloatTensor] = None
204
+ last_hidden_state: torch.FloatTensor = None
205
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
206
+
207
+
208
+ @dataclass
209
+ class AlignTextModelOutput(ModelOutput):
210
+ """
211
+ Base class for text model's outputs that also contains a pooling of the last hidden states.
212
+
213
+ Args:
214
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
215
+ The text embeddings obtained by applying the projection layer to the pooler_output.
216
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
217
+ Sequence of hidden-states at the output of the last layer of the model.
218
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
219
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
220
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
221
+
222
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
223
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
224
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
225
+ sequence_length)`.
226
+
227
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
228
+ heads.
229
+ """
230
+
231
+ text_embeds: Optional[torch.FloatTensor] = None
232
+ last_hidden_state: torch.FloatTensor = None
233
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
234
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
235
+
236
+
237
+ @dataclass
238
+ class AlignOutput(ModelOutput):
239
+ """
240
+ Args:
241
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
242
+ Contrastive loss for image-text similarity.
243
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
244
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
245
+ similarity scores.
246
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
247
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
248
+ similarity scores.
249
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
250
+ The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`].
251
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
252
+ The output of [`AlignVisionModel`].
253
+ text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
254
+ The output of the [`AlignTextModel`].
255
+ vision_model_output (`BaseModelOutputWithPoolingAndNoAttention`):
256
+ The output of the [`AlignVisionModel`].
257
+ """
258
+
259
+ loss: Optional[torch.FloatTensor] = None
260
+ logits_per_image: torch.FloatTensor = None
261
+ logits_per_text: torch.FloatTensor = None
262
+ text_embeds: torch.FloatTensor = None
263
+ image_embeds: torch.FloatTensor = None
264
+ text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
265
+ vision_model_output: BaseModelOutputWithPoolingAndNoAttention = None
266
+
267
+ def to_tuple(self) -> Tuple[Any]:
268
+ return tuple(
269
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
270
+ for k in self.keys()
271
+ )
272
+
273
+
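+ # Editorial sketch (not part of the original implementation): `to_tuple` above flattens the output for
+ # callers that requested plain tuples, converting the nested text/vision `ModelOutput` objects into
+ # tuples as well. Assuming a hypothetical AlignModel call with `return_dict=True`:
+ #
+ #     as_tuple = outputs.to_tuple()
+ #     # the last element is vision_model_output.to_tuple() and the second-to-last is
+ #     # text_model_output.to_tuple(), i.e. nested tuples rather than ModelOutput instances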
274
+ # contrastive loss function, adapted from
275
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
276
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
277
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device), label_smoothing=0.1)
278
+
279
+
280
+ def align_loss(similarity: torch.Tensor) -> torch.Tensor:
281
+ caption_loss = contrastive_loss(similarity)
282
+ image_loss = contrastive_loss(similarity.t())
283
+ return (caption_loss + image_loss) / 2.0
284
+
285
+
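+ # Editorial sketch (not part of the original implementation): the loss above is a symmetric contrastive
+ # loss with label smoothing. Row i of the similarity matrix should score text i against image i highest,
+ # so the targets are simply torch.arange(batch_size) in both directions. A toy example with a
+ # hypothetical 2x2 similarity matrix:
+ #
+ #     similarity = torch.tensor([[5.0, 0.1], [0.2, 4.0]])  # (text_batch, image_batch)
+ #     loss = align_loss(similarity)
+ #     # contrastive_loss(similarity) handles text -> image, contrastive_loss(similarity.t()) the reverse;
+ #     # a well-aligned batch (large diagonal) drives both terms toward the label-smoothing floor.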
286
+ # Copied from transformers.models.efficientnet.modeling_efficientnet.round_filters with EfficientNet->AlignVision
287
+ def round_filters(config: AlignVisionConfig, num_channels: int):
288
+ r"""
289
+ Round the number of filters based on the width multiplier.
290
+ """
291
+ divisor = config.depth_divisor
292
+ num_channels *= config.width_coefficient
293
+ new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)
294
+
295
+ # Make sure that rounding down does not reduce the number of filters by more than 10%.
296
+ if new_dim < 0.9 * num_channels:
297
+ new_dim += divisor
298
+
299
+ return int(new_dim)
300
+
301
+
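+ # Editorial sketch (not part of the original implementation): a worked example of the rounding above,
+ # using hypothetical values width_coefficient=1.2 and depth_divisor=8 (the real values come from the
+ # AlignVisionConfig):
+ #
+ #     from types import SimpleNamespace
+ #
+ #     round_filters(SimpleNamespace(width_coefficient=1.2, depth_divisor=8), 32)
+ #     # 32 * 1.2 = 38.4 -> int(38.4 + 4) // 8 * 8 = 40; 40 >= 0.9 * 38.4, so the result stays 40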
302
+ # Copied from transformers.models.efficientnet.modeling_efficientnet.correct_pad
303
+ def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True):
304
+ r"""
305
+ Utility function to get the tuple padding value for the depthwise convolution.
306
+
307
+ Args:
308
+ kernel_size (`int` or `tuple`):
309
+ Kernel size of the convolution layers.
310
+ adjust (`bool`, *optional*, defaults to `True`):
311
+ Adjusts padding value to apply to right and bottom sides of the input.
312
+ """
313
+ if isinstance(kernel_size, int):
314
+ kernel_size = (kernel_size, kernel_size)
315
+
316
+ correct = (kernel_size[0] // 2, kernel_size[1] // 2)
317
+ if adjust:
318
+ return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
319
+ else:
320
+ return (correct[1], correct[1], correct[0], correct[0])
321
+
322
+
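+ # Editorial sketch (not part of the original implementation): the returned tuple follows nn.ZeroPad2d's
+ # (left, right, top, bottom) convention, for example:
+ #
+ #     correct_pad(3)                # (0, 1, 0, 1)  -> one extra pixel on the right/bottom only
+ #     correct_pad(3, adjust=False)  # (1, 1, 1, 1)  -> symmetric "same"-style padding
+ #     correct_pad(5)                # (1, 2, 1, 2)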
323
+ # Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetEmbeddings with EfficientNet->AlignVision
324
+ class AlignVisionEmbeddings(nn.Module):
325
+ r"""
326
+ A module that corresponds to the stem module of the original work.
327
+ """
328
+
329
+ def __init__(self, config: AlignVisionConfig):
330
+ super().__init__()
331
+
332
+ self.out_dim = round_filters(config, 32)
333
+ self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
334
+ self.convolution = nn.Conv2d(
335
+ config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False
336
+ )
337
+ self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
338
+ self.activation = ACT2FN[config.hidden_act]
339
+
340
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
341
+ features = self.padding(pixel_values)
342
+ features = self.convolution(features)
343
+ features = self.batchnorm(features)
344
+ features = self.activation(features)
345
+
346
+ return features
347
+
348
+
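+ # Editorial sketch (not part of the original implementation): the stem halves the spatial resolution.
+ # The asymmetric (0, 1, 0, 1) padding plus the stride-2, 3x3 "valid" convolution maps an even H x W
+ # input to H/2 x W/2. With a hypothetical 224 x 224 RGB image and a default config:
+ #
+ #     stem = AlignVisionEmbeddings(AlignVisionConfig())
+ #     stem(torch.randn(1, 3, 224, 224)).shape   # (1, round_filters(config, 32), 112, 112)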
349
+ # Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseConv2d with EfficientNet->AlignVision
350
+ class AlignVisionDepthwiseConv2d(nn.Conv2d):
351
+ def __init__(
352
+ self,
353
+ in_channels,
354
+ depth_multiplier=1,
355
+ kernel_size=3,
356
+ stride=1,
357
+ padding=0,
358
+ dilation=1,
359
+ bias=True,
360
+ padding_mode="zeros",
361
+ ):
362
+ out_channels = in_channels * depth_multiplier
363
+ super().__init__(
364
+ in_channels=in_channels,
365
+ out_channels=out_channels,
366
+ kernel_size=kernel_size,
367
+ stride=stride,
368
+ padding=padding,
369
+ dilation=dilation,
370
+ groups=in_channels,
371
+ bias=bias,
372
+ padding_mode=padding_mode,
373
+ )
374
+
375
+
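+ # Editorial sketch (not part of the original implementation): groups=in_channels makes this a depthwise
+ # convolution, i.e. each input channel is filtered independently and no cross-channel mixing happens here
+ # (that is left to the surrounding 1x1 pointwise convolutions). Quick check with hypothetical sizes:
+ #
+ #     conv = AlignVisionDepthwiseConv2d(in_channels=8, kernel_size=3, padding=1)
+ #     conv.groups                              # 8 -> one filter group per input channel
+ #     conv(torch.randn(1, 8, 16, 16)).shape    # (1, 8, 16, 16) since depth_multiplier defaults to 1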
376
+ # Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetExpansionLayer with EfficientNet->AlignVision
377
+ class AlignVisionExpansionLayer(nn.Module):
378
+ r"""
379
+ This corresponds to the expansion phase of each block in the original implementation.
380
+ """
381
+
382
+ def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int):
383
+ super().__init__()
384
+ self.expand_conv = nn.Conv2d(
385
+ in_channels=in_dim,
386
+ out_channels=out_dim,
387
+ kernel_size=1,
388
+ padding="same",
389
+ bias=False,
390
+ )
391
+ self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
392
+ self.expand_act = ACT2FN[config.hidden_act]
393
+
394
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
395
+ # Expand phase
396
+ hidden_states = self.expand_conv(hidden_states)
397
+ hidden_states = self.expand_bn(hidden_states)
398
+ hidden_states = self.expand_act(hidden_states)
399
+
400
+ return hidden_states
401
+
402
+
403
+ # Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with EfficientNet->AlignVision
404
+ class AlignVisionDepthwiseLayer(nn.Module):
405
+ r"""
406
+ This corresponds to the depthwise convolution phase of each block in the original implementation.
407
+ """
408
+
409
+ def __init__(
410
+ self,
411
+ config: AlignVisionConfig,
412
+ in_dim: int,
413
+ stride: int,
414
+ kernel_size: int,
415
+ adjust_padding: bool,
416
+ ):
417
+ super().__init__()
418
+ self.stride = stride
419
+ conv_pad = "valid" if self.stride == 2 else "same"
420
+ padding = correct_pad(kernel_size, adjust=adjust_padding)
421
+
422
+ self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
423
+ self.depthwise_conv = AlignVisionDepthwiseConv2d(
424
+ in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False
425
+ )
426
+ self.depthwise_norm = nn.BatchNorm2d(
427
+ num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
428
+ )
429
+ self.depthwise_act = ACT2FN[config.hidden_act]
430
+
431
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
432
+ # Depthwise convolution
433
+ if self.stride == 2:
434
+ hidden_states = self.depthwise_conv_pad(hidden_states)
435
+
436
+ hidden_states = self.depthwise_conv(hidden_states)
437
+ hidden_states = self.depthwise_norm(hidden_states)
438
+ hidden_states = self.depthwise_act(hidden_states)
439
+
440
+ return hidden_states
441
+
442
+
443
+ # Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with EfficientNet->AlignVision
444
+ class AlignVisionSqueezeExciteLayer(nn.Module):
445
+ r"""
446
+ This corresponds to the squeeze-and-excitation phase of each block in the original implementation.
447
+ """
448
+
449
+ def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool = False):
450
+ super().__init__()
451
+ self.dim = expand_dim if expand else in_dim
452
+ self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))
453
+
454
+ self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
455
+ self.reduce = nn.Conv2d(
456
+ in_channels=self.dim,
457
+ out_channels=self.dim_se,
458
+ kernel_size=1,
459
+ padding="same",
460
+ )
461
+ self.expand = nn.Conv2d(
462
+ in_channels=self.dim_se,
463
+ out_channels=self.dim,
464
+ kernel_size=1,
465
+ padding="same",
466
+ )
467
+ self.act_reduce = ACT2FN[config.hidden_act]
468
+ self.act_expand = nn.Sigmoid()
469
+
470
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
471
+ inputs = hidden_states
472
+ hidden_states = self.squeeze(hidden_states)
473
+ hidden_states = self.reduce(hidden_states)
474
+ hidden_states = self.act_reduce(hidden_states)
475
+
476
+ hidden_states = self.expand(hidden_states)
477
+ hidden_states = self.act_expand(hidden_states)
478
+ hidden_states = torch.mul(inputs, hidden_states)
479
+
480
+ return hidden_states
481
+
482
+
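+ # Editorial sketch (not part of the original implementation): shape walkthrough of the gating above for a
+ # (B, C, H, W) input, where C_se = max(1, int(in_dim * squeeze_expansion_ratio)):
+ #
+ #     (B, C, H, W) -- squeeze (global average pool) --> (B, C, 1, 1)
+ #                  -- reduce (1x1 conv) + activation --> (B, C_se, 1, 1)
+ #                  -- expand (1x1 conv) + sigmoid    --> (B, C, 1, 1)
+ #                  -- broadcast multiply with input  --> (B, C, H, W)
+ #
+ # The layer therefore re-weights channels without changing the spatial resolution.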
483
+ class AlignVisionFinalBlockLayer(nn.Module):
484
+ r"""
485
+ This corresponds to the final phase of each block in the original implementation.
486
+ """
487
+
488
+ def __init__(
489
+ self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool
490
+ ):
491
+ super().__init__()
492
+ self.apply_dropout = stride == 1 and not id_skip
493
+ self.project_conv = nn.Conv2d(
494
+ in_channels=in_dim,
495
+ out_channels=out_dim,
496
+ kernel_size=1,
497
+ padding="same",
498
+ bias=False,
499
+ )
500
+ self.project_bn = nn.BatchNorm2d(
501
+ num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
502
+ )
503
+ self.dropout = nn.Dropout(p=drop_rate)
504
+
505
+ def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
506
+ hidden_states = self.project_conv(hidden_states)
507
+ hidden_states = self.project_bn(hidden_states)
508
+
509
+ if self.apply_dropout:
510
+ hidden_states = self.dropout(hidden_states)
511
+ hidden_states = hidden_states + embeddings
512
+
513
+ return hidden_states
514
+
515
+
516
+ class AlignVisionBlock(nn.Module):
517
+ r"""
518
+ This corresponds to the block module of the original EfficientNet vision encoder implementation.
519
+
520
+ Args:
521
+ config ([`AlignVisionConfig`]):
522
+ Model configuration class.
523
+ in_dim (`int`):
524
+ Number of input channels.
525
+ out_dim (`int`):
526
+ Number of output channels.
527
+ stride (`int`):
528
+ Stride size to be used in convolution layers.
529
+ expand_ratio (`int`):
530
+ Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
531
+ kernel_size (`int`):
532
+ Kernel size for the depthwise convolution layer.
533
+ drop_rate (`float`):
534
+ Dropout rate to be used in the final phase of each block.
535
+ id_skip (`bool`):
536
+ Whether to skip the dropout and residual connection in the final phase of the block; the residual is only
537
+ applied when this is `False` and the stride is 1. Set to `True` for the first block of each stage.
538
+ adjust_padding (`bool`):
539
+ Whether to pad only the right and bottom sides of the input before the depthwise convolution. Set to
540
+ `True` for inputs with odd spatial sizes.
541
+ """
542
+
543
+ def __init__(
544
+ self,
545
+ config: AlignVisionConfig,
546
+ in_dim: int,
547
+ out_dim: int,
548
+ stride: int,
549
+ expand_ratio: int,
550
+ kernel_size: int,
551
+ drop_rate: float,
552
+ id_skip: bool,
553
+ adjust_padding: bool,
554
+ ):
555
+ super().__init__()
556
+ self.expand_ratio = expand_ratio
557
+ self.expand = True if self.expand_ratio != 1 else False
558
+ expand_in_dim = in_dim * expand_ratio
559
+
560
+ if self.expand:
561
+ self.expansion = AlignVisionExpansionLayer(
562
+ config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride
563
+ )
564
+
565
+ self.depthwise_conv = AlignVisionDepthwiseLayer(
566
+ config=config,
567
+ in_dim=expand_in_dim if self.expand else in_dim,
568
+ stride=stride,
569
+ kernel_size=kernel_size,
570
+ adjust_padding=adjust_padding,
571
+ )
572
+ self.squeeze_excite = AlignVisionSqueezeExciteLayer(
573
+ config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand
574
+ )
575
+ self.projection = AlignVisionFinalBlockLayer(
576
+ config=config,
577
+ in_dim=expand_in_dim if self.expand else in_dim,
578
+ out_dim=out_dim,
579
+ stride=stride,
580
+ drop_rate=drop_rate,
581
+ id_skip=id_skip,
582
+ )
583
+
584
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
585
+ embeddings = hidden_states
586
+ # Expansion and depthwise convolution phase
587
+ if self.expand_ratio != 1:
588
+ hidden_states = self.expansion(hidden_states)
589
+ hidden_states = self.depthwise_conv(hidden_states)
590
+
591
+ # Squeeze and excite phase
592
+ hidden_states = self.squeeze_excite(hidden_states)
593
+ hidden_states = self.projection(embeddings, hidden_states)
594
+ return hidden_states
595
+
596
+
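+ # Editorial sketch (not part of the original implementation): wiring of one MBConv-style block as
+ # assembled above:
+ #
+ #     input -> [1x1 expansion conv, only when expand_ratio != 1]
+ #           -> depthwise conv (kernel_size from the config, stride 1 or 2)
+ #           -> squeeze-and-excitation gate
+ #           -> 1x1 projection conv (+ dropout and residual with the block input,
+ #              applied only when stride == 1 and id_skip is False)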
597
+ class AlignVisionEncoder(nn.Module):
598
+ r"""
599
+ Forward propagates the embeddings through each vision encoder (EfficientNet) block.
600
+
601
+ Args:
602
+ config ([`AlignVisionConfig`]):
603
+ Model configuration class.
604
+ """
605
+
606
+ def __init__(self, config: AlignVisionConfig):
607
+ super().__init__()
608
+ self.depth_coefficient = config.depth_coefficient
609
+
610
+ def round_repeats(repeats):
611
+ # Round number of block repeats based on depth multiplier.
612
+ return int(math.ceil(self.depth_coefficient * repeats))
613
+
614
+ num_base_blocks = len(config.in_channels)
615
+ num_blocks = sum(round_repeats(n) for n in config.num_block_repeats)
616
+
617
+ curr_block_num = 0
618
+ blocks = []
619
+ for i in range(num_base_blocks):
620
+ in_dim = round_filters(config, config.in_channels[i])
621
+ out_dim = round_filters(config, config.out_channels[i])
622
+ stride = config.strides[i]
623
+ kernel_size = config.kernel_sizes[i]
624
+ expand_ratio = config.expand_ratios[i]
625
+
626
+ for j in range(round_repeats(config.num_block_repeats[i])):
627
+ id_skip = True if j == 0 else False
628
+ stride = 1 if j > 0 else stride
629
+ in_dim = out_dim if j > 0 else in_dim
630
+ adjust_padding = False if curr_block_num in config.depthwise_padding else True
631
+ drop_rate = config.drop_connect_rate * curr_block_num / num_blocks
632
+
633
+ block = AlignVisionBlock(
634
+ config=config,
635
+ in_dim=in_dim,
636
+ out_dim=out_dim,
637
+ stride=stride,
638
+ kernel_size=kernel_size,
639
+ expand_ratio=expand_ratio,
640
+ drop_rate=drop_rate,
641
+ id_skip=id_skip,
642
+ adjust_padding=adjust_padding,
643
+ )
644
+ blocks.append(block)
645
+ curr_block_num += 1
646
+
647
+ self.blocks = nn.ModuleList(blocks)
648
+
649
+ def forward(
650
+ self,
651
+ hidden_states: torch.FloatTensor,
652
+ output_hidden_states: Optional[bool] = False,
653
+ return_dict: Optional[bool] = True,
654
+ ) -> BaseModelOutputWithPoolingAndNoAttention:
655
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
656
+
657
+ for block in self.blocks:
658
+ hidden_states = block(hidden_states)
659
+ if output_hidden_states:
660
+ all_hidden_states += (hidden_states,)
661
+
662
+ if not return_dict:
663
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
664
+
665
+ return BaseModelOutputWithNoAttention(
666
+ last_hidden_state=hidden_states,
667
+ hidden_states=all_hidden_states,
668
+ )
669
+
670
+
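+ # Editorial sketch (not part of the original implementation): two scheduling details from the loop above,
+ # with hypothetical numbers. The per-stage repeats are scaled by the depth coefficient,
+ #
+ #     math.ceil(2.0 * 3)   # depth_coefficient=2.0 turns 3 base repeats into 6
+ #
+ # and the per-block dropout grows linearly with depth, drop_rate = drop_connect_rate * curr_block_num /
+ # num_blocks, so the first block gets 0.0 and the deepest block gets (almost) the full drop_connect_rate.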
671
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->AlignText
672
+ class AlignTextEmbeddings(nn.Module):
673
+ """Construct the embeddings from word, position and token_type embeddings."""
674
+
675
+ def __init__(self, config):
676
+ super().__init__()
677
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
678
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
679
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
680
+
681
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
682
+ # any TensorFlow checkpoint file
683
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
684
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
685
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
686
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
687
+ self.register_buffer(
688
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
689
+ )
690
+ self.register_buffer(
691
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
692
+ )
693
+
694
+ def forward(
695
+ self,
696
+ input_ids: Optional[torch.LongTensor] = None,
697
+ token_type_ids: Optional[torch.LongTensor] = None,
698
+ position_ids: Optional[torch.LongTensor] = None,
699
+ inputs_embeds: Optional[torch.FloatTensor] = None,
700
+ past_key_values_length: int = 0,
701
+ ) -> torch.Tensor:
702
+ if input_ids is not None:
703
+ input_shape = input_ids.size()
704
+ else:
705
+ input_shape = inputs_embeds.size()[:-1]
706
+
707
+ seq_length = input_shape[1]
708
+
709
+ if position_ids is None:
710
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
711
+
712
+ # Set token_type_ids to the registered buffer from the constructor, which is all zeros. This usually happens
713
+ # when token_type_ids are auto-generated; the registered buffer lets users trace the model without passing
714
+ # token_type_ids and solves issue #5664.
715
+ if token_type_ids is None:
716
+ if hasattr(self, "token_type_ids"):
717
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
718
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
719
+ token_type_ids = buffered_token_type_ids_expanded
720
+ else:
721
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
722
+
723
+ if inputs_embeds is None:
724
+ inputs_embeds = self.word_embeddings(input_ids)
725
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
726
+
727
+ embeddings = inputs_embeds + token_type_embeddings
728
+ if self.position_embedding_type == "absolute":
729
+ position_embeddings = self.position_embeddings(position_ids)
730
+ embeddings += position_embeddings
731
+ embeddings = self.LayerNorm(embeddings)
732
+ embeddings = self.dropout(embeddings)
733
+ return embeddings
734
+
735
+
736
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->AlignText
737
+ class AlignTextSelfAttention(nn.Module):
738
+ def __init__(self, config, position_embedding_type=None):
739
+ super().__init__()
740
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
741
+ raise ValueError(
742
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
743
+ f"heads ({config.num_attention_heads})"
744
+ )
745
+
746
+ self.num_attention_heads = config.num_attention_heads
747
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
748
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
749
+
750
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
751
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
752
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
753
+
754
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
755
+ self.position_embedding_type = position_embedding_type or getattr(
756
+ config, "position_embedding_type", "absolute"
757
+ )
758
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
759
+ self.max_position_embeddings = config.max_position_embeddings
760
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
761
+
762
+ self.is_decoder = config.is_decoder
763
+
764
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
765
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
766
+ x = x.view(new_x_shape)
767
+ return x.permute(0, 2, 1, 3)
768
+
769
+ def forward(
770
+ self,
771
+ hidden_states: torch.Tensor,
772
+ attention_mask: Optional[torch.FloatTensor] = None,
773
+ head_mask: Optional[torch.FloatTensor] = None,
774
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
775
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
776
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
777
+ output_attentions: Optional[bool] = False,
778
+ ) -> Tuple[torch.Tensor]:
779
+ mixed_query_layer = self.query(hidden_states)
780
+
781
+ # If this is instantiated as a cross-attention module, the keys
782
+ # and values come from an encoder; the attention mask needs to be
783
+ # such that the encoder's padding tokens are not attended to.
784
+ is_cross_attention = encoder_hidden_states is not None
785
+
786
+ if is_cross_attention and past_key_value is not None:
787
+ # reuse k,v, cross_attentions
788
+ key_layer = past_key_value[0]
789
+ value_layer = past_key_value[1]
790
+ attention_mask = encoder_attention_mask
791
+ elif is_cross_attention:
792
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
793
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
794
+ attention_mask = encoder_attention_mask
795
+ elif past_key_value is not None:
796
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
797
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
798
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
799
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
800
+ else:
801
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
802
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
803
+
804
+ query_layer = self.transpose_for_scores(mixed_query_layer)
805
+
806
+ use_cache = past_key_value is not None
807
+ if self.is_decoder:
808
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
809
+ # Further calls to cross_attention layer can then reuse all cross-attention
810
+ # key/value_states (first "if" case)
811
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
812
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
813
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
814
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
815
+ past_key_value = (key_layer, value_layer)
816
+
817
+ # Take the dot product between "query" and "key" to get the raw attention scores.
818
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
819
+
820
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
821
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
822
+ if use_cache:
823
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
824
+ -1, 1
825
+ )
826
+ else:
827
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
828
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
829
+ distance = position_ids_l - position_ids_r
830
+
831
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
832
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
833
+
834
+ if self.position_embedding_type == "relative_key":
835
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
836
+ attention_scores = attention_scores + relative_position_scores
837
+ elif self.position_embedding_type == "relative_key_query":
838
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
839
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
840
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
841
+
842
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
843
+ if attention_mask is not None:
844
+ # Apply the attention mask (precomputed for all layers in the AlignTextModel forward() function)
845
+ attention_scores = attention_scores + attention_mask
846
+
847
+ # Normalize the attention scores to probabilities.
848
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
849
+
850
+ # This is actually dropping out entire tokens to attend to, which might
851
+ # seem a bit unusual, but is taken from the original Transformer paper.
852
+ attention_probs = self.dropout(attention_probs)
853
+
854
+ # Mask heads if we want to
855
+ if head_mask is not None:
856
+ attention_probs = attention_probs * head_mask
857
+
858
+ context_layer = torch.matmul(attention_probs, value_layer)
859
+
860
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
861
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
862
+ context_layer = context_layer.view(new_context_layer_shape)
863
+
864
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
865
+
866
+ if self.is_decoder:
867
+ outputs = outputs + (past_key_value,)
868
+ return outputs
869
+
870
+
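+ # Editorial sketch (not part of the original implementation): transpose_for_scores splits the fused
+ # projection into per-head tensors. With hypothetical sizes hidden_size=768 and num_attention_heads=12
+ # (head size 64):
+ #
+ #     (batch, seq_len, 768) --view--> (batch, seq_len, 12, 64) --permute--> (batch, 12, seq_len, 64)
+ #
+ # so the matmul of query with key.transpose(-1, -2) produces per-head score matrices of shape
+ # (batch, 12, seq_len, seq_len).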
871
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->AlignText
872
+ class AlignTextSelfOutput(nn.Module):
873
+ def __init__(self, config):
874
+ super().__init__()
875
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
876
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
877
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
878
+
879
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
880
+ hidden_states = self.dense(hidden_states)
881
+ hidden_states = self.dropout(hidden_states)
882
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
883
+ return hidden_states
884
+
885
+
886
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->AlignText
887
+ class AlignTextAttention(nn.Module):
888
+ def __init__(self, config, position_embedding_type=None):
889
+ super().__init__()
890
+ self.self = AlignTextSelfAttention(config, position_embedding_type=position_embedding_type)
891
+ self.output = AlignTextSelfOutput(config)
892
+ self.pruned_heads = set()
893
+
894
+ def prune_heads(self, heads):
895
+ if len(heads) == 0:
896
+ return
897
+ heads, index = find_pruneable_heads_and_indices(
898
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
899
+ )
900
+
901
+ # Prune linear layers
902
+ self.self.query = prune_linear_layer(self.self.query, index)
903
+ self.self.key = prune_linear_layer(self.self.key, index)
904
+ self.self.value = prune_linear_layer(self.self.value, index)
905
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
906
+
907
+ # Update hyper params and store pruned heads
908
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
909
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
910
+ self.pruned_heads = self.pruned_heads.union(heads)
911
+
912
+ def forward(
913
+ self,
914
+ hidden_states: torch.Tensor,
915
+ attention_mask: Optional[torch.FloatTensor] = None,
916
+ head_mask: Optional[torch.FloatTensor] = None,
917
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
918
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
919
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
920
+ output_attentions: Optional[bool] = False,
921
+ ) -> Tuple[torch.Tensor]:
922
+ self_outputs = self.self(
923
+ hidden_states,
924
+ attention_mask,
925
+ head_mask,
926
+ encoder_hidden_states,
927
+ encoder_attention_mask,
928
+ past_key_value,
929
+ output_attentions,
930
+ )
931
+ attention_output = self.output(self_outputs[0], hidden_states)
932
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
933
+ return outputs
934
+
935
+
936
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->AlignText
937
+ class AlignTextIntermediate(nn.Module):
938
+ def __init__(self, config):
939
+ super().__init__()
940
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
941
+ if isinstance(config.hidden_act, str):
942
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
943
+ else:
944
+ self.intermediate_act_fn = config.hidden_act
945
+
946
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
947
+ hidden_states = self.dense(hidden_states)
948
+ hidden_states = self.intermediate_act_fn(hidden_states)
949
+ return hidden_states
950
+
951
+
952
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->AlignText
953
+ class AlignTextOutput(nn.Module):
954
+ def __init__(self, config):
955
+ super().__init__()
956
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
957
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
958
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
959
+
960
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
961
+ hidden_states = self.dense(hidden_states)
962
+ hidden_states = self.dropout(hidden_states)
963
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
964
+ return hidden_states
965
+
966
+
967
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->AlignText
968
+ class AlignTextLayer(nn.Module):
969
+ def __init__(self, config):
970
+ super().__init__()
971
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
972
+ self.seq_len_dim = 1
973
+ self.attention = AlignTextAttention(config)
974
+ self.is_decoder = config.is_decoder
975
+ self.add_cross_attention = config.add_cross_attention
976
+ if self.add_cross_attention:
977
+ if not self.is_decoder:
978
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
979
+ self.crossattention = AlignTextAttention(config, position_embedding_type="absolute")
980
+ self.intermediate = AlignTextIntermediate(config)
981
+ self.output = AlignTextOutput(config)
982
+
983
+ def forward(
984
+ self,
985
+ hidden_states: torch.Tensor,
986
+ attention_mask: Optional[torch.FloatTensor] = None,
987
+ head_mask: Optional[torch.FloatTensor] = None,
988
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
989
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
990
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
991
+ output_attentions: Optional[bool] = False,
992
+ ) -> Tuple[torch.Tensor]:
993
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
994
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
995
+ self_attention_outputs = self.attention(
996
+ hidden_states,
997
+ attention_mask,
998
+ head_mask,
999
+ output_attentions=output_attentions,
1000
+ past_key_value=self_attn_past_key_value,
1001
+ )
1002
+ attention_output = self_attention_outputs[0]
1003
+
1004
+ # if decoder, the last output is tuple of self-attn cache
1005
+ if self.is_decoder:
1006
+ outputs = self_attention_outputs[1:-1]
1007
+ present_key_value = self_attention_outputs[-1]
1008
+ else:
1009
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
1010
+
1011
+ cross_attn_present_key_value = None
1012
+ if self.is_decoder and encoder_hidden_states is not None:
1013
+ if not hasattr(self, "crossattention"):
1014
+ raise ValueError(
1015
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
1016
+ " by setting `config.add_cross_attention=True`"
1017
+ )
1018
+
1019
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
1020
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
1021
+ cross_attention_outputs = self.crossattention(
1022
+ attention_output,
1023
+ attention_mask,
1024
+ head_mask,
1025
+ encoder_hidden_states,
1026
+ encoder_attention_mask,
1027
+ cross_attn_past_key_value,
1028
+ output_attentions,
1029
+ )
1030
+ attention_output = cross_attention_outputs[0]
1031
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
1032
+
1033
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
1034
+ cross_attn_present_key_value = cross_attention_outputs[-1]
1035
+ present_key_value = present_key_value + cross_attn_present_key_value
1036
+
1037
+ layer_output = apply_chunking_to_forward(
1038
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
1039
+ )
1040
+ outputs = (layer_output,) + outputs
1041
+
1042
+ # if decoder, return the attn key/values as the last output
1043
+ if self.is_decoder:
1044
+ outputs = outputs + (present_key_value,)
1045
+
1046
+ return outputs
1047
+
1048
+ def feed_forward_chunk(self, attention_output):
1049
+ intermediate_output = self.intermediate(attention_output)
1050
+ layer_output = self.output(intermediate_output, attention_output)
1051
+ return layer_output
1052
+
1053
+
1054
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->AlignText
1055
+ class AlignTextEncoder(nn.Module):
1056
+ def __init__(self, config):
1057
+ super().__init__()
1058
+ self.config = config
1059
+ self.layer = nn.ModuleList([AlignTextLayer(config) for _ in range(config.num_hidden_layers)])
1060
+ self.gradient_checkpointing = False
1061
+
1062
+ def forward(
1063
+ self,
1064
+ hidden_states: torch.Tensor,
1065
+ attention_mask: Optional[torch.FloatTensor] = None,
1066
+ head_mask: Optional[torch.FloatTensor] = None,
1067
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1068
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1069
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1070
+ use_cache: Optional[bool] = None,
1071
+ output_attentions: Optional[bool] = False,
1072
+ output_hidden_states: Optional[bool] = False,
1073
+ return_dict: Optional[bool] = True,
1074
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
1075
+ all_hidden_states = () if output_hidden_states else None
1076
+ all_self_attentions = () if output_attentions else None
1077
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
1078
+
1079
+ if self.gradient_checkpointing and self.training:
1080
+ if use_cache:
1081
+ logger.warning_once(
1082
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1083
+ )
1084
+ use_cache = False
1085
+
1086
+ next_decoder_cache = () if use_cache else None
1087
+ for i, layer_module in enumerate(self.layer):
1088
+ if output_hidden_states:
1089
+ all_hidden_states = all_hidden_states + (hidden_states,)
1090
+
1091
+ layer_head_mask = head_mask[i] if head_mask is not None else None
1092
+ past_key_value = past_key_values[i] if past_key_values is not None else None
1093
+
1094
+ if self.gradient_checkpointing and self.training:
1095
+ layer_outputs = self._gradient_checkpointing_func(
1096
+ layer_module.__call__,
1097
+ hidden_states,
1098
+ attention_mask,
1099
+ layer_head_mask,
1100
+ encoder_hidden_states,
1101
+ encoder_attention_mask,
1102
+ past_key_value,
1103
+ output_attentions,
1104
+ )
1105
+ else:
1106
+ layer_outputs = layer_module(
1107
+ hidden_states,
1108
+ attention_mask,
1109
+ layer_head_mask,
1110
+ encoder_hidden_states,
1111
+ encoder_attention_mask,
1112
+ past_key_value,
1113
+ output_attentions,
1114
+ )
1115
+
1116
+ hidden_states = layer_outputs[0]
1117
+ if use_cache:
1118
+ next_decoder_cache += (layer_outputs[-1],)
1119
+ if output_attentions:
1120
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
1121
+ if self.config.add_cross_attention:
1122
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
1123
+
1124
+ if output_hidden_states:
1125
+ all_hidden_states = all_hidden_states + (hidden_states,)
1126
+
1127
+ if not return_dict:
1128
+ return tuple(
1129
+ v
1130
+ for v in [
1131
+ hidden_states,
1132
+ next_decoder_cache,
1133
+ all_hidden_states,
1134
+ all_self_attentions,
1135
+ all_cross_attentions,
1136
+ ]
1137
+ if v is not None
1138
+ )
1139
+ return BaseModelOutputWithPastAndCrossAttentions(
1140
+ last_hidden_state=hidden_states,
1141
+ past_key_values=next_decoder_cache,
1142
+ hidden_states=all_hidden_states,
1143
+ attentions=all_self_attentions,
1144
+ cross_attentions=all_cross_attentions,
1145
+ )
1146
+
1147
+
1148
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> AlignText
1149
+ class AlignTextPooler(nn.Module):
1150
+ def __init__(self, config):
1151
+ super().__init__()
1152
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1153
+ self.activation = nn.Tanh()
1154
+
1155
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
1156
+ # We "pool" the model by simply taking the hidden state corresponding
1157
+ # to the first token.
1158
+ first_token_tensor = hidden_states[:, 0]
1159
+ pooled_output = self.dense(first_token_tensor)
1160
+ pooled_output = self.activation(pooled_output)
1161
+ return pooled_output
1162
+
1163
+
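+ # Editorial note (not part of the original implementation): the pooler applies Linear + tanh to the hidden
+ # state of the first ([CLS]) token. The ALIGN text embedding used for contrastive matching further below
+ # does not go through this pooler; AlignModel instead projects the raw first-token hidden state:
+ #
+ #     pooled = AlignTextPooler(config)(last_hidden_state)   # (batch, hidden_size), dense + tanh
+ #     cls_state = last_hidden_state[:, 0, :]                # what AlignModel feeds to text_projection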
1164
+ class AlignPreTrainedModel(PreTrainedModel):
1165
+ """
1166
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1167
+ models.
1168
+ """
1169
+
1170
+ config_class = AlignConfig
1171
+ base_model_prefix = "align"
1172
+ supports_gradient_checkpointing = True
1173
+
1174
+ def _init_weights(self, module):
1175
+ """Initialize the weights"""
1176
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
1177
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
1178
+ if module.bias is not None:
1179
+ module.bias.data.zero_()
1180
+ elif isinstance(module, AlignModel):
1181
+ nn.init.xavier_uniform_(module.text_projection.weight)
1182
+ module.text_projection.bias.data.zero_()
1183
+ module.text_projection._is_hf_initialized = True
1184
+ elif isinstance(module, nn.Embedding):
1185
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
1186
+ if module.padding_idx is not None:
1187
+ module.weight.data[module.padding_idx].zero_()
1188
+ if isinstance(module, nn.LayerNorm):
1189
+ module.bias.data.zero_()
1190
+ module.weight.data.fill_(1.0)
1191
+
1192
+
1193
+ @add_start_docstrings(
1194
+ """The text model from ALIGN without any head or projection on top.""",
1195
+ ALIGN_START_DOCSTRING,
1196
+ )
1197
+ class AlignTextModel(AlignPreTrainedModel):
1198
+ config_class = AlignTextConfig
1199
+
1200
+ def __init__(self, config: AlignTextConfig, add_pooling_layer: bool = True):
1201
+ super().__init__(config)
1202
+ self.config = config
1203
+
1204
+ self.embeddings = AlignTextEmbeddings(config)
1205
+ self.encoder = AlignTextEncoder(config)
1206
+
1207
+ self.pooler = AlignTextPooler(config) if add_pooling_layer else None
1208
+
1209
+ # Initialize weights and apply final processing
1210
+ self.post_init()
1211
+
1212
+ def get_input_embeddings(self):
1213
+ return self.embeddings.word_embeddings
1214
+
1215
+ def set_input_embeddings(self, value):
1216
+ self.embeddings.word_embeddings = value
1217
+
1218
+ @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING)
1219
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=AlignTextConfig)
1220
+ def forward(
1221
+ self,
1222
+ input_ids: Optional[torch.Tensor] = None,
1223
+ attention_mask: Optional[torch.Tensor] = None,
1224
+ token_type_ids: Optional[torch.Tensor] = None,
1225
+ position_ids: Optional[torch.Tensor] = None,
1226
+ head_mask: Optional[torch.Tensor] = None,
1227
+ inputs_embeds: Optional[torch.Tensor] = None,
1228
+ output_attentions: Optional[bool] = None,
1229
+ output_hidden_states: Optional[bool] = None,
1230
+ return_dict: Optional[bool] = None,
1231
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
1232
+ r"""
1233
+ Returns:
1234
+
1235
+ Examples:
1236
+
1237
+ ```python
1238
+ >>> from transformers import AutoTokenizer, AlignTextModel
1239
+
1240
+ >>> model = AlignTextModel.from_pretrained("kakaobrain/align-base")
1241
+ >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
1242
+
1243
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1244
+
1245
+ >>> outputs = model(**inputs)
1246
+ >>> last_hidden_state = outputs.last_hidden_state
1247
+ >>> pooled_output = outputs.pooler_output # pooled ([CLS] token) states
1248
+ ```"""
1249
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1250
+ output_hidden_states = (
1251
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1252
+ )
1253
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1254
+
1255
+ if input_ids is not None and inputs_embeds is not None:
1256
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1257
+ elif input_ids is not None:
1258
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1259
+ input_shape = input_ids.size()
1260
+ elif inputs_embeds is not None:
1261
+ input_shape = inputs_embeds.size()[:-1]
1262
+ else:
1263
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1264
+
1265
+ batch_size, seq_length = input_shape
1266
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1267
+
1268
+ if attention_mask is None:
1269
+ attention_mask = torch.ones((batch_size, seq_length), device=device)
1270
+
1271
+ if token_type_ids is None:
1272
+ if hasattr(self.embeddings, "token_type_ids"):
1273
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
1274
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
1275
+ token_type_ids = buffered_token_type_ids_expanded
1276
+ else:
1277
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
1278
+
1279
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1280
+ # ourselves in which case we just need to make it broadcastable to all heads.
1281
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
1282
+
1283
+ # Prepare head mask if needed
1284
+ # 1.0 in head_mask indicate we keep the head
1285
+ # attention_probs has shape bsz x n_heads x N x N
1286
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1287
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1288
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1289
+
1290
+ embedding_output = self.embeddings(
1291
+ input_ids=input_ids,
1292
+ position_ids=position_ids,
1293
+ token_type_ids=token_type_ids,
1294
+ inputs_embeds=inputs_embeds,
1295
+ )
1296
+ encoder_outputs = self.encoder(
1297
+ embedding_output,
1298
+ attention_mask=extended_attention_mask,
1299
+ head_mask=head_mask,
1300
+ output_attentions=output_attentions,
1301
+ output_hidden_states=output_hidden_states,
1302
+ return_dict=return_dict,
1303
+ )
1304
+ sequence_output = encoder_outputs[0]
1305
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1306
+
1307
+ if not return_dict:
1308
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1309
+
1310
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1311
+ last_hidden_state=sequence_output,
1312
+ pooler_output=pooled_output,
1313
+ hidden_states=encoder_outputs.hidden_states,
1314
+ attentions=encoder_outputs.attentions,
1315
+ cross_attentions=encoder_outputs.cross_attentions,
1316
+ )
1317
+
1318
+
1319
+ @add_start_docstrings(
1320
+ """The vision model from ALIGN without any head or projection on top.""",
1321
+ ALIGN_START_DOCSTRING,
1322
+ )
1323
+ class AlignVisionModel(AlignPreTrainedModel):
1324
+ config_class = AlignVisionConfig
1325
+ main_input_name = "pixel_values"
1326
+ supports_gradient_checkpointing = False
1327
+
1328
+ def __init__(self, config: AlignVisionConfig):
1329
+ super().__init__(config)
1330
+ self.config = config
1331
+ self.embeddings = AlignVisionEmbeddings(config)
1332
+ self.encoder = AlignVisionEncoder(config)
1333
+
1334
+ # Final pooling layer
1335
+ if config.pooling_type == "mean":
1336
+ self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
1337
+ elif config.pooling_type == "max":
1338
+ self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
1339
+ else:
1340
+ raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}")
1341
+
1342
+ # Initialize weights and apply final processing
1343
+ self.post_init()
1344
+
1345
+ def get_input_embeddings(self) -> nn.Module:
1346
+ return self.vision_model.embeddings.convolution
1347
+
1348
+ @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING)
1349
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=AlignVisionConfig)
1350
+ def forward(
1351
+ self,
1352
+ pixel_values: Optional[torch.FloatTensor] = None,
1353
+ output_hidden_states: Optional[bool] = None,
1354
+ return_dict: Optional[bool] = None,
1355
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
1356
+ r"""
1357
+ Returns:
1358
+
1359
+ Examples:
1360
+
1361
+ ```python
1362
+ >>> from PIL import Image
1363
+ >>> import requests
1364
+ >>> from transformers import AutoProcessor, AlignVisionModel
1365
+
1366
+ >>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base")
1367
+ >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
1368
+
1369
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1370
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1371
+
1372
+ >>> inputs = processor(images=image, return_tensors="pt")
1373
+
1374
+ >>> outputs = model(**inputs)
1375
+ >>> last_hidden_state = outputs.last_hidden_state
1376
+ >>> pooled_output = outputs.pooler_output # pooled features
1377
+ ```"""
1378
+ output_hidden_states = (
1379
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1380
+ )
1381
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1382
+
1383
+ if pixel_values is None:
1384
+ raise ValueError("You have to specify pixel_values")
1385
+
1386
+ embedding_output = self.embeddings(pixel_values)
1387
+ encoder_outputs = self.encoder(
1388
+ embedding_output,
1389
+ output_hidden_states=output_hidden_states,
1390
+ return_dict=return_dict,
1391
+ )
1392
+ # Apply pooling
1393
+ last_hidden_state = encoder_outputs[0]
1394
+ pooled_output = self.pooler(last_hidden_state)
1395
+ # Reshape (batch_size, projection_dim, 1, 1) -> (batch_size, projection_dim)
1396
+ pooled_output = pooled_output.reshape(pooled_output.shape[:2])
1397
+
1398
+ if not return_dict:
1399
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
1400
+
1401
+ return BaseModelOutputWithPoolingAndNoAttention(
1402
+ last_hidden_state=last_hidden_state,
1403
+ pooler_output=pooled_output,
1404
+ hidden_states=encoder_outputs.hidden_states,
1405
+ )
1406
+
1407
+
1408
+ @add_start_docstrings(ALIGN_START_DOCSTRING)
1409
+ class AlignModel(AlignPreTrainedModel):
1410
+ config_class = AlignConfig
1411
+
1412
+ def __init__(self, config: AlignConfig):
1413
+ super().__init__(config)
1414
+
1415
+ if not isinstance(config.text_config, AlignTextConfig):
1416
+ raise ValueError(
1417
+ "config.text_config is expected to be of type AlignTextConfig but is of type"
1418
+ f" {type(config.text_config)}."
1419
+ )
1420
+
1421
+ if not isinstance(config.vision_config, AlignVisionConfig):
1422
+ raise ValueError(
1423
+ "config.vision_config is expected to be of type AlignVisionConfig but is of type"
1424
+ f" {type(config.vision_config)}."
1425
+ )
1426
+
1427
+ text_config = config.text_config
1428
+ vision_config = config.vision_config
1429
+
1430
+ self.projection_dim = config.projection_dim
1431
+ self.text_embed_dim = text_config.hidden_size
1432
+
1433
+ self.text_model = AlignTextModel(text_config)
1434
+ self.vision_model = AlignVisionModel(vision_config)
1435
+
1436
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim)
1437
+ self.temperature = nn.Parameter(torch.tensor(self.config.temperature_init_value))
1438
+
1439
+ # Initialize weights and apply final processing
1440
+ self.post_init()
1441
+
1442
+ @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING)
1443
+ def get_text_features(
1444
+ self,
1445
+ input_ids: Optional[torch.Tensor] = None,
1446
+ attention_mask: Optional[torch.Tensor] = None,
1447
+ token_type_ids: Optional[torch.Tensor] = None,
1448
+ position_ids: Optional[torch.Tensor] = None,
1449
+ head_mask: Optional[torch.Tensor] = None,
1450
+ inputs_embeds: Optional[torch.Tensor] = None,
1451
+ output_attentions: Optional[bool] = None,
1452
+ output_hidden_states: Optional[bool] = None,
1453
+ return_dict: Optional[bool] = None,
1454
+ ) -> torch.FloatTensor:
1455
+ r"""
1456
+ Returns:
1457
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1458
+ applying the projection layer to the pooled output of [`AlignTextModel`].
1459
+
1460
+ Examples:
1461
+
1462
+ ```python
1463
+ >>> from transformers import AutoTokenizer, AlignModel
1464
+
1465
+ >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
1466
+ >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
1467
+
1468
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1469
+ >>> text_features = model.get_text_features(**inputs)
1470
+ ```"""
1471
+ # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
1472
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1473
+ output_hidden_states = (
1474
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1475
+ )
1476
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1477
+
1478
+ text_outputs = self.text_model(
1479
+ input_ids=input_ids,
1480
+ attention_mask=attention_mask,
1481
+ token_type_ids=token_type_ids,
1482
+ position_ids=position_ids,
1483
+ head_mask=head_mask,
1484
+ inputs_embeds=inputs_embeds,
1485
+ output_attentions=output_attentions,
1486
+ output_hidden_states=output_hidden_states,
1487
+ return_dict=return_dict,
1488
+ )
1489
+
1490
+ last_hidden_state = text_outputs[0][:, 0, :]
1491
+ text_features = self.text_projection(last_hidden_state)
1492
+
1493
+ return text_features
1494
+
1495
+ @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING)
1496
+ def get_image_features(
1497
+ self,
1498
+ pixel_values: Optional[torch.FloatTensor] = None,
1499
+ output_hidden_states: Optional[bool] = None,
1500
+ return_dict: Optional[bool] = None,
1501
+ ) -> torch.FloatTensor:
1502
+ r"""
1503
+ Returns:
1504
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1505
+ applying the projection layer to the pooled output of [`AlignVisionModel`].
1506
+
1507
+ Examples:
1508
+
1509
+ ```python
1510
+ >>> from PIL import Image
1511
+ >>> import requests
1512
+ >>> from transformers import AutoProcessor, AlignModel
1513
+
1514
+ >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
1515
+ >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
1516
+
1517
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1518
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1519
+
1520
+ >>> inputs = processor(images=image, return_tensors="pt")
1521
+
1522
+ >>> image_features = model.get_image_features(**inputs)
1523
+ ```"""
1524
+ # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
1525
+ output_hidden_states = (
1526
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1527
+ )
1528
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1529
+
1530
+ vision_outputs = self.vision_model(
1531
+ pixel_values=pixel_values,
1532
+ output_hidden_states=output_hidden_states,
1533
+ return_dict=return_dict,
1534
+ )
1535
+
1536
+ image_features = vision_outputs[1] # pooled_output
1537
+
1538
+ return image_features
1539
+
1540
+ @add_start_docstrings_to_model_forward(ALIGN_INPUTS_DOCSTRING)
1541
+ @replace_return_docstrings(output_type=AlignOutput, config_class=AlignConfig)
1542
+ def forward(
1543
+ self,
1544
+ input_ids: Optional[torch.LongTensor] = None,
1545
+ pixel_values: Optional[torch.FloatTensor] = None,
1546
+ attention_mask: Optional[torch.Tensor] = None,
1547
+ token_type_ids: Optional[torch.Tensor] = None,
1548
+ position_ids: Optional[torch.Tensor] = None,
1549
+ head_mask: Optional[torch.Tensor] = None,
1550
+ inputs_embeds: Optional[torch.Tensor] = None,
1551
+ return_loss: Optional[bool] = None,
1552
+ output_attentions: Optional[bool] = None,
1553
+ output_hidden_states: Optional[bool] = None,
1554
+ return_dict: Optional[bool] = None,
1555
+ ) -> Union[Tuple, AlignOutput]:
1556
+ r"""
1557
+ Returns:
1558
+
1559
+ Examples:
1560
+
1561
+ ```python
1562
+ >>> from PIL import Image
1563
+ >>> import requests
1564
+ >>> from transformers import AutoProcessor, AlignModel
1565
+
1566
+ >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
1567
+ >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
1568
+
1569
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1570
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1571
+
1572
+ >>> inputs = processor(
1573
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
1574
+ ... )
1575
+
1576
+ >>> outputs = model(**inputs)
1577
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1578
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1579
+ ```"""
1580
+ # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
1581
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1582
+ output_hidden_states = (
1583
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1584
+ )
1585
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1586
+
1587
+ vision_outputs = self.vision_model(
1588
+ pixel_values=pixel_values,
1589
+ output_hidden_states=output_hidden_states,
1590
+ return_dict=return_dict,
1591
+ )
1592
+
1593
+ text_outputs = self.text_model(
1594
+ input_ids=input_ids,
1595
+ attention_mask=attention_mask,
1596
+ token_type_ids=token_type_ids,
1597
+ position_ids=position_ids,
1598
+ head_mask=head_mask,
1599
+ inputs_embeds=inputs_embeds,
1600
+ output_attentions=output_attentions,
1601
+ output_hidden_states=output_hidden_states,
1602
+ return_dict=return_dict,
1603
+ )
1604
+
1605
+ image_embeds = vision_outputs[1]
1606
+ text_embeds = text_outputs[0][:, 0, :]
1607
+ text_embeds = self.text_projection(text_embeds)
1608
+
1609
+ # normalized features
1610
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1611
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1612
+
1613
+ # cosine similarity as logits
1614
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / self.temperature
1615
+ logits_per_image = logits_per_text.t()
1616
+
1617
+ loss = None
1618
+ if return_loss:
1619
+ loss = align_loss(logits_per_text)
1620
+
1621
+ if not return_dict:
1622
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1623
+ return ((loss,) + output) if loss is not None else output
1624
+
1625
+ return AlignOutput(
1626
+ loss=loss,
1627
+ logits_per_image=logits_per_image,
1628
+ logits_per_text=logits_per_text,
1629
+ text_embeds=text_embeds,
1630
+ image_embeds=image_embeds,
1631
+ text_model_output=text_outputs,
1632
+ vision_model_output=vision_outputs,
1633
+ )
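
The `forward` above reduces ALIGN's objective to a cosine-similarity matrix divided by a learned `temperature`, with `align_loss` applied to `logits_per_text` when `return_loss=True`. As a hedged aside, the symmetric CLIP-style contrastive objective this corresponds to can be sketched on toy tensors; the batch size, embedding width and fixed temperature below are illustrative values, not the model's.

```python
import torch
import torch.nn.functional as F

# Toy stand-ins for the projected text/image embeddings (illustrative sizes only).
batch_size, dim = 4, 640
text_embeds = F.normalize(torch.randn(batch_size, dim), p=2, dim=-1)
image_embeds = F.normalize(torch.randn(batch_size, dim), p=2, dim=-1)
temperature = 1.0  # a learned scalar in the real model

# Cosine similarities scaled by the temperature, mirroring the forward pass above.
logits_per_text = text_embeds @ image_embeds.t() / temperature  # (batch, batch)
logits_per_image = logits_per_text.t()

# Matching text/image pairs sit on the diagonal, so the targets are 0..batch_size-1.
labels = torch.arange(batch_size)
loss = (F.cross_entropy(logits_per_text, labels) + F.cross_entropy(logits_per_image, labels)) / 2.0
print(loss.item())
```
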
llmeval-env/lib/python3.10/site-packages/transformers/models/align/processing_align.py ADDED
@@ -0,0 +1,121 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for ALIGN
17
+ """
18
+
19
+
20
+ from ...processing_utils import ProcessorMixin
21
+ from ...tokenization_utils_base import BatchEncoding
22
+
23
+
24
+ class AlignProcessor(ProcessorMixin):
25
+ r"""
26
+ Constructs an ALIGN processor which wraps [`EfficientNetImageProcessor`] and
27
+ [`BertTokenizer`]/[`BertTokenizerFast`] into a single processor that inherits both the image processor and
28
+ tokenizer functionalities. See the [`~AlignProcessor.__call__`] and [`~AlignProcessor.decode`] for more
29
+ information.
30
+
31
+ Args:
32
+ image_processor ([`EfficientNetImageProcessor`]):
33
+ The image processor is a required input.
34
+ tokenizer ([`BertTokenizer`, `BertTokenizerFast`]):
35
+ The tokenizer is a required input.
36
+ """
37
+
38
+ attributes = ["image_processor", "tokenizer"]
39
+ image_processor_class = "EfficientNetImageProcessor"
40
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
41
+
42
+ def __init__(self, image_processor, tokenizer):
43
+ super().__init__(image_processor, tokenizer)
44
+
45
+ def __call__(self, text=None, images=None, padding="max_length", max_length=64, return_tensors=None, **kwargs):
46
+ """
47
+ Main method to prepare text(s) and image(s) to be fed as input to the model. This method forwards the `text`
48
+ and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
49
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
50
+ EfficientNetImageProcessor's [`~EfficientNetImageProcessor.__call__`] if `images` is not `None`. Please refer
51
+ to the docstring of the above two methods for more information.
52
+
53
+ Args:
54
+ text (`str`, `List[str]`):
55
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
56
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
57
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
58
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
59
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
60
+ tensor. Both channels-first and channels-last formats are supported.
61
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `max_length`):
62
+ Activates and controls padding for tokenization of input text. Choose between [`True` or `'longest'`,
63
+ `'max_length'`, `False` or `'do_not_pad'`]
64
+ max_length (`int`, *optional*, defaults to 64):
65
+ Maximum length to use when padding the input text during tokenization.
66
+
67
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
68
+ If set, will return tensors of a particular framework. Acceptable values are:
69
+
70
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
71
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
72
+ - `'np'`: Return NumPy `np.ndarray` objects.
73
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
74
+
75
+ Returns:
76
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
77
+
78
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
79
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
80
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
81
+ `None`).
82
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
83
+ """
84
+ if text is None and images is None:
85
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
86
+
87
+ if text is not None:
88
+ encoding = self.tokenizer(
89
+ text, padding=padding, max_length=max_length, return_tensors=return_tensors, **kwargs
90
+ )
91
+
92
+ if images is not None:
93
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
94
+
95
+ if text is not None and images is not None:
96
+ encoding["pixel_values"] = image_features.pixel_values
97
+ return encoding
98
+ elif text is not None:
99
+ return encoding
100
+ else:
101
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
102
+
103
+ def batch_decode(self, *args, **kwargs):
104
+ """
105
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
106
+ refer to the docstring of this method for more information.
107
+ """
108
+ return self.tokenizer.batch_decode(*args, **kwargs)
109
+
110
+ def decode(self, *args, **kwargs):
111
+ """
112
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
113
+ the docstring of this method for more information.
114
+ """
115
+ return self.tokenizer.decode(*args, **kwargs)
116
+
117
+ @property
118
+ def model_input_names(self):
119
+ tokenizer_input_names = self.tokenizer.model_input_names
120
+ image_processor_input_names = self.image_processor.model_input_names
121
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
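
The `__call__` docstring above lists the accepted inputs but has no end-to-end example, so here is a short usage sketch. It assumes network access to fetch the `kakaobrain/align-base` processor files from the Hub, and the printed keys are indicative rather than guaranteed.

```python
import requests
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Text-only, image-only, or both are accepted; with both, pixel_values is merged
# into the tokenizer's BatchEncoding.
batch = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt")
print(list(batch.keys()))        # roughly: input_ids, token_type_ids, attention_mask, pixel_values
print(batch["input_ids"].shape)  # text is padded to max_length=64 by default
```
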
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__init__.py ADDED
@@ -0,0 +1,74 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_donut_swin": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutSwinConfig"],
21
+ "processing_donut": ["DonutProcessor"],
22
+ }
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_donut_swin"] = [
31
+ "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
32
+ "DonutSwinModel",
33
+ "DonutSwinPreTrainedModel",
34
+ ]
35
+
36
+ try:
37
+ if not is_vision_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"]
43
+ _import_structure["image_processing_donut"] = ["DonutImageProcessor"]
44
+
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_donut_swin import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutSwinConfig
48
+ from .processing_donut import DonutProcessor
49
+
50
+ try:
51
+ if not is_torch_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .modeling_donut_swin import (
57
+ DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
58
+ DonutSwinModel,
59
+ DonutSwinPreTrainedModel,
60
+ )
61
+
62
+ try:
63
+ if not is_vision_available():
64
+ raise OptionalDependencyNotAvailable()
65
+ except OptionalDependencyNotAvailable:
66
+ pass
67
+ else:
68
+ from .feature_extraction_donut import DonutFeatureExtractor
69
+ from .image_processing_donut import DonutImageProcessor
70
+
71
+ else:
72
+ import sys
73
+
74
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
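
As a brief illustration of what the `_LazyModule` registration at the end of this `__init__.py` buys: importing the package itself stays cheap, and a submodule such as `processing_donut` is only imported the first time one of its attributes is accessed (assuming `transformers` is installed).

```python
import importlib

# Importing the package returns the lazy proxy that was installed in sys.modules above.
donut = importlib.import_module("transformers.models.donut")
print(type(donut).__name__)

# Attribute access is what triggers the real import of processing_donut.
processor_cls = donut.DonutProcessor
print(processor_cls.__name__)
```
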
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/__pycache__/configuration_donut_swin.cpython-310.pyc ADDED
Binary file (4.95 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/configuration_donut_swin.py ADDED
@@ -0,0 +1,135 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Donut Swin Transformer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class DonutSwinConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a
30
+ Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the Donut
32
+ [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ image_size (`int`, *optional*, defaults to 224):
39
+ The size (resolution) of each image.
40
+ patch_size (`int`, *optional*, defaults to 4):
41
+ The size (resolution) of each patch.
42
+ num_channels (`int`, *optional*, defaults to 3):
43
+ The number of input channels.
44
+ embed_dim (`int`, *optional*, defaults to 96):
45
+ Dimensionality of patch embedding.
46
+ depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
47
+ Depth of each layer in the Transformer encoder.
48
+ num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
49
+ Number of attention heads in each layer of the Transformer encoder.
50
+ window_size (`int`, *optional*, defaults to 7):
51
+ Size of windows.
52
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
53
+ Ratio of MLP hidden dimensionality to embedding dimensionality.
54
+ qkv_bias (`bool`, *optional*, defaults to `True`):
55
+ Whether or not a learnable bias should be added to the queries, keys and values.
56
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
57
+ The dropout probability for all fully connected layers in the embeddings and encoder.
58
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
59
+ The dropout ratio for the attention probabilities.
60
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
61
+ Stochastic depth rate.
62
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
63
+ The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
64
+ `"selu"` and `"gelu_new"` are supported.
65
+ use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
66
+ Whether or not to add absolute position embeddings to the patch embeddings.
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
70
+ The epsilon used by the layer normalization layers.
71
+
72
+ Example:
73
+
74
+ ```python
75
+ >>> from transformers import DonutSwinConfig, DonutSwinModel
76
+
77
+ >>> # Initializing a Donut naver-clova-ix/donut-base style configuration
78
+ >>> configuration = DonutSwinConfig()
79
+
80
+ >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration
81
+ >>> model = DonutSwinModel(configuration)
82
+
83
+ >>> # Accessing the model configuration
84
+ >>> configuration = model.config
85
+ ```"""
86
+
87
+ model_type = "donut-swin"
88
+
89
+ attribute_map = {
90
+ "num_attention_heads": "num_heads",
91
+ "num_hidden_layers": "num_layers",
92
+ }
93
+
94
+ def __init__(
95
+ self,
96
+ image_size=224,
97
+ patch_size=4,
98
+ num_channels=3,
99
+ embed_dim=96,
100
+ depths=[2, 2, 6, 2],
101
+ num_heads=[3, 6, 12, 24],
102
+ window_size=7,
103
+ mlp_ratio=4.0,
104
+ qkv_bias=True,
105
+ hidden_dropout_prob=0.0,
106
+ attention_probs_dropout_prob=0.0,
107
+ drop_path_rate=0.1,
108
+ hidden_act="gelu",
109
+ use_absolute_embeddings=False,
110
+ initializer_range=0.02,
111
+ layer_norm_eps=1e-5,
112
+ **kwargs,
113
+ ):
114
+ super().__init__(**kwargs)
115
+
116
+ self.image_size = image_size
117
+ self.patch_size = patch_size
118
+ self.num_channels = num_channels
119
+ self.embed_dim = embed_dim
120
+ self.depths = depths
121
+ self.num_layers = len(depths)
122
+ self.num_heads = num_heads
123
+ self.window_size = window_size
124
+ self.mlp_ratio = mlp_ratio
125
+ self.qkv_bias = qkv_bias
126
+ self.hidden_dropout_prob = hidden_dropout_prob
127
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
128
+ self.drop_path_rate = drop_path_rate
129
+ self.hidden_act = hidden_act
130
+ self.use_absolute_embeddings = use_absolute_embeddings
131
+ self.layer_norm_eps = layer_norm_eps
132
+ self.initializer_range = initializer_range
133
+ # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
134
+ # this indicates the channel dimension after the last stage of the model
135
+ self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
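
The last line above derives `hidden_size` from `embed_dim` and the number of stages: every patch-merging step doubles the channel dimension, so four stages mean three doublings. A quick sketch with the default values makes the arithmetic explicit.

```python
from transformers import DonutSwinConfig

config = DonutSwinConfig()                       # embed_dim=96, depths=[2, 2, 6, 2]
num_stages = len(config.depths)                  # 4
print(config.embed_dim * 2 ** (num_stages - 1))  # 96 * 8 = 768
print(config.hidden_size)                        # 768, the same value
```
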
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/convert_donut_to_pytorch.py ADDED
@@ -0,0 +1,234 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut"""
16
+
17
+ import argparse
18
+
19
+ import torch
20
+ from datasets import load_dataset
21
+ from donut import DonutModel
22
+
23
+ from transformers import (
24
+ DonutImageProcessor,
25
+ DonutProcessor,
26
+ DonutSwinConfig,
27
+ DonutSwinModel,
28
+ MBartConfig,
29
+ MBartForCausalLM,
30
+ VisionEncoderDecoderModel,
31
+ XLMRobertaTokenizerFast,
32
+ )
33
+
34
+
35
+ def get_configs(model):
36
+ original_config = model.config
37
+
38
+ encoder_config = DonutSwinConfig(
39
+ image_size=original_config.input_size,
40
+ patch_size=4,
41
+ depths=original_config.encoder_layer,
42
+ num_heads=[4, 8, 16, 32],
43
+ window_size=original_config.window_size,
44
+ embed_dim=128,
45
+ )
46
+ decoder_config = MBartConfig(
47
+ is_decoder=True,
48
+ is_encoder_decoder=False,
49
+ add_cross_attention=True,
50
+ decoder_layers=original_config.decoder_layer,
51
+ max_position_embeddings=original_config.max_position_embeddings,
52
+ vocab_size=len(
53
+ model.decoder.tokenizer
54
+ ), # several special tokens are added to the vocab of XLMRobertaTokenizer, see repo on the hub (added_tokens.json)
55
+ scale_embedding=True,
56
+ add_final_layer_norm=True,
57
+ )
58
+
59
+ return encoder_config, decoder_config
60
+
61
+
62
+ def rename_key(name):
63
+ if "encoder.model" in name:
64
+ name = name.replace("encoder.model", "encoder")
65
+ if "decoder.model" in name:
66
+ name = name.replace("decoder.model", "decoder")
67
+ if "patch_embed.proj" in name:
68
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
69
+ if "patch_embed.norm" in name:
70
+ name = name.replace("patch_embed.norm", "embeddings.norm")
71
+ if name.startswith("encoder"):
72
+ if "layers" in name:
73
+ name = "encoder." + name
74
+ if "attn.proj" in name:
75
+ name = name.replace("attn.proj", "attention.output.dense")
76
+ if "attn" in name and "mask" not in name:
77
+ name = name.replace("attn", "attention.self")
78
+ if "norm1" in name:
79
+ name = name.replace("norm1", "layernorm_before")
80
+ if "norm2" in name:
81
+ name = name.replace("norm2", "layernorm_after")
82
+ if "mlp.fc1" in name:
83
+ name = name.replace("mlp.fc1", "intermediate.dense")
84
+ if "mlp.fc2" in name:
85
+ name = name.replace("mlp.fc2", "output.dense")
86
+
87
+ if name == "encoder.norm.weight":
88
+ name = "encoder.layernorm.weight"
89
+ if name == "encoder.norm.bias":
90
+ name = "encoder.layernorm.bias"
91
+
92
+ return name
93
+
94
+
95
+ def convert_state_dict(orig_state_dict, model):
96
+ for key in orig_state_dict.copy().keys():
97
+ val = orig_state_dict.pop(key)
98
+
99
+ if "qkv" in key:
100
+ key_split = key.split(".")
101
+ layer_num = int(key_split[3])
102
+ block_num = int(key_split[5])
103
+ dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
104
+
105
+ if "weight" in key:
106
+ orig_state_dict[
107
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
108
+ ] = val[:dim, :]
109
+ orig_state_dict[
110
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
111
+ ] = val[dim : dim * 2, :]
112
+ orig_state_dict[
113
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
114
+ ] = val[-dim:, :]
115
+ else:
116
+ orig_state_dict[
117
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
118
+ ] = val[:dim]
119
+ orig_state_dict[
120
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
121
+ ] = val[dim : dim * 2]
122
+ orig_state_dict[
123
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
124
+ ] = val[-dim:]
125
+ elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
126
+ # HuggingFace implementation doesn't use attn_mask buffer
127
+ # and model doesn't use final LayerNorms for the encoder
128
+ pass
129
+ else:
130
+ orig_state_dict[rename_key(key)] = val
131
+
132
+ return orig_state_dict
133
+
134
+
135
+ def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
136
+ # load original model
137
+ original_model = DonutModel.from_pretrained(model_name).eval()
138
+
139
+ # load HuggingFace model
140
+ encoder_config, decoder_config = get_configs(original_model)
141
+ encoder = DonutSwinModel(encoder_config)
142
+ decoder = MBartForCausalLM(decoder_config)
143
+ model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
144
+ model.eval()
145
+
146
+ state_dict = original_model.state_dict()
147
+ new_state_dict = convert_state_dict(state_dict, model)
148
+ model.load_state_dict(new_state_dict)
149
+
150
+ # verify results on scanned document
151
+ dataset = load_dataset("hf-internal-testing/example-documents")
152
+ image = dataset["test"][0]["image"].convert("RGB")
153
+
154
+ tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
155
+ image_processor = DonutImageProcessor(
156
+ do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
157
+ )
158
+ processor = DonutProcessor(image_processor, tokenizer)
159
+ pixel_values = processor(image, return_tensors="pt").pixel_values
160
+
161
+ if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
162
+ task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
163
+ question = "When is the coffee break?"
164
+ task_prompt = task_prompt.replace("{user_input}", question)
165
+ elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
166
+ task_prompt = "<s_rvlcdip>"
167
+ elif model_name in [
168
+ "naver-clova-ix/donut-base-finetuned-cord-v1",
169
+ "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
170
+ ]:
171
+ task_prompt = "<s_cord>"
172
+ elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
173
+ task_prompt = "s_cord-v2>"
174
+ elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
175
+ task_prompt = "<s_zhtrainticket>"
176
+ elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
177
+ # use a random prompt
178
+ task_prompt = "hello world"
179
+ else:
180
+ raise ValueError("Model name not supported")
181
+ prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
182
+ "input_ids"
183
+ ]
184
+
185
+ original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
186
+ patch_embeddings, _ = model.encoder.embeddings(pixel_values)
187
+ assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)
188
+
189
+ # verify encoder hidden states
190
+ original_last_hidden_state = original_model.encoder(pixel_values)
191
+ last_hidden_state = model.encoder(pixel_values).last_hidden_state
192
+ assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
193
+
194
+ # verify decoder hidden states
195
+ original_logits = original_model(pixel_values, prompt_tensors, None).logits
196
+ logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
197
+ assert torch.allclose(original_logits, logits, atol=1e-3)
198
+ print("Looks ok!")
199
+
200
+ if pytorch_dump_folder_path is not None:
201
+ print(f"Saving model and processor to {pytorch_dump_folder_path}")
202
+ model.save_pretrained(pytorch_dump_folder_path)
203
+ processor.save_pretrained(pytorch_dump_folder_path)
204
+
205
+ if push_to_hub:
206
+ model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
207
+ processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
208
+
209
+
210
+ if __name__ == "__main__":
211
+ parser = argparse.ArgumentParser()
212
+ # Required parameters
213
+ parser.add_argument(
214
+ "--model_name",
215
+ default="naver-clova-ix/donut-base-finetuned-docvqa",
216
+ required=False,
217
+ type=str,
218
+ help="Name of the original model you'd like to convert.",
219
+ )
220
+ parser.add_argument(
221
+ "--pytorch_dump_folder_path",
222
+ default=None,
223
+ required=False,
224
+ type=str,
225
+ help="Path to the output PyTorch model directory.",
226
+ )
227
+ parser.add_argument(
228
+ "--push_to_hub",
229
+ action="store_true",
230
+ help="Whether or not to push the converted model and processor to the 🤗 hub.",
231
+ )
232
+
233
+ args = parser.parse_args()
234
+ convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
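
`convert_state_dict` above splits each fused `qkv` projection from the original checkpoint into separate query/key/value tensors via the `[:dim]`, `[dim : dim * 2]` and `[-dim:]` slices. A toy tensor (sizes made up for illustration, with `dim` standing in for `all_head_size`) shows that the three slices tile the fused weight exactly.

```python
import torch

dim = 8                                        # plays the role of attention.self.all_head_size
fused_qkv_weight = torch.randn(3 * dim, dim)   # rows stacked as [query; key; value]

query_w = fused_qkv_weight[:dim, :]
key_w = fused_qkv_weight[dim : dim * 2, :]
value_w = fused_qkv_weight[-dim:, :]

# Concatenating the slices reproduces the original fused weight.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), fused_qkv_weight)
```
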
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/modeling_donut_swin.py ADDED
@@ -0,0 +1,955 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Donut Swin Transformer model.
16
+
17
+ This implementation is identical to a regular Swin Transformer, without final layer norm on top of the final hidden
18
+ states."""
19
+
20
+ import collections.abc
21
+ import math
22
+ from dataclasses import dataclass
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
32
+ from ...utils import (
33
+ ModelOutput,
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ )
39
+ from .configuration_donut_swin import DonutSwinConfig
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+ # General docstring
45
+ _CONFIG_FOR_DOC = "DonutSwinConfig"
46
+
47
+ # Base docstring
48
+ _CHECKPOINT_FOR_DOC = "https://huggingface.co/naver-clova-ix/donut-base"
49
+ _EXPECTED_OUTPUT_SHAPE = [1, 49, 768]
50
+
51
+
52
+ from ..deprecated._archive_maps import DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ @dataclass
56
+ # Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->DonutSwin
57
+ class DonutSwinEncoderOutput(ModelOutput):
58
+ """
59
+ DonutSwin encoder's outputs, with potential hidden states and attentions.
60
+
61
+ Args:
62
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
63
+ Sequence of hidden-states at the output of the last layer of the model.
64
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
65
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
66
+ shape `(batch_size, sequence_length, hidden_size)`.
67
+
68
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
69
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
70
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
71
+ sequence_length)`.
72
+
73
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
74
+ heads.
75
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
76
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
77
+ shape `(batch_size, hidden_size, height, width)`.
78
+
79
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
80
+ include the spatial dimensions.
81
+ """
82
+
83
+ last_hidden_state: torch.FloatTensor = None
84
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
85
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
86
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
87
+
88
+
89
+ @dataclass
90
+ # Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->DonutSwin
91
+ class DonutSwinModelOutput(ModelOutput):
92
+ """
93
+ DonutSwin model's outputs that also contains a pooling of the last hidden states.
94
+
95
+ Args:
96
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
97
+ Sequence of hidden-states at the output of the last layer of the model.
98
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
99
+ Average pooling of the last layer hidden-state.
100
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
101
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
102
+ shape `(batch_size, sequence_length, hidden_size)`.
103
+
104
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
105
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
106
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
107
+ sequence_length)`.
108
+
109
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
110
+ heads.
111
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
112
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
113
+ shape `(batch_size, hidden_size, height, width)`.
114
+
115
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
116
+ include the spatial dimensions.
117
+ """
118
+
119
+ last_hidden_state: torch.FloatTensor = None
120
+ pooler_output: Optional[torch.FloatTensor] = None
121
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
122
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
123
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
124
+
125
+
126
+ # Copied from transformers.models.swin.modeling_swin.window_partition
127
+ def window_partition(input_feature, window_size):
128
+ """
129
+ Partitions the given input into windows.
130
+ """
131
+ batch_size, height, width, num_channels = input_feature.shape
132
+ input_feature = input_feature.view(
133
+ batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
134
+ )
135
+ windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
136
+ return windows
137
+
138
+
139
+ # Copied from transformers.models.swin.modeling_swin.window_reverse
140
+ def window_reverse(windows, window_size, height, width):
141
+ """
142
+ Merges windows to produce higher resolution features.
143
+ """
144
+ num_channels = windows.shape[-1]
145
+ windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
146
+ windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
147
+ return windows
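
As an aside, `window_partition` and `window_reverse` above are exact inverses whenever the height and width are multiples of the window size. A small check using the two functions defined above (shapes chosen only for illustration):

```python
import torch

x = torch.randn(2, 8, 8, 3)                  # (batch, height, width, channels)
windows = window_partition(x, 4)             # (2 * 2 * 2, 4, 4, 3): eight 4x4 windows
restored = window_reverse(windows, 4, 8, 8)  # back to (2, 8, 8, 3)
assert torch.equal(restored, x)
```
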
148
+
149
+
150
+ # Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->DonutSwin
151
+ class DonutSwinEmbeddings(nn.Module):
152
+ """
153
+ Construct the patch and position embeddings. Optionally, also the mask token.
154
+ """
155
+
156
+ def __init__(self, config, use_mask_token=False):
157
+ super().__init__()
158
+
159
+ self.patch_embeddings = DonutSwinPatchEmbeddings(config)
160
+ num_patches = self.patch_embeddings.num_patches
161
+ self.patch_grid = self.patch_embeddings.grid_size
162
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
163
+
164
+ if config.use_absolute_embeddings:
165
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
166
+ else:
167
+ self.position_embeddings = None
168
+
169
+ self.norm = nn.LayerNorm(config.embed_dim)
170
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
171
+
172
+ def forward(
173
+ self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
174
+ ) -> Tuple[torch.Tensor]:
175
+ embeddings, output_dimensions = self.patch_embeddings(pixel_values)
176
+ embeddings = self.norm(embeddings)
177
+ batch_size, seq_len, _ = embeddings.size()
178
+
179
+ if bool_masked_pos is not None:
180
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
181
+ # replace the masked visual tokens by mask_tokens
182
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
183
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
184
+
185
+ if self.position_embeddings is not None:
186
+ embeddings = embeddings + self.position_embeddings
187
+
188
+ embeddings = self.dropout(embeddings)
189
+
190
+ return embeddings, output_dimensions
191
+
192
+
193
+ # Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings
194
+ class DonutSwinPatchEmbeddings(nn.Module):
195
+ """
196
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
197
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
198
+ Transformer.
199
+ """
200
+
201
+ def __init__(self, config):
202
+ super().__init__()
203
+ image_size, patch_size = config.image_size, config.patch_size
204
+ num_channels, hidden_size = config.num_channels, config.embed_dim
205
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
206
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
207
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
208
+ self.image_size = image_size
209
+ self.patch_size = patch_size
210
+ self.num_channels = num_channels
211
+ self.num_patches = num_patches
212
+ self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
213
+
214
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
215
+
216
+ def maybe_pad(self, pixel_values, height, width):
217
+ if width % self.patch_size[1] != 0:
218
+ pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
219
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
220
+ if height % self.patch_size[0] != 0:
221
+ pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
222
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
223
+ return pixel_values
224
+
225
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
226
+ _, num_channels, height, width = pixel_values.shape
227
+ if num_channels != self.num_channels:
228
+ raise ValueError(
229
+ "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
230
+ )
231
+ # pad the input to be divisible by self.patch_size, if needed
232
+ pixel_values = self.maybe_pad(pixel_values, height, width)
233
+ embeddings = self.projection(pixel_values)
234
+ _, _, height, width = embeddings.shape
235
+ output_dimensions = (height, width)
236
+ embeddings = embeddings.flatten(2).transpose(1, 2)
237
+
238
+ return embeddings, output_dimensions
239
+
240
+
241
+ # Copied from transformers.models.swin.modeling_swin.SwinPatchMerging
242
+ class DonutSwinPatchMerging(nn.Module):
243
+ """
244
+ Patch Merging Layer.
245
+
246
+ Args:
247
+ input_resolution (`Tuple[int]`):
248
+ Resolution of input feature.
249
+ dim (`int`):
250
+ Number of input channels.
251
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
252
+ Normalization layer class.
253
+ """
254
+
255
+ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
256
+ super().__init__()
257
+ self.input_resolution = input_resolution
258
+ self.dim = dim
259
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
260
+ self.norm = norm_layer(4 * dim)
261
+
262
+ def maybe_pad(self, input_feature, height, width):
263
+ should_pad = (height % 2 == 1) or (width % 2 == 1)
264
+ if should_pad:
265
+ pad_values = (0, 0, 0, width % 2, 0, height % 2)
266
+ input_feature = nn.functional.pad(input_feature, pad_values)
267
+
268
+ return input_feature
269
+
270
+ def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
271
+ height, width = input_dimensions
272
+ # `dim` is height * width
273
+ batch_size, dim, num_channels = input_feature.shape
274
+
275
+ input_feature = input_feature.view(batch_size, height, width, num_channels)
276
+ # pad input so that height and width are divisible by 2, if needed
277
+ input_feature = self.maybe_pad(input_feature, height, width)
278
+ # [batch_size, height/2, width/2, num_channels]
279
+ input_feature_0 = input_feature[:, 0::2, 0::2, :]
280
+ # [batch_size, height/2, width/2, num_channels]
281
+ input_feature_1 = input_feature[:, 1::2, 0::2, :]
282
+ # [batch_size, height/2, width/2, num_channels]
283
+ input_feature_2 = input_feature[:, 0::2, 1::2, :]
284
+ # [batch_size, height/2, width/2, num_channels]
285
+ input_feature_3 = input_feature[:, 1::2, 1::2, :]
286
+ # batch_size height/2 width/2 4*num_channels
287
+ input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
288
+ input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
289
+
290
+ input_feature = self.norm(input_feature)
291
+ input_feature = self.reduction(input_feature)
292
+
293
+ return input_feature
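
A quick shape check may help here: one merging step halves the spatial resolution and, after the `4 * dim -> 2 * dim` reduction, doubles the channel dimension. Using the class defined above (numbers are illustrative):

```python
import torch

merge = DonutSwinPatchMerging(input_resolution=(56, 56), dim=96)
tokens = torch.randn(1, 56 * 56, 96)  # (batch, height * width, channels)
merged = merge(tokens, (56, 56))
print(merged.shape)                   # torch.Size([1, 784, 192]) = (28 * 28, 2 * 96)
```
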
294
+
295
+
296
+ # Copied from transformers.models.beit.modeling_beit.drop_path
297
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
298
+ """
299
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
300
+
301
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
302
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
303
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
304
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
305
+ argument.
306
+ """
307
+ if drop_prob == 0.0 or not training:
308
+ return input
309
+ keep_prob = 1 - drop_prob
310
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
311
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
312
+ random_tensor.floor_() # binarize
313
+ output = input.div(keep_prob) * random_tensor
314
+ return output
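
In other words, each sample survives `drop_path` with probability `1 - drop_prob` and the survivors are rescaled by `1 / keep_prob`, so the expected activation is unchanged. A hedged sanity check on the function above:

```python
import torch

x = torch.ones(10_000, 16)
out = drop_path(x, drop_prob=0.2, training=True)
print((out.sum(dim=-1) == 0).float().mean())  # roughly 0.2 of the rows are dropped entirely
print(out.max().item())                       # survivors are scaled to about 1 / 0.8 = 1.25
print(out.mean().item())                      # close to 1.0 on average
```
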
315
+
316
+
317
+ # Copied from transformers.models.swin.modeling_swin.SwinDropPath
318
+ class DonutSwinDropPath(nn.Module):
319
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
320
+
321
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
322
+ super().__init__()
323
+ self.drop_prob = drop_prob
324
+
325
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
326
+ return drop_path(hidden_states, self.drop_prob, self.training)
327
+
328
+ def extra_repr(self) -> str:
329
+ return "p={}".format(self.drop_prob)
330
+
331
+
332
+ # Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->DonutSwin
333
+ class DonutSwinSelfAttention(nn.Module):
334
+ def __init__(self, config, dim, num_heads, window_size):
335
+ super().__init__()
336
+ if dim % num_heads != 0:
337
+ raise ValueError(
338
+ f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
339
+ )
340
+
341
+ self.num_attention_heads = num_heads
342
+ self.attention_head_size = int(dim / num_heads)
343
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
344
+ self.window_size = (
345
+ window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
346
+ )
347
+
348
+ self.relative_position_bias_table = nn.Parameter(
349
+ torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
350
+ )
351
+
352
+ # get pair-wise relative position index for each token inside the window
353
+ coords_h = torch.arange(self.window_size[0])
354
+ coords_w = torch.arange(self.window_size[1])
355
+ coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
356
+ coords_flatten = torch.flatten(coords, 1)
357
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
358
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous()
359
+ relative_coords[:, :, 0] += self.window_size[0] - 1
360
+ relative_coords[:, :, 1] += self.window_size[1] - 1
361
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
362
+ relative_position_index = relative_coords.sum(-1)
363
+ self.register_buffer("relative_position_index", relative_position_index)
364
+
365
+ self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
366
+ self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
367
+ self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
368
+
369
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
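
The relative-position index built a few lines above can be spelled out for the default `window_size=(7, 7)`: offsets along each axis lie in `[-6, 6]`, shifting by `6` maps them to `[0, 12]`, and scaling the row offset by `2 * 7 - 1 = 13` turns each `(row, col)` pair into a unique index `row * 13 + col` in `[0, 168]`, i.e. one row of the `(13 * 13 = 169) x num_heads` bias table per relative offset. A standalone sketch of the same computation:

```python
import torch

window = 7
coords = torch.stack(torch.meshgrid(torch.arange(window), torch.arange(window), indexing="ij"))
flat = torch.flatten(coords, 1)                               # (2, 49)
rel = (flat[:, :, None] - flat[:, None, :]).permute(1, 2, 0)  # (49, 49, 2), offsets in [-6, 6]
rel = rel + (window - 1)                                      # shift to [0, 12]
index = rel[:, :, 0] * (2 * window - 1) + rel[:, :, 1]        # same arithmetic as the buffer above
print(index.min().item(), index.max().item())                 # 0 168
```
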
370
+
371
+ def transpose_for_scores(self, x):
372
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
373
+ x = x.view(new_x_shape)
374
+ return x.permute(0, 2, 1, 3)
375
+
376
+ def forward(
377
+ self,
378
+ hidden_states: torch.Tensor,
379
+ attention_mask: Optional[torch.FloatTensor] = None,
380
+ head_mask: Optional[torch.FloatTensor] = None,
381
+ output_attentions: Optional[bool] = False,
382
+ ) -> Tuple[torch.Tensor]:
383
+ batch_size, dim, num_channels = hidden_states.shape
384
+ mixed_query_layer = self.query(hidden_states)
385
+
386
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
387
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
388
+ query_layer = self.transpose_for_scores(mixed_query_layer)
389
+
390
+ # Take the dot product between "query" and "key" to get the raw attention scores.
391
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
392
+
393
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
394
+
395
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
396
+ relative_position_bias = relative_position_bias.view(
397
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
398
+ )
399
+
400
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
401
+ attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
402
+
403
+ if attention_mask is not None:
404
+ # Apply the attention mask (precomputed for all layers in DonutSwinModel forward() function)
405
+ mask_shape = attention_mask.shape[0]
406
+ attention_scores = attention_scores.view(
407
+ batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
408
+ )
409
+ attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
410
+ attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
411
+
412
+ # Normalize the attention scores to probabilities.
413
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
414
+
415
+ # This is actually dropping out entire tokens to attend to, which might
416
+ # seem a bit unusual, but is taken from the original Transformer paper.
417
+ attention_probs = self.dropout(attention_probs)
418
+
419
+ # Mask heads if we want to
420
+ if head_mask is not None:
421
+ attention_probs = attention_probs * head_mask
422
+
423
+ context_layer = torch.matmul(attention_probs, value_layer)
424
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
425
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
426
+ context_layer = context_layer.view(new_context_layer_shape)
427
+
428
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
429
+
430
+ return outputs
431
+
432
+
433
+ # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput
434
+ class DonutSwinSelfOutput(nn.Module):
435
+ def __init__(self, config, dim):
436
+ super().__init__()
437
+ self.dense = nn.Linear(dim, dim)
438
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
439
+
440
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
441
+ hidden_states = self.dense(hidden_states)
442
+ hidden_states = self.dropout(hidden_states)
443
+
444
+ return hidden_states
445
+
446
+
447
+ # Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->DonutSwin
448
+ class DonutSwinAttention(nn.Module):
449
+ def __init__(self, config, dim, num_heads, window_size):
450
+ super().__init__()
451
+ self.self = DonutSwinSelfAttention(config, dim, num_heads, window_size)
452
+ self.output = DonutSwinSelfOutput(config, dim)
453
+ self.pruned_heads = set()
454
+
455
+ def prune_heads(self, heads):
456
+ if len(heads) == 0:
457
+ return
458
+ heads, index = find_pruneable_heads_and_indices(
459
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
460
+ )
461
+
462
+ # Prune linear layers
463
+ self.self.query = prune_linear_layer(self.self.query, index)
464
+ self.self.key = prune_linear_layer(self.self.key, index)
465
+ self.self.value = prune_linear_layer(self.self.value, index)
466
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
467
+
468
+ # Update hyper params and store pruned heads
469
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
470
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
471
+ self.pruned_heads = self.pruned_heads.union(heads)
472
+
473
+ def forward(
474
+ self,
475
+ hidden_states: torch.Tensor,
476
+ attention_mask: Optional[torch.FloatTensor] = None,
477
+ head_mask: Optional[torch.FloatTensor] = None,
478
+ output_attentions: Optional[bool] = False,
479
+ ) -> Tuple[torch.Tensor]:
480
+ self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
481
+ attention_output = self.output(self_outputs[0], hidden_states)
482
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
483
+ return outputs
484
+
485
+
486
+ # Copied from transformers.models.swin.modeling_swin.SwinIntermediate
487
+ class DonutSwinIntermediate(nn.Module):
488
+ def __init__(self, config, dim):
489
+ super().__init__()
490
+ self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
491
+ if isinstance(config.hidden_act, str):
492
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
493
+ else:
494
+ self.intermediate_act_fn = config.hidden_act
495
+
496
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
497
+ hidden_states = self.dense(hidden_states)
498
+ hidden_states = self.intermediate_act_fn(hidden_states)
499
+ return hidden_states
500
+
501
+
502
+ # Copied from transformers.models.swin.modeling_swin.SwinOutput
503
+ class DonutSwinOutput(nn.Module):
504
+ def __init__(self, config, dim):
505
+ super().__init__()
506
+ self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
507
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
508
+
509
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
510
+ hidden_states = self.dense(hidden_states)
511
+ hidden_states = self.dropout(hidden_states)
512
+ return hidden_states
513
+
514
+
515
+ # Copied from transformers.models.swin.modeling_swin.SwinLayer with Swin->DonutSwin
516
+ class DonutSwinLayer(nn.Module):
517
+ def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
518
+ super().__init__()
519
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
520
+ self.shift_size = shift_size
521
+ self.window_size = config.window_size
522
+ self.input_resolution = input_resolution
523
+ self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
524
+ self.attention = DonutSwinAttention(config, dim, num_heads, window_size=self.window_size)
525
+ self.drop_path = DonutSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
526
+ self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
527
+ self.intermediate = DonutSwinIntermediate(config, dim)
528
+ self.output = DonutSwinOutput(config, dim)
529
+
530
+ def set_shift_and_window_size(self, input_resolution):
531
+ if min(input_resolution) <= self.window_size:
532
+ # if window size is larger than input resolution, we don't partition windows
533
+ self.shift_size = 0
534
+ self.window_size = min(input_resolution)
535
+
536
+ def get_attn_mask(self, height, width, dtype):
537
+ if self.shift_size > 0:
538
+ # calculate attention mask for SW-MSA
539
+ img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
540
+ height_slices = (
541
+ slice(0, -self.window_size),
542
+ slice(-self.window_size, -self.shift_size),
543
+ slice(-self.shift_size, None),
544
+ )
545
+ width_slices = (
546
+ slice(0, -self.window_size),
547
+ slice(-self.window_size, -self.shift_size),
548
+ slice(-self.shift_size, None),
549
+ )
550
+ count = 0
551
+ for height_slice in height_slices:
552
+ for width_slice in width_slices:
553
+ img_mask[:, height_slice, width_slice, :] = count
554
+ count += 1
555
+
556
+ mask_windows = window_partition(img_mask, self.window_size)
557
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
558
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
559
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
560
+ else:
561
+ attn_mask = None
562
+ return attn_mask
563
+
564
+ def maybe_pad(self, hidden_states, height, width):
565
+ pad_right = (self.window_size - width % self.window_size) % self.window_size
566
+ pad_bottom = (self.window_size - height % self.window_size) % self.window_size
567
+ pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
568
+ hidden_states = nn.functional.pad(hidden_states, pad_values)
569
+ return hidden_states, pad_values
570
+
571
+ def forward(
572
+ self,
573
+ hidden_states: torch.Tensor,
574
+ input_dimensions: Tuple[int, int],
575
+ head_mask: Optional[torch.FloatTensor] = None,
576
+ output_attentions: Optional[bool] = False,
577
+ always_partition: Optional[bool] = False,
578
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
579
+ if not always_partition:
580
+ self.set_shift_and_window_size(input_dimensions)
581
+ else:
582
+ pass
583
+ height, width = input_dimensions
584
+ batch_size, _, channels = hidden_states.size()
585
+ shortcut = hidden_states
586
+
587
+ hidden_states = self.layernorm_before(hidden_states)
588
+
589
+ hidden_states = hidden_states.view(batch_size, height, width, channels)
590
+
591
+ # pad hidden_states to multiples of window size
592
+ hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
593
+
594
+ _, height_pad, width_pad, _ = hidden_states.shape
595
+ # cyclic shift
596
+ if self.shift_size > 0:
597
+ shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
598
+ else:
599
+ shifted_hidden_states = hidden_states
600
+
601
+ # partition windows
602
+ hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
603
+ hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
604
+ attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
605
+ if attn_mask is not None:
606
+ attn_mask = attn_mask.to(hidden_states_windows.device)
607
+
608
+ attention_outputs = self.attention(
609
+ hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
610
+ )
611
+
612
+ attention_output = attention_outputs[0]
613
+
614
+ attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
615
+ shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
616
+
617
+ # reverse cyclic shift
618
+ if self.shift_size > 0:
619
+ attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
620
+ else:
621
+ attention_windows = shifted_windows
622
+
623
+ was_padded = pad_values[3] > 0 or pad_values[5] > 0
624
+ if was_padded:
625
+ attention_windows = attention_windows[:, :height, :width, :].contiguous()
626
+
627
+ attention_windows = attention_windows.view(batch_size, height * width, channels)
628
+
629
+ hidden_states = shortcut + self.drop_path(attention_windows)
630
+
631
+ layer_output = self.layernorm_after(hidden_states)
632
+ layer_output = self.intermediate(layer_output)
633
+ layer_output = hidden_states + self.output(layer_output)
634
+
635
+ layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
636
+ return layer_outputs
637
+
638
+
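+ # Note on DonutSwinLayer above (descriptive comment, an illustration rather than
+ # part of the original file): with the default window_size=7, an input of
+ # height=width=25 is padded by `maybe_pad` to 28x28, since
+ # pad_right = (7 - 25 % 7) % 7 = 3 and pad_bottom = 3. For shifted blocks
+ # (shift_size = window_size // 2 = 3), `get_attn_mask` labels the padded map with
+ # 3x3 = 9 region ids; window positions whose ids differ receive an additive mask
+ # of -100.0, so attention never crosses region boundaries after the cyclic
+ # torch.roll shift, and the roll is undone before the windows are merged back.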
639
+ # Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->DonutSwin
640
+ class DonutSwinStage(nn.Module):
641
+ def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
642
+ super().__init__()
643
+ self.config = config
644
+ self.dim = dim
645
+ self.blocks = nn.ModuleList(
646
+ [
647
+ DonutSwinLayer(
648
+ config=config,
649
+ dim=dim,
650
+ input_resolution=input_resolution,
651
+ num_heads=num_heads,
652
+ shift_size=0 if (i % 2 == 0) else config.window_size // 2,
653
+ )
654
+ for i in range(depth)
655
+ ]
656
+ )
657
+
658
+ # patch merging layer
659
+ if downsample is not None:
660
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
661
+ else:
662
+ self.downsample = None
663
+
664
+ self.pointing = False
665
+
666
+ def forward(
667
+ self,
668
+ hidden_states: torch.Tensor,
669
+ input_dimensions: Tuple[int, int],
670
+ head_mask: Optional[torch.FloatTensor] = None,
671
+ output_attentions: Optional[bool] = False,
672
+ always_partition: Optional[bool] = False,
673
+ ) -> Tuple[torch.Tensor]:
674
+ height, width = input_dimensions
675
+ for i, layer_module in enumerate(self.blocks):
676
+ layer_head_mask = head_mask[i] if head_mask is not None else None
677
+
678
+ layer_outputs = layer_module(
679
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
680
+ )
681
+
682
+ hidden_states = layer_outputs[0]
683
+
684
+ hidden_states_before_downsampling = hidden_states
685
+ if self.downsample is not None:
686
+ height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
687
+ output_dimensions = (height, width, height_downsampled, width_downsampled)
688
+ hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
689
+ else:
690
+ output_dimensions = (height, width, height, width)
691
+
692
+ stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
693
+
694
+ if output_attentions:
695
+ stage_outputs += layer_outputs[1:]
696
+ return stage_outputs
697
+
698
+
699
+ # Copied from transformers.models.swin.modeling_swin.SwinEncoder with Swin->DonutSwin
700
+ class DonutSwinEncoder(nn.Module):
701
+ def __init__(self, config, grid_size):
702
+ super().__init__()
703
+ self.num_layers = len(config.depths)
704
+ self.config = config
705
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
706
+ self.layers = nn.ModuleList(
707
+ [
708
+ DonutSwinStage(
709
+ config=config,
710
+ dim=int(config.embed_dim * 2**i_layer),
711
+ input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
712
+ depth=config.depths[i_layer],
713
+ num_heads=config.num_heads[i_layer],
714
+ drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
715
+ downsample=DonutSwinPatchMerging if (i_layer < self.num_layers - 1) else None,
716
+ )
717
+ for i_layer in range(self.num_layers)
718
+ ]
719
+ )
720
+
721
+ self.gradient_checkpointing = False
722
+
723
+ def forward(
724
+ self,
725
+ hidden_states: torch.Tensor,
726
+ input_dimensions: Tuple[int, int],
727
+ head_mask: Optional[torch.FloatTensor] = None,
728
+ output_attentions: Optional[bool] = False,
729
+ output_hidden_states: Optional[bool] = False,
730
+ output_hidden_states_before_downsampling: Optional[bool] = False,
731
+ always_partition: Optional[bool] = False,
732
+ return_dict: Optional[bool] = True,
733
+ ) -> Union[Tuple, DonutSwinEncoderOutput]:
734
+ all_hidden_states = () if output_hidden_states else None
735
+ all_reshaped_hidden_states = () if output_hidden_states else None
736
+ all_self_attentions = () if output_attentions else None
737
+
738
+ if output_hidden_states:
739
+ batch_size, _, hidden_size = hidden_states.shape
740
+ # rearrange b (h w) c -> b c h w
741
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
742
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
743
+ all_hidden_states += (hidden_states,)
744
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
745
+
746
+ for i, layer_module in enumerate(self.layers):
747
+ layer_head_mask = head_mask[i] if head_mask is not None else None
748
+
749
+ if self.gradient_checkpointing and self.training:
750
+ layer_outputs = self._gradient_checkpointing_func(
751
+ layer_module.__call__,
752
+ hidden_states,
753
+ input_dimensions,
754
+ layer_head_mask,
755
+ output_attentions,
756
+ always_partition,
757
+ )
758
+ else:
759
+ layer_outputs = layer_module(
760
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
761
+ )
762
+
763
+ hidden_states = layer_outputs[0]
764
+ hidden_states_before_downsampling = layer_outputs[1]
765
+ output_dimensions = layer_outputs[2]
766
+
767
+ input_dimensions = (output_dimensions[-2], output_dimensions[-1])
768
+
769
+ if output_hidden_states and output_hidden_states_before_downsampling:
770
+ batch_size, _, hidden_size = hidden_states_before_downsampling.shape
771
+ # rearrange b (h w) c -> b c h w
772
+ # here we use the original (not downsampled) height and width
773
+ reshaped_hidden_state = hidden_states_before_downsampling.view(
774
+ batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
775
+ )
776
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
777
+ all_hidden_states += (hidden_states_before_downsampling,)
778
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
779
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
780
+ batch_size, _, hidden_size = hidden_states.shape
781
+ # rearrange b (h w) c -> b c h w
782
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
783
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
784
+ all_hidden_states += (hidden_states,)
785
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
786
+
787
+ if output_attentions:
788
+ all_self_attentions += layer_outputs[3:]
789
+
790
+ if not return_dict:
791
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
792
+
793
+ return DonutSwinEncoderOutput(
794
+ last_hidden_state=hidden_states,
795
+ hidden_states=all_hidden_states,
796
+ attentions=all_self_attentions,
797
+ reshaped_hidden_states=all_reshaped_hidden_states,
798
+ )
799
+
800
+
801
+ # Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->DonutSwin
802
+ class DonutSwinPreTrainedModel(PreTrainedModel):
803
+ """
804
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
805
+ models.
806
+ """
807
+
808
+ config_class = DonutSwinConfig
809
+ base_model_prefix = "swin"
810
+ main_input_name = "pixel_values"
811
+ supports_gradient_checkpointing = True
812
+
813
+ def _init_weights(self, module):
814
+ """Initialize the weights"""
815
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
816
+ # Slightly different from the TF version which uses truncated_normal for initialization
817
+ # cf https://github.com/pytorch/pytorch/pull/5617
818
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
819
+ if module.bias is not None:
820
+ module.bias.data.zero_()
821
+ elif isinstance(module, nn.LayerNorm):
822
+ module.bias.data.zero_()
823
+ module.weight.data.fill_(1.0)
824
+
825
+
826
+ SWIN_START_DOCSTRING = r"""
827
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
828
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
829
+ behavior.
830
+
831
+ Parameters:
832
+ config ([`DonutSwinConfig`]): Model configuration class with all the parameters of the model.
833
+ Initializing with a config file does not load the weights associated with the model, only the
834
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
835
+ """
836
+
837
+ SWIN_INPUTS_DOCSTRING = r"""
838
+ Args:
839
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
840
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
841
+ [`DonutImageProcessor.__call__`] for details.
842
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
843
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
844
+
845
+ - 1 indicates the head is **not masked**,
846
+ - 0 indicates the head is **masked**.
847
+
848
+ output_attentions (`bool`, *optional*):
849
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
850
+ tensors for more detail.
851
+ output_hidden_states (`bool`, *optional*):
852
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
853
+ more detail.
854
+ return_dict (`bool`, *optional*):
855
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
856
+ """
857
+
858
+
859
+ @add_start_docstrings(
860
+ "The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.",
861
+ SWIN_START_DOCSTRING,
862
+ )
863
+ class DonutSwinModel(DonutSwinPreTrainedModel):
864
+ def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
865
+ super().__init__(config)
866
+ self.config = config
867
+ self.num_layers = len(config.depths)
868
+ self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
869
+
870
+ self.embeddings = DonutSwinEmbeddings(config, use_mask_token=use_mask_token)
871
+ self.encoder = DonutSwinEncoder(config, self.embeddings.patch_grid)
872
+
873
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
874
+
875
+ # Initialize weights and apply final processing
876
+ self.post_init()
877
+
878
+ def get_input_embeddings(self):
879
+ return self.embeddings.patch_embeddings
880
+
881
+ def _prune_heads(self, heads_to_prune):
882
+ """
883
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
884
+ class PreTrainedModel
885
+ """
886
+ for layer, heads in heads_to_prune.items():
887
+ self.encoder.layer[layer].attention.prune_heads(heads)
888
+
889
+ @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
890
+ @add_code_sample_docstrings(
891
+ checkpoint=_CHECKPOINT_FOR_DOC,
892
+ output_type=DonutSwinModelOutput,
893
+ config_class=_CONFIG_FOR_DOC,
894
+ modality="vision",
895
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
896
+ )
897
+ def forward(
898
+ self,
899
+ pixel_values: Optional[torch.FloatTensor] = None,
900
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
901
+ head_mask: Optional[torch.FloatTensor] = None,
902
+ output_attentions: Optional[bool] = None,
903
+ output_hidden_states: Optional[bool] = None,
904
+ return_dict: Optional[bool] = None,
905
+ ) -> Union[Tuple, DonutSwinModelOutput]:
906
+ r"""
907
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
908
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
909
+ """
910
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
911
+ output_hidden_states = (
912
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
913
+ )
914
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
915
+
916
+ if pixel_values is None:
917
+ raise ValueError("You have to specify pixel_values")
918
+
919
+ # Prepare head mask if needed
920
+ # 1.0 in head_mask indicate we keep the head
921
+ # attention_probs has shape bsz x n_heads x N x N
922
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
923
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
924
+ head_mask = self.get_head_mask(head_mask, len(self.config.depths))
925
+
926
+ embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
927
+
928
+ encoder_outputs = self.encoder(
929
+ embedding_output,
930
+ input_dimensions,
931
+ head_mask=head_mask,
932
+ output_attentions=output_attentions,
933
+ output_hidden_states=output_hidden_states,
934
+ return_dict=return_dict,
935
+ )
936
+
937
+ sequence_output = encoder_outputs[0]
938
+
939
+ pooled_output = None
940
+ if self.pooler is not None:
941
+ pooled_output = self.pooler(sequence_output.transpose(1, 2))
942
+ pooled_output = torch.flatten(pooled_output, 1)
943
+
944
+ if not return_dict:
945
+ output = (sequence_output, pooled_output) + encoder_outputs[1:]
946
+
947
+ return output
948
+
949
+ return DonutSwinModelOutput(
950
+ last_hidden_state=sequence_output,
951
+ pooler_output=pooled_output,
952
+ hidden_states=encoder_outputs.hidden_states,
953
+ attentions=encoder_outputs.attentions,
954
+ reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
955
+ )
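
As a quick sanity check of the `DonutSwinModel` defined above, here is a minimal sketch (editorial, not part of the committed file) that runs a randomly initialized model on dummy pixel values; the config values are illustrative assumptions rather than those of a released checkpoint.

```python
import torch
from transformers import DonutSwinConfig, DonutSwinModel

# Illustrative config: 224x224 RGB input split into 4x4 patches (assumed values).
config = DonutSwinConfig(image_size=224, patch_size=4, num_channels=3)
model = DonutSwinModel(config).eval()

pixel_values = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)
with torch.no_grad():
    outputs = model(pixel_values)

# Final-stage patch features and the average-pooled summary vector.
print(outputs.last_hidden_state.shape)  # torch.Size([1, 49, 768]) with these settings
print(outputs.pooler_output.shape)      # torch.Size([1, 768])
```
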
llmeval-env/lib/python3.10/site-packages/transformers/models/donut/processing_donut.py ADDED
@@ -0,0 +1,196 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Donut.
17
+ """
18
+ import re
19
+ import warnings
20
+ from contextlib import contextmanager
21
+
22
+ from ...processing_utils import ProcessorMixin
23
+
24
+
25
+ class DonutProcessor(ProcessorMixin):
26
+ r"""
27
+ Constructs a Donut processor which wraps a Donut image processor and an XLMRoBERTa tokenizer into a single
28
+ processor.
29
+
30
+ [`DonutProcessor`] offers all the functionalities of [`DonutImageProcessor`] and
31
+ [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See the [`~DonutProcessor.__call__`] and
32
+ [`~DonutProcessor.decode`] for more information.
33
+
34
+ Args:
35
+ image_processor ([`DonutImageProcessor`], *optional*):
36
+ An instance of [`DonutImageProcessor`]. The image processor is a required input.
37
+ tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*):
38
+ An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
39
+ """
40
+
41
+ attributes = ["image_processor", "tokenizer"]
42
+ image_processor_class = "AutoImageProcessor"
43
+ tokenizer_class = "AutoTokenizer"
44
+
45
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
46
+ feature_extractor = None
47
+ if "feature_extractor" in kwargs:
48
+ warnings.warn(
49
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
50
+ " instead.",
51
+ FutureWarning,
52
+ )
53
+ feature_extractor = kwargs.pop("feature_extractor")
54
+
55
+ image_processor = image_processor if image_processor is not None else feature_extractor
56
+ if image_processor is None:
57
+ raise ValueError("You need to specify an `image_processor`.")
58
+ if tokenizer is None:
59
+ raise ValueError("You need to specify a `tokenizer`.")
60
+
61
+ super().__init__(image_processor, tokenizer)
62
+ self.current_processor = self.image_processor
63
+ self._in_target_context_manager = False
64
+
65
+ def __call__(self, *args, **kwargs):
66
+ """
67
+ When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
68
+ [`~AutoImageProcessor.__call__`] and returns its output. If used in the context
69
+ [`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's
70
+ [`~DonutTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
71
+ """
72
+ # For backward compatibility
73
+ if self._in_target_context_manager:
74
+ return self.current_processor(*args, **kwargs)
75
+
76
+ images = kwargs.pop("images", None)
77
+ text = kwargs.pop("text", None)
78
+ if len(args) > 0:
79
+ images = args[0]
80
+ args = args[1:]
81
+
82
+ if images is None and text is None:
83
+ raise ValueError("You need to specify either an `images` or `text` input to process.")
84
+
85
+ if images is not None:
86
+ inputs = self.image_processor(images, *args, **kwargs)
87
+ if text is not None:
88
+ encodings = self.tokenizer(text, **kwargs)
89
+
90
+ if text is None:
91
+ return inputs
92
+ elif images is None:
93
+ return encodings
94
+ else:
95
+ inputs["labels"] = encodings["input_ids"]
96
+ return inputs
97
+
98
+ def batch_decode(self, *args, **kwargs):
99
+ """
100
+ This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
101
+ to the docstring of this method for more information.
102
+ """
103
+ return self.tokenizer.batch_decode(*args, **kwargs)
104
+
105
+ def decode(self, *args, **kwargs):
106
+ """
107
+ This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
108
+ docstring of this method for more information.
109
+ """
110
+ return self.tokenizer.decode(*args, **kwargs)
111
+
112
+ @contextmanager
113
+ def as_target_processor(self):
114
+ """
115
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Donut.
116
+ """
117
+ warnings.warn(
118
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
119
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
120
+ "your images inputs, or in a separate call."
121
+ )
122
+ self._in_target_context_manager = True
123
+ self.current_processor = self.tokenizer
124
+ yield
125
+ self.current_processor = self.image_processor
126
+ self._in_target_context_manager = False
127
+
128
+ def token2json(self, tokens, is_inner_value=False, added_vocab=None):
129
+ """
130
+ Convert a (generated) token sequence into an ordered JSON format.
131
+ """
132
+ if added_vocab is None:
133
+ added_vocab = self.tokenizer.get_added_vocab()
134
+
135
+ output = {}
136
+
137
+ while tokens:
138
+ start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
139
+ if start_token is None:
140
+ break
141
+ key = start_token.group(1)
142
+ key_escaped = re.escape(key)
143
+
144
+ end_token = re.search(rf"</s_{key_escaped}>", tokens, re.IGNORECASE)
145
+ start_token = start_token.group()
146
+ if end_token is None:
147
+ tokens = tokens.replace(start_token, "")
148
+ else:
149
+ end_token = end_token.group()
150
+ start_token_escaped = re.escape(start_token)
151
+ end_token_escaped = re.escape(end_token)
152
+ content = re.search(
153
+ f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE | re.DOTALL
154
+ )
155
+ if content is not None:
156
+ content = content.group(1).strip()
157
+ if r"<s_" in content and r"</s_" in content: # non-leaf node
158
+ value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
159
+ if value:
160
+ if len(value) == 1:
161
+ value = value[0]
162
+ output[key] = value
163
+ else: # leaf nodes
164
+ output[key] = []
165
+ for leaf in content.split(r"<sep/>"):
166
+ leaf = leaf.strip()
167
+ if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
168
+ leaf = leaf[1:-2] # for categorical special tokens
169
+ output[key].append(leaf)
170
+ if len(output[key]) == 1:
171
+ output[key] = output[key][0]
172
+
173
+ tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
174
+ if tokens[:6] == r"<sep/>": # non-leaf nodes
175
+ return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
176
+
177
+ if len(output):
178
+ return [output] if is_inner_value else output
179
+ else:
180
+ return [] if is_inner_value else {"text_sequence": tokens}
181
+
182
+ @property
183
+ def feature_extractor_class(self):
184
+ warnings.warn(
185
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
186
+ FutureWarning,
187
+ )
188
+ return self.image_processor_class
189
+
190
+ @property
191
+ def feature_extractor(self):
192
+ warnings.warn(
193
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
194
+ FutureWarning,
195
+ )
196
+ return self.image_processor
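
To illustrate the `token2json` method above, a short sketch (assuming the public `naver-clova-ix/donut-base` checkpoint is available; the tag names in the sequence are made up for the example):

```python
from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")

# Illustrative generated sequence with nested <s_...>/</s_...> tags.
sequence = "<s_menu><s_nm>Latte</s_nm><s_price>4.50</s_price></s_menu>"
print(processor.token2json(sequence))
# {'menu': {'nm': 'Latte', 'price': '4.50'}}
```
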
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__init__.py ADDED
@@ -0,0 +1,65 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_encodec": [
25
+ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
26
+ "EncodecConfig",
27
+ ],
28
+ "feature_extraction_encodec": ["EncodecFeatureExtractor"],
29
+ }
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_encodec"] = [
38
+ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "EncodecModel",
40
+ "EncodecPreTrainedModel",
41
+ ]
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_encodec import (
45
+ ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
46
+ EncodecConfig,
47
+ )
48
+ from .feature_extraction_encodec import EncodecFeatureExtractor
49
+
50
+ try:
51
+ if not is_torch_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .modeling_encodec import (
57
+ ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
58
+ EncodecModel,
59
+ EncodecPreTrainedModel,
60
+ )
61
+
62
+ else:
63
+ import sys
64
+
65
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
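
For context, the `_LazyModule` indirection above means the torch-dependent `modeling_encodec` module is only imported when one of its symbols is first accessed, e.g. (a sketch assuming PyTorch is installed):

```python
from transformers import EncodecConfig   # config import does not require torch
from transformers import EncodecModel    # triggers the lazy import of modeling_encodec

model = EncodecModel(EncodecConfig())    # randomly initialized 24 kHz-style model
```
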
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (991 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/configuration_encodec.cpython-310.pyc ADDED
Binary file (7.45 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/convert_encodec_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/feature_extraction_encodec.cpython-310.pyc ADDED
Binary file (8.02 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/modeling_encodec.cpython-310.pyc ADDED
Binary file (26.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/configuration_encodec.py ADDED
@@ -0,0 +1,193 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ EnCodec model configuration"""
16
+
17
+
18
+ import math
19
+ from typing import Optional
20
+
21
+ import numpy as np
22
+
23
+ from ...configuration_utils import PretrainedConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class EncodecConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of an [`EncodecModel`]. It is used to instantiate a
36
+ Encodec model according to the specified arguments, defining the model architecture. Instantiating a configuration
37
+ with the defaults will yield a similar configuration to that of the
38
+ [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ target_bandwidths (`List[float]`, *optional*, defaults to `[1.5, 3.0, 6.0, 12.0, 24.0]`):
45
+ The range of different bandwidths the model can encode audio with.
46
+ sampling_rate (`int`, *optional*, defaults to 24000):
47
+ The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
48
+ audio_channels (`int`, *optional*, defaults to 1):
49
+ Number of channels in the audio data. Either 1 for mono or 2 for stereo.
50
+ normalize (`bool`, *optional*, defaults to `False`):
51
+ Whether the audio shall be normalized when passed.
52
+ chunk_length_s (`float`, *optional*):
53
+ If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
54
+ overlap (`float`, *optional*):
55
+ Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
56
+ formula: `int((1.0 - self.overlap) * self.chunk_length)`.
57
+ hidden_size (`int`, *optional*, defaults to 128):
58
+ Intermediate representation dimension.
59
+ num_filters (`int`, *optional*, defaults to 32):
60
+ Number of convolution kernels in the first `EncodecConv1d` downsampling layer.
61
+ num_residual_layers (`int`, *optional*, defaults to 1):
62
+ Number of residual layers.
63
+ upsampling_ratios (`Sequence[int]` , *optional*, defaults to `[8, 5, 4, 2]`):
64
+ Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it
65
+ will use the ratios in reverse order from the ones specified here, which must match the decoder order.
66
+ norm_type (`str`, *optional*, defaults to `"weight_norm"`):
67
+ Normalization method. Should be in `["weight_norm", "time_group_norm"]`
68
+ kernel_size (`int`, *optional*, defaults to 7):
69
+ Kernel size for the initial convolution.
70
+ last_kernel_size (`int`, *optional*, defaults to 7):
71
+ Kernel size for the last convolution layer.
72
+ residual_kernel_size (`int`, *optional*, defaults to 3):
73
+ Kernel size for the residual layers.
74
+ dilation_growth_rate (`int`, *optional*, defaults to 2):
75
+ How much to increase the dilation with each layer.
76
+ use_causal_conv (`bool`, *optional*, defaults to `True`):
77
+ Whether to use fully causal convolution.
78
+ pad_mode (`str`, *optional*, defaults to `"reflect"`):
79
+ Padding mode for the convolutions.
80
+ compress (`int`, *optional*, defaults to 2):
81
+ Reduced dimensionality in residual branches (from Demucs v3).
82
+ num_lstm_layers (`int`, *optional*, defaults to 2):
83
+ Number of LSTM layers at the end of the encoder.
84
+ trim_right_ratio (`float`, *optional*, defaults to 1.0):
85
+ Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If
86
+ equal to 1.0, it means that all the trimming is done at the right.
87
+ codebook_size (`int`, *optional*, defaults to 1024):
88
+ Number of discrete codes that make up the VQ-VAE.
89
+ codebook_dim (`int`, *optional*):
90
+ Dimension of the codebook vectors. If not defined, uses `hidden_size`.
91
+ use_conv_shortcut (`bool`, *optional*, defaults to `True`):
92
+ Whether to use a convolutional layer as the 'skip' connection in the `EncodecResnetBlock` block. If False,
93
+ an identity function will be used, giving a generic residual connection.
94
+
95
+ Example:
96
+
97
+ ```python
98
+ >>> from transformers import EncodecModel, EncodecConfig
99
+
100
+ >>> # Initializing a "facebook/encodec_24khz" style configuration
101
+ >>> configuration = EncodecConfig()
102
+
103
+ >>> # Initializing a model (with random weights) from the "facebook/encodec_24khz" style configuration
104
+ >>> model = EncodecModel(configuration)
105
+
106
+ >>> # Accessing the model configuration
107
+ >>> configuration = model.config
108
+ ```"""
109
+
110
+ model_type = "encodec"
111
+
112
+ def __init__(
113
+ self,
114
+ target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
115
+ sampling_rate=24_000,
116
+ audio_channels=1,
117
+ normalize=False,
118
+ chunk_length_s=None,
119
+ overlap=None,
120
+ hidden_size=128,
121
+ num_filters=32,
122
+ num_residual_layers=1,
123
+ upsampling_ratios=[8, 5, 4, 2],
124
+ norm_type="weight_norm",
125
+ kernel_size=7,
126
+ last_kernel_size=7,
127
+ residual_kernel_size=3,
128
+ dilation_growth_rate=2,
129
+ use_causal_conv=True,
130
+ pad_mode="reflect",
131
+ compress=2,
132
+ num_lstm_layers=2,
133
+ trim_right_ratio=1.0,
134
+ codebook_size=1024,
135
+ codebook_dim=None,
136
+ use_conv_shortcut=True,
137
+ **kwargs,
138
+ ):
139
+ self.target_bandwidths = target_bandwidths
140
+ self.sampling_rate = sampling_rate
141
+ self.audio_channels = audio_channels
142
+ self.normalize = normalize
143
+ self.chunk_length_s = chunk_length_s
144
+ self.overlap = overlap
145
+ self.hidden_size = hidden_size
146
+ self.num_filters = num_filters
147
+ self.num_residual_layers = num_residual_layers
148
+ self.upsampling_ratios = upsampling_ratios
149
+ self.norm_type = norm_type
150
+ self.kernel_size = kernel_size
151
+ self.last_kernel_size = last_kernel_size
152
+ self.residual_kernel_size = residual_kernel_size
153
+ self.dilation_growth_rate = dilation_growth_rate
154
+ self.use_causal_conv = use_causal_conv
155
+ self.pad_mode = pad_mode
156
+ self.compress = compress
157
+ self.num_lstm_layers = num_lstm_layers
158
+ self.trim_right_ratio = trim_right_ratio
159
+ self.codebook_size = codebook_size
160
+ self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
161
+ self.use_conv_shortcut = use_conv_shortcut
162
+
163
+ if self.norm_type not in ["weight_norm", "time_group_norm"]:
164
+ raise ValueError(
165
+ f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
166
+ )
167
+
168
+ super().__init__(**kwargs)
169
+
170
+ # This is a property because you might want to change the chunk_length_s on the fly
171
+ @property
172
+ def chunk_length(self) -> Optional[int]:
173
+ if self.chunk_length_s is None:
174
+ return None
175
+ else:
176
+ return int(self.chunk_length_s * self.sampling_rate)
177
+
178
+ # This is a property because you might want to change the chunk_length_s on the fly
179
+ @property
180
+ def chunk_stride(self) -> Optional[int]:
181
+ if self.chunk_length_s is None or self.overlap is None:
182
+ return None
183
+ else:
184
+ return max(1, int((1.0 - self.overlap) * self.chunk_length))
185
+
186
+ @property
187
+ def frame_rate(self) -> int:
188
+ hop_length = np.prod(self.upsampling_ratios)
189
+ return math.ceil(self.sampling_rate / hop_length)
190
+
191
+ @property
192
+ def num_quantizers(self) -> int:
193
+ return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
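
The derived properties at the end of `EncodecConfig` are easiest to read with concrete numbers; a sketch using the default 24 kHz values (the chunking arguments are illustrative, not defaults):

```python
from transformers import EncodecConfig

config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)

print(config.frame_rate)      # ceil(24000 / (8 * 5 * 4 * 2)) = 75 frames per second
print(config.num_quantizers)  # int(1000 * 24.0 // (75 * 10)) = 32 codebooks
print(config.chunk_length)    # int(1.0 * 24000) = 24000 samples per chunk
print(config.chunk_stride)    # max(1, int((1 - 0.01) * 24000)) = 23760 samples
```
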
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,365 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert EnCodec checkpoints."""
16
+
17
+ import argparse
18
+
19
+ import torch
20
+
21
+ from transformers import (
22
+ EncodecConfig,
23
+ EncodecFeatureExtractor,
24
+ EncodecModel,
25
+ logging,
26
+ )
27
+
28
+
29
+ # checkpoints downloaded from:
30
+ # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
31
+ # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
32
+ # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
33
+
34
+
35
+ logging.set_verbosity_info()
36
+ logger = logging.get_logger("transformers.models.encodec")
37
+
38
+ MAPPING_QUANTIZER = {
39
+ "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
40
+ "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
41
+ "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
42
+ "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
43
+ }
44
+ MAPPING_ENCODER = {
45
+ "encoder.model.0.conv.conv": "encoder.layers.0.conv",
46
+ "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
47
+ "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
48
+ "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
49
+ "encoder.model.3.conv.conv": "encoder.layers.3.conv",
50
+ "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
51
+ "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
52
+ "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
53
+ "encoder.model.6.conv.conv": "encoder.layers.6.conv",
54
+ "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
55
+ "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
56
+ "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
57
+ "encoder.model.9.conv.conv": "encoder.layers.9.conv",
58
+ "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
59
+ "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
60
+ "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
61
+ "encoder.model.12.conv.conv": "encoder.layers.12.conv",
62
+ "encoder.model.13.lstm": "encoder.layers.13.lstm",
63
+ "encoder.model.15.conv.conv": "encoder.layers.15.conv",
64
+ }
65
+ MAPPING_ENCODER_48K = {
66
+ "encoder.model.0.conv.norm": "encoder.layers.0.norm",
67
+ "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
68
+ "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
69
+ "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
70
+ "encoder.model.3.conv.norm": "encoder.layers.3.norm",
71
+ "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
72
+ "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
73
+ "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
74
+ "encoder.model.6.conv.norm": "encoder.layers.6.norm",
75
+ "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
76
+ "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
77
+ "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
78
+ "encoder.model.9.conv.norm": "encoder.layers.9.norm",
79
+ "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
80
+ "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
81
+ "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
82
+ "encoder.model.12.conv.norm": "encoder.layers.12.norm",
83
+ "encoder.model.15.conv.norm": "encoder.layers.15.norm",
84
+ }
85
+ MAPPING_DECODER = {
86
+ "decoder.model.0.conv.conv": "decoder.layers.0.conv",
87
+ "decoder.model.1.lstm": "decoder.layers.1.lstm",
88
+ "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
89
+ "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
90
+ "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
91
+ "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
92
+ "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
93
+ "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
94
+ "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
95
+ "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
96
+ "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
97
+ "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
98
+ "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
99
+ "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
100
+ "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
101
+ "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
102
+ "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
103
+ "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
104
+ "decoder.model.15.conv.conv": "decoder.layers.15.conv",
105
+ }
106
+ MAPPING_DECODER_48K = {
107
+ "decoder.model.0.conv.norm": "decoder.layers.0.norm",
108
+ "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
109
+ "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
110
+ "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
111
+ "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
112
+ "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
113
+ "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
114
+ "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
115
+ "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
116
+ "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
117
+ "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
118
+ "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
119
+ "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
120
+ "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
121
+ "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
122
+ "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
123
+ "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
124
+ "decoder.model.15.conv.norm": "decoder.layers.15.norm",
125
+ }
126
+ MAPPING_24K = {
127
+ **MAPPING_QUANTIZER,
128
+ **MAPPING_ENCODER,
129
+ **MAPPING_DECODER,
130
+ }
131
+ MAPPING_48K = {
132
+ **MAPPING_QUANTIZER,
133
+ **MAPPING_ENCODER,
134
+ **MAPPING_ENCODER_48K,
135
+ **MAPPING_DECODER,
136
+ **MAPPING_DECODER_48K,
137
+ }
138
+ TOP_LEVEL_KEYS = []
139
+ IGNORE_KEYS = []
140
+
141
+
142
+ def set_recursively(hf_pointer, key, value, full_name, weight_type):
143
+ for attribute in key.split("."):
144
+ hf_pointer = getattr(hf_pointer, attribute)
145
+
146
+ if weight_type is not None:
147
+ hf_shape = getattr(hf_pointer, weight_type).shape
148
+ else:
149
+ hf_shape = hf_pointer.shape
150
+
151
+ if hf_shape != value.shape:
152
+ raise ValueError(
153
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
154
+ f" {value.shape} for {full_name}"
155
+ )
156
+
157
+ if weight_type == "weight":
158
+ hf_pointer.weight.data = value
159
+ elif weight_type == "weight_g":
160
+ hf_pointer.weight_g.data = value
161
+ elif weight_type == "weight_v":
162
+ hf_pointer.weight_v.data = value
163
+ elif weight_type == "bias":
164
+ hf_pointer.bias.data = value
165
+ elif weight_type == "running_mean":
166
+ hf_pointer.running_mean.data = value
167
+ elif weight_type == "running_var":
168
+ hf_pointer.running_var.data = value
169
+ elif weight_type == "num_batches_tracked":
170
+ hf_pointer.num_batches_tracked.data = value
171
+ elif weight_type == "weight_ih_l0":
172
+ hf_pointer.weight_ih_l0.data = value
173
+ elif weight_type == "weight_hh_l0":
174
+ hf_pointer.weight_hh_l0.data = value
175
+ elif weight_type == "bias_ih_l0":
176
+ hf_pointer.bias_ih_l0.data = value
177
+ elif weight_type == "bias_hh_l0":
178
+ hf_pointer.bias_hh_l0.data = value
179
+ elif weight_type == "weight_ih_l1":
180
+ hf_pointer.weight_ih_l1.data = value
181
+ elif weight_type == "weight_hh_l1":
182
+ hf_pointer.weight_hh_l1.data = value
183
+ elif weight_type == "bias_ih_l1":
184
+ hf_pointer.bias_ih_l1.data = value
185
+ elif weight_type == "bias_hh_l1":
186
+ hf_pointer.bias_hh_l1.data = value
187
+ else:
188
+ hf_pointer.data = value
189
+
190
+ logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
191
+
192
+
193
+ def should_ignore(name, ignore_keys):
194
+ for key in ignore_keys:
195
+ if key.endswith(".*"):
196
+ if name.startswith(key[:-1]):
197
+ return True
198
+ elif ".*." in key:
199
+ prefix, suffix = key.split(".*.")
200
+ if prefix in name and suffix in name:
201
+ return True
202
+ elif key in name:
203
+ return True
204
+ return False
205
+
206
+
207
+ def recursively_load_weights(orig_dict, hf_model, model_name):
208
+ unused_weights = []
209
+
210
+ if model_name == "encodec_24khz" or "encodec_32khz":
211
+ MAPPING = MAPPING_24K
212
+ elif model_name == "encodec_48khz":
213
+ MAPPING = MAPPING_48K
214
+ else:
215
+ raise ValueError(f"Unsupported model: {model_name}")
216
+
217
+ for name, value in orig_dict.items():
218
+ if should_ignore(name, IGNORE_KEYS):
219
+ logger.info(f"{name} was ignored")
220
+ continue
221
+
222
+ is_used = False
223
+ for key, mapped_key in MAPPING.items():
224
+ if "*" in key:
225
+ prefix, suffix = key.split(".*.")
226
+ if prefix in name and suffix in name:
227
+ key = suffix
228
+
229
+ if key in name:
230
+ # HACK otherwise .embed gets initialized with .embed_avg too
231
+ if key.endswith("embed") and name.endswith("embed_avg"):
232
+ continue
233
+
234
+ is_used = True
235
+ if "*" in mapped_key:
236
+ layer_index = name.split(key)[0].split(".")[-2]
237
+ mapped_key = mapped_key.replace("*", layer_index)
238
+ if "weight_g" in name:
239
+ weight_type = "weight_g"
240
+ elif "weight_v" in name:
241
+ weight_type = "weight_v"
242
+ elif "weight_ih_l0" in name:
243
+ weight_type = "weight_ih_l0"
244
+ elif "weight_hh_l0" in name:
245
+ weight_type = "weight_hh_l0"
246
+ elif "bias_ih_l0" in name:
247
+ weight_type = "bias_ih_l0"
248
+ elif "bias_hh_l0" in name:
249
+ weight_type = "bias_hh_l0"
250
+ elif "weight_ih_l1" in name:
251
+ weight_type = "weight_ih_l1"
252
+ elif "weight_hh_l1" in name:
253
+ weight_type = "weight_hh_l1"
254
+ elif "bias_ih_l1" in name:
255
+ weight_type = "bias_ih_l1"
256
+ elif "bias_hh_l1" in name:
257
+ weight_type = "bias_hh_l1"
258
+ elif "bias" in name:
259
+ weight_type = "bias"
260
+ elif "weight" in name:
261
+ weight_type = "weight"
262
+ elif "running_mean" in name:
263
+ weight_type = "running_mean"
264
+ elif "running_var" in name:
265
+ weight_type = "running_var"
266
+ elif "num_batches_tracked" in name:
267
+ weight_type = "num_batches_tracked"
268
+ else:
269
+ weight_type = None
270
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
271
+ continue
272
+ if not is_used:
273
+ unused_weights.append(name)
274
+
275
+ logger.warning(f"Unused weights: {unused_weights}")
276
+
277
+
278
+ @torch.no_grad()
279
+ def convert_checkpoint(
280
+ model_name,
281
+ checkpoint_path,
282
+ pytorch_dump_folder_path,
283
+ config_path=None,
284
+ repo_id=None,
285
+ ):
286
+ """
287
+ Copy/paste/tweak model's weights to transformers design.
288
+ """
289
+ if config_path is not None:
290
+ config = EncodecConfig.from_pretrained(config_path)
291
+ else:
292
+ config = EncodecConfig()
293
+
294
+ if model_name == "encodec_24khz":
295
+ pass # config is already correct
296
+ elif model_name == "encodec_32khz":
297
+ config.upsampling_ratios = [8, 5, 4, 4]
298
+ config.target_bandwidths = [2.2]
299
+ config.num_filters = 64
300
+ config.sampling_rate = 32_000
301
+ config.codebook_size = 2048
302
+ config.use_causal_conv = False
303
+ config.normalize = False
304
+ config.use_conv_shortcut = False
305
+ elif model_name == "encodec_48khz":
306
+ config.upsampling_ratios = [8, 5, 4, 2]
307
+ config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
308
+ config.sampling_rate = 48_000
309
+ config.audio_channels = 2
310
+ config.use_causal_conv = False
311
+ config.norm_type = "time_group_norm"
312
+ config.normalize = True
313
+ config.chunk_length_s = 1.0
314
+ config.overlap = 0.01
315
+ else:
316
+ raise ValueError(f"Unknown model name: {model_name}")
317
+
318
+ model = EncodecModel(config)
319
+
320
+ feature_extractor = EncodecFeatureExtractor(
321
+ feature_size=config.audio_channels,
322
+ sampling_rate=config.sampling_rate,
323
+ chunk_length_s=config.chunk_length_s,
324
+ overlap=config.overlap,
325
+ )
326
+ feature_extractor.save_pretrained(pytorch_dump_folder_path)
327
+
328
+ original_checkpoint = torch.load(checkpoint_path)
329
+ if "best_state" in original_checkpoint:
330
+ # we might have a training state saved, in which case discard the yaml results and just retain the weights
331
+ original_checkpoint = original_checkpoint["best_state"]
332
+ recursively_load_weights(original_checkpoint, model, model_name)
333
+ model.save_pretrained(pytorch_dump_folder_path)
334
+
335
+ if repo_id:
336
+ print("Pushing to the hub...")
337
+ feature_extractor.push_to_hub(repo_id)
338
+ model.push_to_hub(repo_id)
339
+
340
+
341
+ if __name__ == "__main__":
342
+ parser = argparse.ArgumentParser()
343
+ parser.add_argument(
344
+ "--model",
345
+ default="encodec_24khz",
346
+ type=str,
347
+ help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
348
+ )
349
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
350
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
351
+ parser.add_argument(
352
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
353
+ )
354
+ parser.add_argument(
355
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
356
+ )
357
+
358
+ args = parser.parse_args()
359
+ convert_checkpoint(
360
+ args.model,
361
+ args.checkpoint_path,
362
+ args.pytorch_dump_folder_path,
363
+ args.config_path,
364
+ args.push_to_hub,
365
+ )
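
The converter above can also be driven from Python instead of the CLI; a sketch assuming the original 24 kHz checkpoint (the first URL listed at the top of the script) has been downloaded locally, with a hypothetical output folder name:

```python
from transformers.models.encodec.convert_encodec_checkpoint_to_pytorch import convert_checkpoint

convert_checkpoint(
    model_name="encodec_24khz",
    checkpoint_path="./encodec_24khz-d7cc33bc.th",   # downloaded original checkpoint
    pytorch_dump_folder_path="./encodec_24khz-hf",   # hypothetical output directory
)
```
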
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/feature_extraction_encodec.py ADDED
@@ -0,0 +1,206 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for EnCodec."""
16
+
17
+ from typing import List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
22
+ from ...feature_extraction_utils import BatchFeature
23
+ from ...utils import PaddingStrategy, TensorType, logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class EncodecFeatureExtractor(SequenceFeatureExtractor):
30
+ r"""
31
+ Constructs an EnCodec feature extractor.
32
+
33
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
34
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
35
+
36
+ Instantiating a feature extractor with the defaults will yield a similar configuration to that of the
37
+ [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
38
+
39
+ Args:
40
+ feature_size (`int`, *optional*, defaults to 1):
41
+ The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
42
+ sampling_rate (`int`, *optional*, defaults to 24000):
43
+ The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
44
+ padding_value (`float`, *optional*, defaults to 0.0):
45
+ The value that is used to fill the padding values.
46
+ chunk_length_s (`float`, *optional*):
47
+ If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
48
+ overlap (`float`, *optional*):
49
+ Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
50
+ formula: `int((1.0 - self.overlap) * self.chunk_length)`.
51
+ """
52
+
53
+ model_input_names = ["input_values", "padding_mask"]
54
+
55
+ def __init__(
56
+ self,
57
+ feature_size: int = 1,
58
+ sampling_rate: int = 24000,
59
+ padding_value: float = 0.0,
60
+ chunk_length_s: float = None,
61
+ overlap: float = None,
62
+ **kwargs,
63
+ ):
64
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
65
+ self.chunk_length_s = chunk_length_s
66
+ self.overlap = overlap
67
+
68
+ # This is a property because you might want to change the chunk_length_s on the fly
69
+ @property
70
+ def chunk_length(self) -> Optional[int]:
71
+ if self.chunk_length_s is None:
72
+ return None
73
+ else:
74
+ return int(self.chunk_length_s * self.sampling_rate)
75
+
76
+ # This is a property because you might want to change the chunk_length_s on the fly
77
+ @property
78
+ def chunk_stride(self) -> Optional[int]:
79
+ if self.chunk_length_s is None or self.overlap is None:
80
+ return None
81
+ else:
82
+ return max(1, int((1.0 - self.overlap) * self.chunk_length))
83
+
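# Editor's note (an illustrative sketch, not part of the original file): with the
# hypothetical settings chunk_length_s=1.0, sampling_rate=24000 and overlap=0.5, the two
# properties above give chunk_length = int(1.0 * 24000) = 24000 samples and
# chunk_stride = max(1, int((1.0 - 0.5) * 24000)) = 12000 samples, i.e. each chunk
# overlaps the previous one by half a chunk.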
84
+ def __call__(
85
+ self,
86
+ raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
87
+ padding: Optional[Union[bool, str, PaddingStrategy]] = None,
88
+ truncation: Optional[bool] = False,
89
+ max_length: Optional[int] = None,
90
+ return_tensors: Optional[Union[str, TensorType]] = None,
91
+ sampling_rate: Optional[int] = None,
92
+ ) -> BatchFeature:
93
+ """
94
+ Main method to featurize and prepare for the model one or several sequence(s).
95
+
96
+ Args:
97
+ raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
98
+ The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
99
+ values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
100
+ `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
101
+ (`feature_size = 2`).
102
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
103
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
104
+ index) among:
105
+
106
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
107
+ sequence is provided).
108
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
109
+ acceptable input length for the model if that argument is not provided.
110
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
111
+ lengths).
112
+ truncation (`bool`, *optional*, defaults to `False`):
113
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
114
+ max_length (`int`, *optional*):
115
+ Maximum length of the returned list and optionally padding length (see above).
116
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
117
+ If set, will return tensors instead of list of python integers. Acceptable values are:
118
+
119
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
120
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
121
+ - `'np'`: Return Numpy `np.ndarray` objects.
122
+ sampling_rate (`int`, *optional*):
123
+ The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
124
+ `sampling_rate` at the forward call to prevent silent errors.
125
+ """
126
+ if sampling_rate is not None:
127
+ if sampling_rate != self.sampling_rate:
128
+ raise ValueError(
129
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
130
+ f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
131
+ f" {self.sampling_rate} and not {sampling_rate}."
132
+ )
133
+ else:
134
+ logger.warning(
135
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
136
+ "Failing to do so can result in silent errors that might be hard to debug."
137
+ )
138
+
139
+ if padding and truncation:
140
+ raise ValueError("Both padding and truncation were set. Make sure you only set one.")
141
+ elif padding is None:
142
+ # by default let's pad the inputs
143
+ padding = True
144
+
145
+ is_batched = bool(
146
+ isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
147
+ )
148
+
149
+ if is_batched:
150
+ raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
151
+ elif not is_batched and not isinstance(raw_audio, np.ndarray):
152
+ raw_audio = np.asarray(raw_audio, dtype=np.float32)
153
+ elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
154
+ raw_audio = raw_audio.astype(np.float32)
155
+
156
+ # always return batch
157
+ if not is_batched:
158
+ raw_audio = [np.asarray(raw_audio).T]
159
+
160
+ # verify inputs are valid
161
+ for idx, example in enumerate(raw_audio):
162
+ if example.ndim > 2:
163
+ raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
164
+ if self.feature_size == 1 and example.ndim != 1:
165
+ raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
166
+ if self.feature_size == 2 and example.shape[-1] != 2:
167
+ raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
168
+
169
+ padded_inputs = None
170
+ input_values = BatchFeature({"input_values": raw_audio})
171
+ if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
172
+ if truncation:
173
+ max_length = min(array.shape[0] for array in raw_audio)
174
+ nb_step = int(np.floor(max_length / self.chunk_stride))
175
+ max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
176
+ elif padding:
177
+ max_length = max(array.shape[0] for array in raw_audio)
178
+ nb_step = int(np.ceil(max_length / self.chunk_stride))
179
+ max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
180
+ padding = "max_length"
181
+ else:
182
+ padded_inputs = input_values
183
+
184
+ # normal padding on batch
185
+ if padded_inputs is None:
186
+ padded_inputs = self.pad(
187
+ input_values,
188
+ max_length=max_length,
189
+ truncation=truncation,
190
+ padding=padding,
191
+ return_attention_mask=padding,
192
+ )
193
+ if padding:
194
+ padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
195
+
196
+ input_values = []
197
+ for example in padded_inputs.pop("input_values"):
198
+ if self.feature_size == 1:
199
+ example = example[..., None]
200
+ input_values.append(example.T)
201
+
202
+ padded_inputs["input_values"] = input_values
203
+ if return_tensors is not None:
204
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
205
+
206
+ return padded_inputs
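The short sketch below is an editorial illustration, not part of the committed file: it shows how the feature extractor above is typically called. The chunking values and the silent test signal are assumptions chosen for the example, not defaults of any released checkpoint.

```python
import numpy as np

from transformers import EncodecFeatureExtractor

# Illustrative chunking settings: 1 s chunks with 50% overlap at 24 kHz.
feature_extractor = EncodecFeatureExtractor(
    feature_size=1, sampling_rate=24000, chunk_length_s=1.0, overlap=0.5
)

raw_audio = np.zeros(30000, dtype=np.float32)  # 1.25 s of (silent) mono audio
inputs = feature_extractor(raw_audio=raw_audio, sampling_rate=24000, return_tensors="pt")

# With chunk_length=24000 and chunk_stride=12000, the input is padded to 48000 samples:
# input_values has shape (1, 1, 48000) and padding_mask has shape (1, 48000).
print(inputs["input_values"].shape, inputs["padding_mask"].shape)
```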
llmeval-env/lib/python3.10/site-packages/transformers/models/encodec/modeling_encodec.py ADDED
@@ -0,0 +1,810 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch EnCodec model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...modeling_utils import PreTrainedModel
26
+ from ...utils import (
27
+ ModelOutput,
28
+ add_start_docstrings,
29
+ add_start_docstrings_to_model_forward,
30
+ logging,
31
+ replace_return_docstrings,
32
+ )
33
+ from .configuration_encodec import EncodecConfig
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+
39
+ # General docstring
40
+ _CONFIG_FOR_DOC = "EncodecConfig"
41
+
42
+
43
+ from ..deprecated._archive_maps import ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
44
+
45
+
46
+ @dataclass
47
+ class EncodecOutput(ModelOutput):
48
+ """
49
+ Args:
50
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
51
+ Discrete code embeddings computed using `model.encode`.
52
+ audio_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
53
+ Decoded audio values, obtained using the decoder part of Encodec.
54
+ """
55
+
56
+ audio_codes: torch.LongTensor = None
57
+ audio_values: torch.FloatTensor = None
58
+
59
+
60
+ @dataclass
61
+ class EncodecEncoderOutput(ModelOutput):
62
+ """
63
+ Args:
64
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
65
+ Discrete code embeddings computed using `model.encode`.
66
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
67
+ Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding.
68
+ """
69
+
70
+ audio_codes: torch.LongTensor = None
71
+ audio_scales: torch.FloatTensor = None
72
+
73
+
74
+ @dataclass
75
+ class EncodecDecoderOutput(ModelOutput):
76
+ """
77
+ Args:
78
+ audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
79
+ Decoded audio values, obtained using the decoder part of Encodec.
80
+ """
81
+
82
+ audio_values: torch.FloatTensor = None
83
+
84
+
85
+ class EncodecConv1d(nn.Module):
86
+ """Conv1d with asymmetric or causal padding and normalization."""
87
+
88
+ def __init__(
89
+ self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1
90
+ ):
91
+ super().__init__()
92
+ self.causal = config.use_causal_conv
93
+ self.pad_mode = config.pad_mode
94
+ self.norm_type = config.norm_type
95
+
96
+ if self.norm_type not in ["weight_norm", "time_group_norm"]:
97
+ raise ValueError(
98
+ f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
99
+ )
100
+
101
+ # warn user on unusual setup between dilation and stride
102
+ if stride > 1 and dilation > 1:
103
+ logger.warning(
104
+ "EncodecConv1d has been initialized with stride > 1 and dilation > 1"
105
+ f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})."
106
+ )
107
+
108
+ self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation)
109
+ if self.norm_type == "weight_norm":
110
+ self.conv = nn.utils.weight_norm(self.conv)
111
+ elif self.norm_type == "time_group_norm":
112
+ self.norm = nn.GroupNorm(1, out_channels)
113
+
114
+ kernel_size = self.conv.kernel_size[0]
115
+ stride = torch.tensor(self.conv.stride[0], dtype=torch.int64)
116
+ dilation = self.conv.dilation[0]
117
+
118
+ # Effective kernel size with dilations.
119
+ kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64)
120
+
121
+ self.register_buffer("stride", stride, persistent=False)
122
+ self.register_buffer("kernel_size", kernel_size, persistent=False)
123
+ self.register_buffer("padding_total", torch.tensor(kernel_size - stride, dtype=torch.int64), persistent=False)
124
+
125
+ def _get_extra_padding_for_conv1d(
126
+ self,
127
+ hidden_states: torch.Tensor,
128
+ ) -> torch.Tensor:
129
+ """See `pad_for_conv1d`."""
130
+ length = hidden_states.shape[-1]
131
+ n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1
132
+ n_frames = torch.ceil(n_frames).to(torch.int64) - 1
133
+ ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total
134
+
135
+ return ideal_length - length
136
+
137
+ @staticmethod
138
+ def _pad1d(hidden_states: torch.Tensor, paddings: Tuple[int, int], mode: str = "zero", value: float = 0.0):
139
+ """Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input.
140
+ If this is the case, we insert extra 0 padding to the right before the reflection happens.
141
+ """
142
+ length = hidden_states.shape[-1]
143
+ padding_left, padding_right = paddings
144
+ if not mode == "reflect":
145
+ return nn.functional.pad(hidden_states, paddings, mode, value)
146
+
147
+ max_pad = max(padding_left, padding_right)
148
+ extra_pad = 0
149
+ if length <= max_pad:
150
+ extra_pad = max_pad - length + 1
151
+ hidden_states = nn.functional.pad(hidden_states, (0, extra_pad))
152
+ padded = nn.functional.pad(hidden_states, paddings, mode, value)
153
+ end = padded.shape[-1] - extra_pad
154
+ return padded[..., :end]
155
+
156
+ def forward(self, hidden_states):
157
+ extra_padding = self._get_extra_padding_for_conv1d(hidden_states)
158
+
159
+ if self.causal:
160
+ # Left padding for causal
161
+ hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode)
162
+ else:
163
+ # Asymmetric padding required for odd strides
164
+ padding_right = self.padding_total // 2
165
+ padding_left = self.padding_total - padding_right
166
+ hidden_states = self._pad1d(
167
+ hidden_states, (padding_left, padding_right + extra_padding), mode=self.pad_mode
168
+ )
169
+
170
+ hidden_states = self.conv(hidden_states)
171
+
172
+ if self.norm_type == "time_group_norm":
173
+ hidden_states = self.norm(hidden_states)
174
+
175
+ return hidden_states
176
+
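# Editor's note (illustrative, not part of the original file): for a hypothetical causal
# EncodecConv1d with kernel_size=7, stride=2 and dilation=1, padding_total is 7 - 2 = 5.
# For an input of length 100, _get_extra_padding_for_conv1d gives
# n_frames = ceil((100 - 7 + 5) / 2 + 1) - 1 = 49 and ideal_length = 49 * 2 + 7 - 5 = 100,
# so no extra padding is added; for length 101 the same formula yields one extra sample
# so that the last frame is fully covered.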
177
+
178
+ class EncodecConvTranspose1d(nn.Module):
179
+ """ConvTranspose1d with asymmetric or causal padding and normalization."""
180
+
181
+ def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1):
182
+ super().__init__()
183
+ self.causal = config.use_causal_conv
184
+ self.trim_right_ratio = config.trim_right_ratio
185
+ self.norm_type = config.norm_type
186
+ if self.norm_type not in ["weight_norm", "time_group_norm"]:
187
+ raise ValueError(
188
+ f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
189
+ )
190
+
191
+ self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride)
192
+ if config.norm_type == "weight_norm":
193
+ self.conv = nn.utils.weight_norm(self.conv)
194
+ elif config.norm_type == "time_group_norm":
195
+ self.norm = nn.GroupNorm(1, out_channels)
196
+
197
+ if not (self.causal or self.trim_right_ratio == 1.0):
198
+ raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions")
199
+
200
+ def forward(self, hidden_states):
201
+ kernel_size = self.conv.kernel_size[0]
202
+ stride = self.conv.stride[0]
203
+ padding_total = kernel_size - stride
204
+
205
+ hidden_states = self.conv(hidden_states)
206
+
207
+ if self.norm_type == "time_group_norm":
208
+ hidden_states = self.norm(hidden_states)
209
+
210
+ # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
211
+ # removed at the very end, when keeping only the right length for the output,
212
+ # as removing it here would require also passing the length at the matching layer
213
+ # in the encoder.
214
+ if self.causal:
215
+ # Trim the padding on the right according to the specified ratio
216
+ # if trim_right_ratio = 1.0, trim everything from right
217
+ padding_right = math.ceil(padding_total * self.trim_right_ratio)
218
+ else:
219
+ # Asymmetric padding required for odd strides
220
+ padding_right = padding_total // 2
221
+
222
+ padding_left = padding_total - padding_right
223
+
224
+ # unpad
225
+ end = hidden_states.shape[-1] - padding_right
226
+ hidden_states = hidden_states[..., padding_left:end]
227
+ return hidden_states
228
+
229
+
230
+ class EncodecLSTM(nn.Module):
231
+ """
232
+ LSTM without worrying about the hidden state, nor the layout of the data. Expects input as convolutional layout.
233
+ """
234
+
235
+ def __init__(self, config, dimension):
236
+ super().__init__()
237
+ self.lstm = nn.LSTM(dimension, dimension, config.num_lstm_layers)
238
+
239
+ def forward(self, hidden_states):
240
+ hidden_states = hidden_states.permute(2, 0, 1)
241
+ hidden_states = self.lstm(hidden_states)[0] + hidden_states
242
+ hidden_states = hidden_states.permute(1, 2, 0)
243
+ return hidden_states
244
+
245
+
246
+ class EncodecResnetBlock(nn.Module):
247
+ """
248
+ Residual block from SEANet model as used by EnCodec.
249
+ """
250
+
251
+ def __init__(self, config: EncodecConfig, dim: int, dilations: List[int]):
252
+ super().__init__()
253
+ kernel_sizes = (config.residual_kernel_size, 1)
254
+ if len(kernel_sizes) != len(dilations):
255
+ raise ValueError("Number of kernel sizes should match number of dilations")
256
+
257
+ hidden = dim // config.compress
258
+ block = []
259
+ for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
260
+ in_chs = dim if i == 0 else hidden
261
+ out_chs = dim if i == len(kernel_sizes) - 1 else hidden
262
+ block += [nn.ELU()]
263
+ block += [EncodecConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)]
264
+ self.block = nn.ModuleList(block)
265
+
266
+ if config.use_conv_shortcut:
267
+ self.shortcut = EncodecConv1d(config, dim, dim, kernel_size=1)
268
+ else:
269
+ self.shortcut = nn.Identity()
270
+
271
+ def forward(self, hidden_states):
272
+ residual = hidden_states
273
+ for layer in self.block:
274
+ hidden_states = layer(hidden_states)
275
+
276
+ return self.shortcut(residual) + hidden_states
277
+
278
+
279
+ class EncodecEncoder(nn.Module):
280
+ """SEANet encoder as used by EnCodec."""
281
+
282
+ def __init__(self, config: EncodecConfig):
283
+ super().__init__()
284
+ model = [EncodecConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)]
285
+ scaling = 1
286
+
287
+ # Downsample to raw audio scale
288
+ for ratio in reversed(config.upsampling_ratios):
289
+ current_scale = scaling * config.num_filters
290
+ # Add residual layers
291
+ for j in range(config.num_residual_layers):
292
+ model += [EncodecResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])]
293
+ # Add downsampling layers
294
+ model += [nn.ELU()]
295
+ model += [EncodecConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)]
296
+ scaling *= 2
297
+
298
+ model += [EncodecLSTM(config, scaling * config.num_filters)]
299
+ model += [nn.ELU()]
300
+ model += [EncodecConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)]
301
+
302
+ self.layers = nn.ModuleList(model)
303
+
304
+ def forward(self, hidden_states):
305
+ for layer in self.layers:
306
+ hidden_states = layer(hidden_states)
307
+ return hidden_states
308
+
309
+
310
+ class EncodecDecoder(nn.Module):
311
+ """SEANet decoder as used by EnCodec."""
312
+
313
+ def __init__(self, config: EncodecConfig):
314
+ super().__init__()
315
+ scaling = int(2 ** len(config.upsampling_ratios))
316
+ model = [EncodecConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)]
317
+
318
+ model += [EncodecLSTM(config, scaling * config.num_filters)]
319
+
320
+ # Upsample to raw audio scale
321
+ for ratio in config.upsampling_ratios:
322
+ current_scale = scaling * config.num_filters
323
+ # Add upsampling layers
324
+ model += [nn.ELU()]
325
+ model += [
326
+ EncodecConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio)
327
+ ]
328
+ # Add residual layers
329
+ for j in range(config.num_residual_layers):
330
+ model += [EncodecResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))]
331
+ scaling //= 2
332
+
333
+ # Add final layers
334
+ model += [nn.ELU()]
335
+ model += [EncodecConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)]
336
+ self.layers = nn.ModuleList(model)
337
+
338
+ def forward(self, hidden_states):
339
+ for layer in self.layers:
340
+ hidden_states = layer(hidden_states)
341
+ return hidden_states
342
+
343
+
344
+ class EncodecEuclideanCodebook(nn.Module):
345
+ """Codebook with Euclidean distance."""
346
+
347
+ def __init__(self, config: EncodecConfig):
348
+ super().__init__()
349
+ embed = torch.zeros(config.codebook_size, config.codebook_dim)
350
+
351
+ self.codebook_size = config.codebook_size
352
+
353
+ self.register_buffer("inited", torch.Tensor([True]))
354
+ self.register_buffer("cluster_size", torch.zeros(config.codebook_size))
355
+ self.register_buffer("embed", embed)
356
+ self.register_buffer("embed_avg", embed.clone())
357
+
358
+ def quantize(self, hidden_states):
359
+ embed = self.embed.t()
360
+ scaled_states = hidden_states.pow(2).sum(1, keepdim=True)
361
+ dist = -(scaled_states - 2 * hidden_states @ embed + embed.pow(2).sum(0, keepdim=True))
362
+ embed_ind = dist.max(dim=-1).indices
363
+ return embed_ind
364
+
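# Editor's note (illustrative, not part of the original file): `dist` above is the
# negative squared Euclidean distance -(||x||^2 - 2 x.e + ||e||^2), so taking
# `dist.max(dim=-1)` selects, for every hidden state, the index of the closest
# codebook entry.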
365
+ def encode(self, hidden_states):
366
+ shape = hidden_states.shape
367
+ # pre-process
368
+ hidden_states = hidden_states.reshape((-1, shape[-1]))
369
+ # quantize
370
+ embed_ind = self.quantize(hidden_states)
371
+ # post-process
372
+ embed_ind = embed_ind.view(*shape[:-1])
373
+ return embed_ind
374
+
375
+ def decode(self, embed_ind):
376
+ quantize = nn.functional.embedding(embed_ind, self.embed)
377
+ return quantize
378
+
379
+
380
+ class EncodecVectorQuantization(nn.Module):
381
+ """
382
+ Vector quantization implementation. Currently supports only euclidean distance.
383
+ """
384
+
385
+ def __init__(self, config: EncodecConfig):
386
+ super().__init__()
387
+ self.codebook = EncodecEuclideanCodebook(config)
388
+
389
+ def encode(self, hidden_states):
390
+ hidden_states = hidden_states.permute(0, 2, 1)
391
+ embed_in = self.codebook.encode(hidden_states)
392
+ return embed_in
393
+
394
+ def decode(self, embed_ind):
395
+ quantize = self.codebook.decode(embed_ind)
396
+ quantize = quantize.permute(0, 2, 1)
397
+ return quantize
398
+
399
+
400
+ class EncodecResidualVectorQuantizer(nn.Module):
401
+ """Residual Vector Quantizer."""
402
+
403
+ def __init__(self, config: EncodecConfig):
404
+ super().__init__()
405
+ self.codebook_size = config.codebook_size
406
+ self.frame_rate = config.frame_rate
407
+ self.num_quantizers = config.num_quantizers
408
+ self.layers = nn.ModuleList([EncodecVectorQuantization(config) for _ in range(config.num_quantizers)])
409
+
410
+ def get_num_quantizers_for_bandwidth(self, bandwidth: Optional[float] = None) -> int:
411
+ """Return num_quantizers based on specified target bandwidth."""
412
+ bw_per_q = math.log2(self.codebook_size) * self.frame_rate
413
+ num_quantizers = self.num_quantizers
414
+ if bandwidth is not None and bandwidth > 0.0:
415
+ num_quantizers = int(max(1, math.floor(bandwidth * 1000 / bw_per_q)))
416
+ return num_quantizers
417
+
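# Editor's note (illustrative, not part of the original file): assuming a 24 kHz
# configuration with codebook_size=1024 and frame_rate=75, each quantizer costs
# log2(1024) * 75 = 750 bits/s, so a target bandwidth of 6.0 (kbps) selects
# floor(6000 / 750) = 8 quantizers.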
418
+ def encode(self, embeddings: torch.Tensor, bandwidth: Optional[float] = None) -> torch.Tensor:
419
+ """
420
+ Encode a given input tensor with the specified frame rate at the given bandwidth. The RVQ encode method sets
421
+ the appropriate number of quantizers to use and returns indices for each quantizer.
422
+ """
423
+ num_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth)
424
+ residual = embeddings
425
+ all_indices = []
426
+ for layer in self.layers[:num_quantizers]:
427
+ indices = layer.encode(residual)
428
+ quantized = layer.decode(indices)
429
+ residual = residual - quantized
430
+ all_indices.append(indices)
431
+ out_indices = torch.stack(all_indices)
432
+ return out_indices
433
+
434
+ def decode(self, codes: torch.Tensor) -> torch.Tensor:
435
+ """Decode the given codes to the quantized representation."""
436
+ quantized_out = torch.tensor(0.0, device=codes.device)
437
+ for i, indices in enumerate(codes):
438
+ layer = self.layers[i]
439
+ quantized = layer.decode(indices)
440
+ quantized_out = quantized_out + quantized
441
+ return quantized_out
442
+
443
+
444
+ class EncodecPreTrainedModel(PreTrainedModel):
445
+ """
446
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
447
+ models.
448
+ """
449
+
450
+ config_class = EncodecConfig
451
+ base_model_prefix = "encodec"
452
+ main_input_name = "input_values"
453
+
454
+ def _init_weights(self, module):
455
+ """Initialize the weights"""
456
+ if isinstance(module, nn.Linear):
457
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
458
+ if module.bias is not None:
459
+ module.bias.data.zero_()
460
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
461
+ module.bias.data.zero_()
462
+ module.weight.data.fill_(1.0)
463
+ elif isinstance(module, nn.Conv1d):
464
+ nn.init.kaiming_normal_(module.weight)
465
+ if module.bias is not None:
466
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
467
+ nn.init.uniform_(module.bias, a=-k, b=k)
468
+ elif isinstance(module, nn.Embedding):
469
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
470
+ if module.padding_idx is not None:
471
+ module.weight.data[module.padding_idx].zero_()
472
+ elif isinstance(module, nn.LSTM):
473
+ for name, param in module.named_parameters():
474
+ if "weight" in name:
475
+ nn.init.xavier_uniform_(param)
476
+ elif "bias" in name:
477
+ nn.init.constant_(param, 0.0)
478
+
479
+
480
+ ENCODEC_START_DOCSTRING = r"""
481
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
482
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
483
+ etc.)
484
+
485
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
486
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
487
+ and behavior.
488
+
489
+ Parameters:
490
+ config ([`EncodecConfig`]):
491
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
492
+ load the weights associated with the model, only the configuration. Check out the
493
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
494
+ """
495
+
496
+
497
+ ENCODEC_INPUTS_DOCSTRING = r"""
498
+ Args:
499
+ input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
500
+ Raw audio input converted to float and padded to the appropriate length in order to be encoded using chunks
501
+ of length `config.chunk_length` and a stride of `config.chunk_stride`.
502
+ padding_mask (`torch.BoolTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
503
+ Mask to avoid computing scaling factors on padding token indices (can we avoid computing conv on these?).
504
+ Mask values selected in `[0, 1]`:
505
+
506
+ - 1 for tokens that are **not masked**,
507
+ - 0 for tokens that are **masked**.
508
+
509
+ <Tip warning={true}>
510
+
511
+ `padding_mask` should always be passed, unless the input was truncated or not padded. This is because in
512
+ order to process tensors effectively, the input audio should be padded so that `input_length % stride =
513
+ step` with `step = chunk_length - stride`. This ensures that all chunks are of the same shape.
514
+
515
+ </Tip>
516
+
517
+ bandwidth (`float`, *optional*):
518
+ The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
519
+ bandwidth. The bandwidth is expressed in kbps (a thousandth of the bits-per-second value), e.g. a 6kbps bandwidth is represented as
520
+ `bandwidth == 6.0`.
521
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
522
+ Discrete code embeddings computed using `model.encode`.
523
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
524
+ Scaling factor for each `audio_codes` input.
525
+ return_dict (`bool`, *optional*):
526
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
527
+ """
528
+
529
+
530
+ @add_start_docstrings(
531
+ "The EnCodec neural audio codec model.",
532
+ ENCODEC_START_DOCSTRING,
533
+ )
534
+ class EncodecModel(EncodecPreTrainedModel):
535
+ def __init__(self, config: EncodecConfig):
536
+ super().__init__(config)
537
+ self.config = config
538
+
539
+ self.encoder = EncodecEncoder(config)
540
+ self.decoder = EncodecDecoder(config)
541
+
542
+ self.quantizer = EncodecResidualVectorQuantizer(config)
543
+
544
+ self.bits_per_codebook = int(math.log2(self.config.codebook_size))
545
+ if 2**self.bits_per_codebook != self.config.codebook_size:
546
+ raise ValueError("The codebook_size must be a power of 2.")
547
+
548
+ # Initialize weights and apply final processing
549
+ self.post_init()
550
+
551
+ def get_encoder(self):
552
+ return self.encoder
553
+
554
+ def get_decoder(self):
555
+ return self.decoder
556
+
557
+ def _encode_frame(
558
+ self, input_values: torch.Tensor, bandwidth: float, padding_mask: int
559
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
560
+ """
561
+ Encodes the given input using the underlying VQVAE. If `config.normalize` is set to `True` the input is first
562
+ normalized. The padding mask is required to compute the correct scale.
563
+ """
564
+ length = input_values.shape[-1]
565
+ duration = length / self.config.sampling_rate
566
+
567
+ if self.config.chunk_length_s is not None and duration > 1e-5 + self.config.chunk_length_s:
568
+ raise RuntimeError(f"Duration of frame ({duration}) is longer than chunk {self.config.chunk_length_s}")
569
+
570
+ scale = None
571
+ if self.config.normalize:
572
+ # if the padding is non zero
573
+ input_values = input_values * padding_mask
574
+ mono = torch.sum(input_values, 1, keepdim=True) / input_values.shape[1]
575
+ scale = mono.pow(2).mean(dim=-1, keepdim=True).sqrt() + 1e-8
576
+ input_values = input_values / scale
577
+
578
+ embeddings = self.encoder(input_values)
579
+ codes = self.quantizer.encode(embeddings, bandwidth)
580
+ codes = codes.transpose(0, 1)
581
+ return codes, scale
582
+
583
+ def encode(
584
+ self,
585
+ input_values: torch.Tensor,
586
+ padding_mask: torch.Tensor = None,
587
+ bandwidth: Optional[float] = None,
588
+ return_dict: Optional[bool] = None,
589
+ ) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], EncodecEncoderOutput]:
590
+ """
591
+ Encodes the input audio waveform into discrete codes.
592
+
593
+ Args:
594
+ input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
595
+ Float values of the input audio waveform.
596
+ padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
597
+ Padding mask used to pad the `input_values`.
598
+ bandwidth (`float`, *optional*):
599
+ The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
600
+ bandwidth. The bandwidth is expressed in kbps (a thousandth of the bits-per-second value), e.g. a 6kbps bandwidth is represented
601
+ as `bandwidth == 6.0`.
602
+
603
+ Returns:
604
+ A list of frames containing the discrete encoded codes for the input audio waveform, along with rescaling
605
+ factors for each chunk when `normalize` is True. Each frame is a tuple `(codebook, scale)`, with
606
+ `codebook` of shape `[batch_size, num_codebooks, frames]`.
607
+ """
608
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
609
+
610
+ if bandwidth is None:
611
+ bandwidth = self.config.target_bandwidths[0]
612
+ if bandwidth not in self.config.target_bandwidths:
613
+ raise ValueError(
614
+ f"This model doesn't support the bandwidth {bandwidth}. "
615
+ f"Select one of {self.config.target_bandwidths}."
616
+ )
617
+
618
+ _, channels, input_length = input_values.shape
619
+
620
+ if channels < 1 or channels > 2:
621
+ raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}")
622
+
623
+ chunk_length = self.config.chunk_length
624
+ if chunk_length is None:
625
+ chunk_length = input_length
626
+ stride = input_length
627
+ else:
628
+ stride = self.config.chunk_stride
629
+
630
+ if padding_mask is None:
631
+ padding_mask = torch.ones_like(input_values).bool()
632
+
633
+ encoded_frames = []
634
+ scales = []
635
+
636
+ step = chunk_length - stride
637
+ if (input_length % stride) - step != 0:
638
+ raise ValueError(
639
+ "The input length is not properly padded for batched chunked decoding. Make sure to pad the input correctly."
640
+ )
641
+
642
+ for offset in range(0, input_length - step, stride):
643
+ mask = padding_mask[..., offset : offset + chunk_length].bool()
644
+ frame = input_values[:, :, offset : offset + chunk_length]
645
+ encoded_frame, scale = self._encode_frame(frame, bandwidth, mask)
646
+ encoded_frames.append(encoded_frame)
647
+ scales.append(scale)
648
+
649
+ encoded_frames = torch.stack(encoded_frames)
650
+
651
+ if not return_dict:
652
+ return (encoded_frames, scales)
653
+
654
+ return EncodecEncoderOutput(encoded_frames, scales)
655
+
656
+ @staticmethod
657
+ def _linear_overlap_add(frames: List[torch.Tensor], stride: int):
658
+ # Generic overlap add, with linear fade-in/fade-out, supporting complex scenario
659
+ # e.g., more than 2 frames per position.
660
+ # The core idea is to use a weight function that is a triangle,
661
+ # with a maximum value at the middle of the chunk.
662
+ # We use this weighting when summing the frames, and divide by the sum of weights
663
+ # for each position at the end. Thus:
664
+ # - if a frame is the only one to cover a position, the weighting is a no-op.
665
+ # - if 2 frames cover a position:
666
+ # ... ...
667
+ # / \/ \
668
+ # / /\ \
669
+ # S T , i.e. S offset of second frame starts, T end of first frame.
670
+ # Then the weight function for each one is: (t - S), (T - t), with `t` a given offset.
671
+ # After the final normalization, the weight of the second frame at position `t` is
672
+ # (t - S) / (t - S + (T - t)) = (t - S) / (T - S), which is exactly what we want.
673
+ #
674
+ # - if more than 2 frames overlap at a given point, we hope that by induction
675
+ # something sensible happens.
676
+ if len(frames) == 0:
677
+ raise ValueError("`frames` cannot be an empty list.")
678
+
679
+ device = frames[0].device
680
+ dtype = frames[0].dtype
681
+ shape = frames[0].shape[:-1]
682
+ total_size = stride * (len(frames) - 1) + frames[-1].shape[-1]
683
+
684
+ frame_length = frames[0].shape[-1]
685
+ time_vec = torch.linspace(0, 1, frame_length + 2, device=device, dtype=dtype)[1:-1]
686
+ weight = 0.5 - (time_vec - 0.5).abs()
687
+
688
+ sum_weight = torch.zeros(total_size, device=device, dtype=dtype)
689
+ out = torch.zeros(*shape, total_size, device=device, dtype=dtype)
690
+ offset: int = 0
691
+
692
+ for frame in frames:
693
+ frame_length = frame.shape[-1]
694
+ out[..., offset : offset + frame_length] += weight[:frame_length] * frame
695
+ sum_weight[offset : offset + frame_length] += weight[:frame_length]
696
+ offset += stride
697
+
698
+ if sum_weight.min() == 0:
699
+ raise ValueError(f"`sum_weight` minimum element must be bigger than zero: {sum_weight}")
700
+
701
+ return out / sum_weight
702
+
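# Editor's note (illustrative, not part of the original file): for a frame_length of 4,
# time_vec is [0.2, 0.4, 0.6, 0.8] and weight is [0.2, 0.4, 0.4, 0.2], a triangle
# peaking at the middle of the frame; overlapping frames are blended with these
# weights and the accumulated per-position weights are divided out at the end.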
703
+ def _decode_frame(self, codes: torch.Tensor, scale: Optional[torch.Tensor] = None) -> torch.Tensor:
704
+ codes = codes.transpose(0, 1)
705
+ embeddings = self.quantizer.decode(codes)
706
+ outputs = self.decoder(embeddings)
707
+ if scale is not None:
708
+ outputs = outputs * scale.view(-1, 1, 1)
709
+ return outputs
710
+
711
+ def decode(
712
+ self,
713
+ audio_codes: torch.Tensor,
714
+ audio_scales: torch.Tensor,
715
+ padding_mask: Optional[torch.Tensor] = None,
716
+ return_dict: Optional[bool] = None,
717
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]:
718
+ """
719
+ Decodes the given frames into an output audio waveform.
720
+
721
+ Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be
722
+ trimmed.
723
+
724
+ Args:
725
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
726
+ Discrete code embeddings computed using `model.encode`.
727
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
728
+ Scaling factor for each `audio_codes` input.
729
+ padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
730
+ Padding mask used to pad the `input_values`.
731
+ return_dict (`bool`, *optional*):
732
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
733
+
734
+ """
735
+ return_dict = return_dict or self.config.return_dict
736
+
737
+ chunk_length = self.config.chunk_length
738
+ if chunk_length is None:
739
+ if len(audio_codes) != 1:
740
+ raise ValueError(f"Expected one frame, got {len(audio_codes)}")
741
+ audio_values = self._decode_frame(audio_codes[0], audio_scales[0])
742
+ else:
743
+ decoded_frames = []
744
+
745
+ for frame, scale in zip(audio_codes, audio_scales):
746
+ frames = self._decode_frame(frame, scale)
747
+ decoded_frames.append(frames)
748
+
749
+ audio_values = self._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1)
750
+
751
+ # truncate based on padding mask
752
+ if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]:
753
+ audio_values = audio_values[..., : padding_mask.shape[-1]]
754
+
755
+ if not return_dict:
756
+ return (audio_values,)
757
+ return EncodecDecoderOutput(audio_values)
758
+
759
+ @add_start_docstrings_to_model_forward(ENCODEC_INPUTS_DOCSTRING)
760
+ @replace_return_docstrings(output_type=EncodecOutput, config_class=_CONFIG_FOR_DOC)
761
+ def forward(
762
+ self,
763
+ input_values: torch.Tensor,
764
+ padding_mask: Optional[torch.Tensor] = None,
765
+ bandwidth: Optional[float] = None,
766
+ audio_codes: Optional[torch.Tensor] = None,
767
+ audio_scales: Optional[torch.Tensor] = None,
768
+ return_dict: Optional[bool] = None,
769
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecOutput]:
770
+ r"""
771
+ Returns:
772
+
773
+ Examples:
774
+
775
+ ```python
776
+ >>> from datasets import load_dataset
777
+ >>> from transformers import AutoProcessor, EncodecModel
778
+
779
+ >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
780
+ >>> audio_sample = dataset["train"]["audio"][0]["array"]
781
+
782
+ >>> model_id = "facebook/encodec_24khz"
783
+ >>> model = EncodecModel.from_pretrained(model_id)
784
+ >>> processor = AutoProcessor.from_pretrained(model_id)
785
+
786
+ >>> inputs = processor(raw_audio=audio_sample, return_tensors="pt")
787
+
788
+ >>> outputs = model(**inputs)
789
+ >>> audio_codes = outputs.audio_codes
790
+ >>> audio_values = outputs.audio_values
791
+ ```"""
792
+ return_dict = return_dict or self.config.return_dict
793
+
794
+ if padding_mask is None:
795
+ padding_mask = torch.ones_like(input_values).bool()
796
+
797
+ if audio_codes is not None and audio_scales is None:
798
+ raise ValueError("You specified `audio_codes` but did not specify the `audio_scales`")
799
+
800
+ if audio_scales is not None and audio_codes is None:
801
+ raise ValueError("You specified `audio_scales` but did not specify the `audio_codes`")
802
+
803
+ if audio_scales is None and audio_codes is None:
804
+ audio_codes, audio_scales = self.encode(input_values, padding_mask, bandwidth, False)
805
+
806
+ audio_values = self.decode(audio_codes, audio_scales, padding_mask, return_dict=return_dict)[0]
807
+ if not return_dict:
808
+ return (audio_codes, audio_values)
809
+
810
+ return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values)
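As an editorial complement to the docstring example in `forward` above (a hedged sketch, not part of the committed file), the explicit `encode`/`decode` round trip looks as follows; it reuses the same dataset and checkpoint identifiers as that docstring example.

```python
from datasets import load_dataset

from transformers import AutoProcessor, EncodecModel

dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
audio_sample = dataset["train"]["audio"][0]["array"]

model = EncodecModel.from_pretrained("facebook/encodec_24khz")
processor = AutoProcessor.from_pretrained("facebook/encodec_24khz")

inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt")

# Encode at the default (smallest) bandwidth, then decode back to a waveform.
encoder_outputs = model.encode(inputs["input_values"], inputs["padding_mask"])
audio_values = model.decode(
    encoder_outputs.audio_codes, encoder_outputs.audio_scales, inputs["padding_mask"]
)[0]
```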
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__init__.py ADDED
@@ -0,0 +1,71 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ is_vision_available,
21
+ )
22
+
23
+
24
+ _import_structure = {
25
+ "configuration_mobilevitv2": [
26
+ "MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP",
27
+ "MobileViTV2Config",
28
+ "MobileViTV2OnnxConfig",
29
+ ],
30
+ }
31
+
32
+
33
+ try:
34
+ if not is_torch_available():
35
+ raise OptionalDependencyNotAvailable()
36
+ except OptionalDependencyNotAvailable:
37
+ pass
38
+ else:
39
+ _import_structure["modeling_mobilevitv2"] = [
40
+ "MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST",
41
+ "MobileViTV2ForImageClassification",
42
+ "MobileViTV2ForSemanticSegmentation",
43
+ "MobileViTV2Model",
44
+ "MobileViTV2PreTrainedModel",
45
+ ]
46
+
47
+ if TYPE_CHECKING:
48
+ from .configuration_mobilevitv2 import (
49
+ MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP,
50
+ MobileViTV2Config,
51
+ MobileViTV2OnnxConfig,
52
+ )
53
+
54
+ try:
55
+ if not is_torch_available():
56
+ raise OptionalDependencyNotAvailable()
57
+ except OptionalDependencyNotAvailable:
58
+ pass
59
+ else:
60
+ from .modeling_mobilevitv2 import (
61
+ MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
62
+ MobileViTV2ForImageClassification,
63
+ MobileViTV2ForSemanticSegmentation,
64
+ MobileViTV2Model,
65
+ MobileViTV2PreTrainedModel,
66
+ )
67
+
68
+ else:
69
+ import sys
70
+
71
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/configuration_mobilevitv2.cpython-310.pyc ADDED
Binary file (6.62 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc ADDED
Binary file (9.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/modeling_mobilevitv2.cpython-310.pyc ADDED
Binary file (26.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/configuration_mobilevitv2.py ADDED
@@ -0,0 +1,168 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MobileViTV2 model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class MobileViTV2Config(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`MobileViTV2Model`]. It is used to instantiate a
36
+ MobileViTV2 model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the MobileViTV2
38
+ [apple/mobilevitv2-1.0](https://huggingface.co/apple/mobilevitv2-1.0) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ num_channels (`int`, *optional*, defaults to 3):
45
+ The number of input channels.
46
+ image_size (`int`, *optional*, defaults to 256):
47
+ The size (resolution) of each image.
48
+ patch_size (`int`, *optional*, defaults to 2):
49
+ The size (resolution) of each patch.
50
+ expand_ratio (`float`, *optional*, defaults to 2.0):
51
+ Expansion factor for the MobileNetv2 layers.
52
+ hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
53
+ The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
54
+ conv_kernel_size (`int`, *optional*, defaults to 3):
55
+ The size of the convolutional kernel in the MobileViTV2 layer.
56
+ output_stride (`int`, *optional*, defaults to 32):
57
+ The ratio of the spatial resolution of the output to the resolution of the input image.
58
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout ratio for attached classifiers.
60
+ initializer_range (`float`, *optional*, defaults to 0.02):
61
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
62
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
63
+ The epsilon used by the layer normalization layers.
64
+ aspp_out_channels (`int`, *optional*, defaults to 512):
65
+ Number of output channels used in the ASPP layer for semantic segmentation.
66
+ atrous_rates (`List[int]`, *optional*, defaults to `[6, 12, 18]`):
67
+ Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
68
+ aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
69
+ The dropout ratio for the ASPP layer for semantic segmentation.
70
+ semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
71
+ The index that is ignored by the loss function of the semantic segmentation model.
72
+ n_attn_blocks (`List[int]`, *optional*, defaults to `[2, 4, 3]`):
73
+ The number of attention blocks in each MobileViTV2Layer.
74
+ base_attn_unit_dims (`List[int]`, *optional*, defaults to `[128, 192, 256]`):
75
+ The base multiplier for dimensions of attention blocks in each MobileViTV2Layer.
76
+ width_multiplier (`float`, *optional*, defaults to 1.0):
77
+ The width multiplier for MobileViTV2.
78
+ ffn_multiplier (`int`, *optional*, defaults to 2):
79
+ The FFN multiplier for MobileViTV2.
80
+ attn_dropout (`float`, *optional*, defaults to 0.0):
81
+ The dropout in the attention layer.
82
+ ffn_dropout (`float`, *optional*, defaults to 0.0):
83
+ The dropout between FFN layers.
84
+
85
+ Example:
86
+
87
+ ```python
88
+ >>> from transformers import MobileViTV2Config, MobileViTV2Model
89
+
90
+ >>> # Initializing a mobilevitv2-small style configuration
91
+ >>> configuration = MobileViTV2Config()
92
+
93
+ >>> # Initializing a model from the mobilevitv2-small style configuration
94
+ >>> model = MobileViTV2Model(configuration)
95
+
96
+ >>> # Accessing the model configuration
97
+ >>> configuration = model.config
98
+ ```"""
99
+
100
+ model_type = "mobilevitv2"
101
+
102
+ def __init__(
103
+ self,
104
+ num_channels=3,
105
+ image_size=256,
106
+ patch_size=2,
107
+ expand_ratio=2.0,
108
+ hidden_act="swish",
109
+ conv_kernel_size=3,
110
+ output_stride=32,
111
+ classifier_dropout_prob=0.1,
112
+ initializer_range=0.02,
113
+ layer_norm_eps=1e-5,
114
+ aspp_out_channels=512,
115
+ atrous_rates=[6, 12, 18],
116
+ aspp_dropout_prob=0.1,
117
+ semantic_loss_ignore_index=255,
118
+ n_attn_blocks=[2, 4, 3],
119
+ base_attn_unit_dims=[128, 192, 256],
120
+ width_multiplier=1.0,
121
+ ffn_multiplier=2,
122
+ attn_dropout=0.0,
123
+ ffn_dropout=0.0,
124
+ **kwargs,
125
+ ):
126
+ super().__init__(**kwargs)
127
+
128
+ self.num_channels = num_channels
129
+ self.image_size = image_size
130
+ self.patch_size = patch_size
131
+ self.expand_ratio = expand_ratio
132
+ self.hidden_act = hidden_act
133
+ self.conv_kernel_size = conv_kernel_size
134
+ self.output_stride = output_stride
135
+ self.initializer_range = initializer_range
136
+ self.layer_norm_eps = layer_norm_eps
137
+ self.n_attn_blocks = n_attn_blocks
138
+ self.base_attn_unit_dims = base_attn_unit_dims
139
+ self.width_multiplier = width_multiplier
140
+ self.ffn_multiplier = ffn_multiplier
141
+ self.ffn_dropout = ffn_dropout
142
+ self.attn_dropout = attn_dropout
143
+ self.classifier_dropout_prob = classifier_dropout_prob
144
+
145
+ # decode head attributes for semantic segmentation
146
+ self.aspp_out_channels = aspp_out_channels
147
+ self.atrous_rates = atrous_rates
148
+ self.aspp_dropout_prob = aspp_dropout_prob
149
+ self.semantic_loss_ignore_index = semantic_loss_ignore_index
150
+
151
+
152
+ class MobileViTV2OnnxConfig(OnnxConfig):
153
+ torch_onnx_minimum_version = version.parse("1.11")
154
+
155
+ @property
156
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
157
+ return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])
158
+
159
+ @property
160
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
161
+ if self.task == "image-classification":
162
+ return OrderedDict([("logits", {0: "batch"})])
163
+ else:
164
+ return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
165
+
166
+ @property
167
+ def atol_for_validation(self) -> float:
168
+ return 1e-4
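As an editorial aside (a hedged sketch, not part of the committed file), the ONNX config above can be inspected directly; the import path follows the package `__init__.py` shown earlier, and the printed values simply echo the properties defined above.

```python
from transformers.models.mobilevitv2 import MobileViTV2Config, MobileViTV2OnnxConfig

config = MobileViTV2Config()
onnx_config = MobileViTV2OnnxConfig(config, task="image-classification")

print(onnx_config.inputs)               # pixel_values with dynamic batch/channel/spatial axes
print(onnx_config.outputs)              # logits with a dynamic batch axis
print(onnx_config.atol_for_validation)  # 1e-4
```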
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py ADDED
@@ -0,0 +1,326 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert MobileViTV2 checkpoints from the ml-cvnets library."""
16
+
17
+
18
+ import argparse
19
+ import collections
20
+ import json
21
+ from pathlib import Path
22
+
23
+ import requests
24
+ import torch
25
+ import yaml
26
+ from huggingface_hub import hf_hub_download
27
+ from PIL import Image
28
+
29
+ from transformers import (
30
+ MobileViTImageProcessor,
31
+ MobileViTV2Config,
32
+ MobileViTV2ForImageClassification,
33
+ MobileViTV2ForSemanticSegmentation,
34
+ )
35
+ from transformers.utils import logging
36
+
37
+
38
+ logging.set_verbosity_info()
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ def load_orig_config_file(orig_cfg_file):
43
+ print("Loading config file...")
44
+
45
+ def flatten_yaml_as_dict(d, parent_key="", sep="."):
46
+ items = []
47
+ for k, v in d.items():
48
+ new_key = parent_key + sep + k if parent_key else k
49
+ if isinstance(v, collections.abc.MutableMapping):
50
+ items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
51
+ else:
52
+ items.append((new_key, v))
53
+ return dict(items)
54
+
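# Editor's note (illustrative, not part of the original file): flatten_yaml_as_dict
# turns a nested mapping such as {"model": {"classification": {"name": "mobilevit_v2"}}}
# into {"model.classification.name": "mobilevit_v2"}, which is why the options are read
# back later with getattr(config, "model.classification.name").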
55
+ config = argparse.Namespace()
56
+ with open(orig_cfg_file, "r") as yaml_file:
57
+ try:
58
+ cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
59
+
60
+ flat_cfg = flatten_yaml_as_dict(cfg)
61
+ for k, v in flat_cfg.items():
62
+ setattr(config, k, v)
63
+ except yaml.YAMLError as exc:
64
+ logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
65
+ return config
66
+
67
+
68
+ def get_mobilevitv2_config(task_name, orig_cfg_file):
69
+ config = MobileViTV2Config()
70
+
71
+ is_segmentation_model = False
72
+
73
+ # dataset
74
+ if task_name.startswith("imagenet1k_"):
75
+ config.num_labels = 1000
76
+ if int(task_name.strip().split("_")[-1]) == 384:
77
+ config.image_size = 384
78
+ else:
79
+ config.image_size = 256
80
+ filename = "imagenet-1k-id2label.json"
81
+ elif task_name.startswith("imagenet21k_to_1k_"):
82
+ config.num_labels = 21000
83
+ if int(task_name.strip().split("_")[-1]) == 384:
84
+ config.image_size = 384
85
+ else:
86
+ config.image_size = 256
87
+ filename = "imagenet-22k-id2label.json"
88
+ elif task_name.startswith("ade20k_"):
89
+ config.num_labels = 151
90
+ config.image_size = 512
91
+ filename = "ade20k-id2label.json"
92
+ is_segmentation_model = True
93
+ elif task_name.startswith("voc_"):
94
+ config.num_labels = 21
95
+ config.image_size = 512
96
+ filename = "pascal-voc-id2label.json"
97
+ is_segmentation_model = True
98
+
99
+ # orig_config
100
+ orig_config = load_orig_config_file(orig_cfg_file)
101
+ assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
102
+ config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
103
+ assert (
104
+ getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
105
+ ), "Norm layers other than layer_norm_2d are not supported"
106
+ config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
107
+ # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
108
+
109
+ if is_segmentation_model:
110
+ config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
111
+ if "_deeplabv3" in task_name:
112
+ config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
113
+ config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
114
+ config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)
115
+
116
+ # id2label
117
+ repo_id = "huggingface/label-files"
118
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
119
+ id2label = {int(k): v for k, v in id2label.items()}
120
+ config.id2label = id2label
121
+ config.label2id = {v: k for k, v in id2label.items()}
122
+
123
+ return config
124
+
125
+
126
+ def rename_key(dct, old, new):
127
+ val = dct.pop(old)
128
+ dct[new] = val
129
+
130
+
131
+ def create_rename_keys(state_dict, base_model=False):
132
+ if base_model:
133
+ model_prefix = ""
134
+ else:
135
+ model_prefix = "mobilevitv2."
136
+
137
+ rename_keys = []
138
+ for k in state_dict.keys():
139
+ if k[:8] == "encoder.":
140
+ k_new = k[8:]
141
+ else:
142
+ k_new = k
143
+
144
+ if ".block." in k:
145
+ k_new = k_new.replace(".block.", ".")
146
+ if ".conv." in k:
147
+ k_new = k_new.replace(".conv.", ".convolution.")
148
+ if ".norm." in k:
149
+ k_new = k_new.replace(".norm.", ".normalization.")
150
+
151
+ if "conv_1." in k:
152
+ k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
153
+ for i in [1, 2]:
154
+ if f"layer_{i}." in k:
155
+ k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
156
+ if ".exp_1x1." in k:
157
+ k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
158
+ if ".red_1x1." in k:
159
+ k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
160
+
161
+ for i in [3, 4, 5]:
162
+ if f"layer_{i}.0." in k:
163
+ k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
164
+ if f"layer_{i}.1.local_rep.0." in k:
165
+ k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
166
+ if f"layer_{i}.1.local_rep.1." in k:
167
+ k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
168
+
169
+ for i in [3, 4, 5]:
170
+ if i == 3:
171
+ j_in = [0, 1]
172
+ elif i == 4:
173
+ j_in = [0, 1, 2, 3]
174
+ elif i == 5:
175
+ j_in = [0, 1, 2]
176
+
177
+ for j in j_in:
178
+ if f"layer_{i}.1.global_rep.{j}." in k:
179
+ k_new = k_new.replace(
180
+ f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
181
+ )
182
+ if f"layer_{i}.1.global_rep.{j+1}." in k:
183
+ k_new = k_new.replace(
184
+ f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
185
+ )
186
+
187
+ if f"layer_{i}.1.conv_proj." in k:
188
+ k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")
189
+
190
+ if "pre_norm_attn.0." in k:
191
+ k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
192
+ if "pre_norm_attn.1." in k:
193
+ k_new = k_new.replace("pre_norm_attn.1.", "attention.")
194
+ if "pre_norm_ffn.0." in k:
195
+ k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
196
+ if "pre_norm_ffn.1." in k:
197
+ k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
198
+ if "pre_norm_ffn.3." in k:
199
+ k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
200
+
201
+ if "classifier.1." in k:
202
+ k_new = k_new.replace("classifier.1.", "classifier.")
203
+
204
+ if "seg_head." in k:
205
+ k_new = k_new.replace("seg_head.", "segmentation_head.")
206
+ if ".aspp_layer." in k:
207
+ k_new = k_new.replace(".aspp_layer.", ".")
208
+ if ".aspp_pool." in k:
209
+ k_new = k_new.replace(".aspp_pool.", ".")
210
+
211
+ rename_keys.append((k, k_new))
212
+ return rename_keys
213
+
214
+
215
+ def remove_unused_keys(state_dict):
216
+ """remove unused keys (e.g.: seg_head.aux_head)"""
217
+ keys_to_ignore = []
218
+ for k in state_dict.keys():
219
+ if k.startswith("seg_head.aux_head."):
220
+ keys_to_ignore.append(k)
221
+ for k in keys_to_ignore:
222
+ state_dict.pop(k, None)
223
+
224
+
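A toy walk-through (with made-up checkpoint keys) of the two-step cleanup used below: first drop the ignored `seg_head.aux_head.*` entries, then apply the `(old, new)` pairs that `create_rename_keys` produces, in the same way `rename_key` does:

```python
# made-up original checkpoint keys; integers stand in for tensors
state_dict = {
    "seg_head.aux_head.weight": 0,
    "conv_1.block.conv.weight": 1,
    "layer_3.1.local_rep.0.block.conv.weight": 2,
}

# step 1: drop unused keys (what remove_unused_keys does)
for k in [k for k in state_dict if k.startswith("seg_head.aux_head.")]:
    state_dict.pop(k, None)

# step 2: apply (old, new) renames; these two pairs are what the rules above
# would yield for the made-up keys when base_model=False
rename_pairs = [
    ("conv_1.block.conv.weight", "mobilevitv2.conv_stem.convolution.weight"),
    ("layer_3.1.local_rep.0.block.conv.weight", "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight"),
]
for old, new in rename_pairs:
    state_dict[new] = state_dict.pop(old)

assert sorted(state_dict) == [
    "mobilevitv2.conv_stem.convolution.weight",
    "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight",
]
```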
225
+ # We will verify our results on an image of cute cats
226
+ def prepare_img():
227
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
228
+ # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
229
+ im = Image.open(requests.get(url, stream=True).raw)
230
+ return im
231
+
232
+
233
+ @torch.no_grad()
234
+ def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
235
+ """
236
+ Copy/paste/tweak model's weights to our MobileViTV2 structure.
237
+ """
238
+ config = get_mobilevitv2_config(task_name, orig_config_path)
239
+
240
+ # load original state_dict
241
+ checkpoint = torch.load(checkpoint_path, map_location="cpu")
242
+
243
+ # load huggingface model
244
+ if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
245
+ model = MobileViTV2ForSemanticSegmentation(config).eval()
246
+ base_model = False
247
+ else:
248
+ model = MobileViTV2ForImageClassification(config).eval()
249
+ base_model = False
250
+
251
+ # remove and rename some keys of the original model state dict
252
+ state_dict = checkpoint
253
+ remove_unused_keys(state_dict)
254
+ rename_keys = create_rename_keys(state_dict, base_model=base_model)
255
+ for rename_key_src, rename_key_dest in rename_keys:
256
+ rename_key(state_dict, rename_key_src, rename_key_dest)
257
+
258
+ # load modified state_dict
259
+ model.load_state_dict(state_dict)
260
+
261
+ # Check outputs on an image, prepared by MobileViTImageProcessor
262
+ image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
263
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
264
+ outputs = model(**encoding)
265
+
266
+ # verify classification model
267
+ if task_name.startswith("imagenet"):
268
+ logits = outputs.logits
269
+ predicted_class_idx = logits.argmax(-1).item()
270
+ print("Predicted class:", model.config.id2label[predicted_class_idx])
271
+ if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
272
+ # expected_logits for base variant
273
+ expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
274
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
275
+
276
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
277
+ print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
278
+ model.save_pretrained(pytorch_dump_folder_path)
279
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
280
+ image_processor.save_pretrained(pytorch_dump_folder_path)
281
+
282
+
283
+ if __name__ == "__main__":
284
+ parser = argparse.ArgumentParser()
285
+ # Required parameters
286
+ parser.add_argument(
287
+ "--task",
288
+ default="imagenet1k_256",
289
+ type=str,
290
+ help=(
291
+ "Name of the task on which the MobileViTV2 model you'd like to convert was trained. "
292
+ """
293
+ Classification (ImageNet-1k)
294
+ - MobileViTV2 (256x256) : imagenet1k_256
295
+ - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
296
+ - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
297
+ imagenet21k_to_1k_256
298
+ - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
299
+ ImageNet-1k 384x384) : imagenet21k_to_1k_384
300
+ Segmentation
301
+ - ADE20K Dataset : ade20k_deeplabv3
302
+ - Pascal VOC 2012 Dataset: voc_deeplabv3
303
+ """
304
+ ),
305
+ choices=[
306
+ "imagenet1k_256",
307
+ "imagenet1k_384",
308
+ "imagenet21k_to_1k_256",
309
+ "imagenet21k_to_1k_384",
310
+ "ade20k_deeplabv3",
311
+ "voc_deeplabv3",
312
+ ],
313
+ )
314
+
315
+ parser.add_argument(
316
+ "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
317
+ )
318
+ parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
319
+ parser.add_argument(
320
+ "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
321
+ )
322
+
323
+ args = parser.parse_args()
324
+ convert_mobilevitv2_checkpoint(
325
+ args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
326
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevitv2/modeling_mobilevitv2.py ADDED
@@ -0,0 +1,1030 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ # Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE
17
+ """ PyTorch MobileViTV2 model."""
18
+
19
+
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithNoAttention,
30
+ BaseModelOutputWithPoolingAndNoAttention,
31
+ ImageClassifierOutputWithNoAttention,
32
+ SemanticSegmenterOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from .configuration_mobilevitv2 import MobileViTV2Config
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
+ # General docstring
49
+ _CONFIG_FOR_DOC = "MobileViTV2Config"
50
+
51
+ # Base docstring
52
+ _CHECKPOINT_FOR_DOC = "apple/mobilevitv2-1.0-imagenet1k-256"
53
+ _EXPECTED_OUTPUT_SHAPE = [1, 512, 8, 8]
54
+
55
+ # Image classification docstring
56
+ _IMAGE_CLASS_CHECKPOINT = "apple/mobilevitv2-1.0-imagenet1k-256"
57
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
58
+
59
+
60
+ from ..deprecated._archive_maps import MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
61
+
62
+
63
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.make_divisible
64
+ def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
65
+ """
66
+ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
67
+ original TensorFlow repo. It can be seen here:
68
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
69
+ """
70
+ if min_value is None:
71
+ min_value = divisor
72
+ new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
73
+ # Make sure that round down does not go down by more than 10%.
74
+ if new_value < 0.9 * value:
75
+ new_value += divisor
76
+ return int(new_value)
77
+
78
+
79
+ def clip(value: float, min_val: float = float("-inf"), max_val: float = float("inf")) -> float:
80
+ return max(min_val, min(max_val, value))
81
+
82
+
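A few quick numeric checks of the two helpers above (values picked for illustration; this assumes the installed `transformers` build exposes this module):

```python
from transformers.models.mobilevitv2.modeling_mobilevitv2 import clip, make_divisible

assert make_divisible(64) == 64              # already a multiple of 8
assert make_divisible(61, divisor=8) == 64   # rounded to the nearest multiple of 8
assert make_divisible(100, divisor=8) == 104
assert clip(5.0, min_val=16, max_val=64) == 16
assert clip(32 * 1.75, min_val=16, max_val=64) == 56.0

# the stem width for width_multiplier=1.0, as computed by the encoder below
assert make_divisible(clip(32 * 1.0, min_val=16, max_val=64), divisor=8, min_value=16) == 32
```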
83
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTConvLayer with MobileViT->MobileViTV2
84
+ class MobileViTV2ConvLayer(nn.Module):
85
+ def __init__(
86
+ self,
87
+ config: MobileViTV2Config,
88
+ in_channels: int,
89
+ out_channels: int,
90
+ kernel_size: int,
91
+ stride: int = 1,
92
+ groups: int = 1,
93
+ bias: bool = False,
94
+ dilation: int = 1,
95
+ use_normalization: bool = True,
96
+ use_activation: Union[bool, str] = True,
97
+ ) -> None:
98
+ super().__init__()
99
+ padding = int((kernel_size - 1) / 2) * dilation
100
+
101
+ if in_channels % groups != 0:
102
+ raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
103
+ if out_channels % groups != 0:
104
+ raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
105
+
106
+ self.convolution = nn.Conv2d(
107
+ in_channels=in_channels,
108
+ out_channels=out_channels,
109
+ kernel_size=kernel_size,
110
+ stride=stride,
111
+ padding=padding,
112
+ dilation=dilation,
113
+ groups=groups,
114
+ bias=bias,
115
+ padding_mode="zeros",
116
+ )
117
+
118
+ if use_normalization:
119
+ self.normalization = nn.BatchNorm2d(
120
+ num_features=out_channels,
121
+ eps=1e-5,
122
+ momentum=0.1,
123
+ affine=True,
124
+ track_running_stats=True,
125
+ )
126
+ else:
127
+ self.normalization = None
128
+
129
+ if use_activation:
130
+ if isinstance(use_activation, str):
131
+ self.activation = ACT2FN[use_activation]
132
+ elif isinstance(config.hidden_act, str):
133
+ self.activation = ACT2FN[config.hidden_act]
134
+ else:
135
+ self.activation = config.hidden_act
136
+ else:
137
+ self.activation = None
138
+
139
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
140
+ features = self.convolution(features)
141
+ if self.normalization is not None:
142
+ features = self.normalization(features)
143
+ if self.activation is not None:
144
+ features = self.activation(features)
145
+ return features
146
+
147
+
148
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTInvertedResidual with MobileViT->MobileViTV2
149
+ class MobileViTV2InvertedResidual(nn.Module):
150
+ """
151
+ Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381
152
+ """
153
+
154
+ def __init__(
155
+ self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1
156
+ ) -> None:
157
+ super().__init__()
158
+ expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
159
+
160
+ if stride not in [1, 2]:
161
+ raise ValueError(f"Invalid stride {stride}.")
162
+
163
+ self.use_residual = (stride == 1) and (in_channels == out_channels)
164
+
165
+ self.expand_1x1 = MobileViTV2ConvLayer(
166
+ config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
167
+ )
168
+
169
+ self.conv_3x3 = MobileViTV2ConvLayer(
170
+ config,
171
+ in_channels=expanded_channels,
172
+ out_channels=expanded_channels,
173
+ kernel_size=3,
174
+ stride=stride,
175
+ groups=expanded_channels,
176
+ dilation=dilation,
177
+ )
178
+
179
+ self.reduce_1x1 = MobileViTV2ConvLayer(
180
+ config,
181
+ in_channels=expanded_channels,
182
+ out_channels=out_channels,
183
+ kernel_size=1,
184
+ use_activation=False,
185
+ )
186
+
187
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
188
+ residual = features
189
+
190
+ features = self.expand_1x1(features)
191
+ features = self.conv_3x3(features)
192
+ features = self.reduce_1x1(features)
193
+
194
+ return residual + features if self.use_residual else features
195
+
196
+
197
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTMobileNetLayer with MobileViT->MobileViTV2
198
+ class MobileViTV2MobileNetLayer(nn.Module):
199
+ def __init__(
200
+ self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1
201
+ ) -> None:
202
+ super().__init__()
203
+
204
+ self.layer = nn.ModuleList()
205
+ for i in range(num_stages):
206
+ layer = MobileViTV2InvertedResidual(
207
+ config,
208
+ in_channels=in_channels,
209
+ out_channels=out_channels,
210
+ stride=stride if i == 0 else 1,
211
+ )
212
+ self.layer.append(layer)
213
+ in_channels = out_channels
214
+
215
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
216
+ for layer_module in self.layer:
217
+ features = layer_module(features)
218
+ return features
219
+
220
+
221
+ class MobileViTV2LinearSelfAttention(nn.Module):
222
+ """
223
+ This layer applies a self-attention with linear complexity, as described in MobileViTV2 paper:
224
+ https://arxiv.org/abs/2206.02680
225
+
226
+ Args:
227
+ config (`MobileViTV2Config`):
228
+ Model configuration object
229
+ embed_dim (`int`):
230
+ `input_channels` from an expected input of size :math:`(batch_size, input_channels, height, width)`
231
+ """
232
+
233
+ def __init__(self, config: MobileViTV2Config, embed_dim: int) -> None:
234
+ super().__init__()
235
+
236
+ self.qkv_proj = MobileViTV2ConvLayer(
237
+ config=config,
238
+ in_channels=embed_dim,
239
+ out_channels=1 + (2 * embed_dim),
240
+ bias=True,
241
+ kernel_size=1,
242
+ use_normalization=False,
243
+ use_activation=False,
244
+ )
245
+
246
+ self.attn_dropout = nn.Dropout(p=config.attn_dropout)
247
+ self.out_proj = MobileViTV2ConvLayer(
248
+ config=config,
249
+ in_channels=embed_dim,
250
+ out_channels=embed_dim,
251
+ bias=True,
252
+ kernel_size=1,
253
+ use_normalization=False,
254
+ use_activation=False,
255
+ )
256
+ self.embed_dim = embed_dim
257
+
258
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
259
+ # (batch_size, embed_dim, num_pixels_in_patch, num_patches) --> (batch_size, 1+2*embed_dim, num_pixels_in_patch, num_patches)
260
+ qkv = self.qkv_proj(hidden_states)
261
+
262
+ # Project hidden_states into query, key and value
263
+ # Query --> [batch_size, 1, num_pixels_in_patch, num_patches]
264
+ # value, key --> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
265
+ query, key, value = torch.split(qkv, split_size_or_sections=[1, self.embed_dim, self.embed_dim], dim=1)
266
+
267
+ # apply softmax along num_patches dimension
268
+ context_scores = torch.nn.functional.softmax(query, dim=-1)
269
+ context_scores = self.attn_dropout(context_scores)
270
+
271
+ # Compute context vector
272
+ # [batch_size, embed_dim, num_pixels_in_patch, num_patches] x [batch_size, 1, num_pixels_in_patch, num_patches] -> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
273
+ context_vector = key * context_scores
274
+ # [batch_size, embed_dim, num_pixels_in_patch, num_patches] --> [batch_size, embed_dim, num_pixels_in_patch, 1]
275
+ context_vector = torch.sum(context_vector, dim=-1, keepdim=True)
276
+
277
+ # combine context vector with values
278
+ # [batch_size, embed_dim, num_pixels_in_patch, num_patches] * [batch_size, embed_dim, num_pixels_in_patch, 1] --> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
279
+ out = torch.nn.functional.relu(value) * context_vector.expand_as(value)
280
+ out = self.out_proj(out)
281
+ return out
282
+
283
+
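A shape-level sketch of the separable attention above, with plain tensors standing in for the 1x1 convolution projections (all sizes here are made up). The point is that the context is a sum over patches, so nothing of size `num_patches x num_patches` is ever materialized:

```python
import torch

batch_size, embed_dim, pixels_per_patch, num_patches = 2, 64, 4, 49

# stand-ins for the qkv_proj output after the split: one score channel + key + value
query = torch.randn(batch_size, 1, pixels_per_patch, num_patches)
key = torch.randn(batch_size, embed_dim, pixels_per_patch, num_patches)
value = torch.randn(batch_size, embed_dim, pixels_per_patch, num_patches)

context_scores = torch.nn.functional.softmax(query, dim=-1)        # softmax over patches
context_vector = (key * context_scores).sum(dim=-1, keepdim=True)  # one context vector per pixel position
out = torch.nn.functional.relu(value) * context_vector.expand_as(value)

assert context_vector.shape == (batch_size, embed_dim, pixels_per_patch, 1)
assert out.shape == (batch_size, embed_dim, pixels_per_patch, num_patches)
```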
284
+ class MobileViTV2FFN(nn.Module):
285
+ def __init__(
286
+ self,
287
+ config: MobileViTV2Config,
288
+ embed_dim: int,
289
+ ffn_latent_dim: int,
290
+ ffn_dropout: float = 0.0,
291
+ ) -> None:
292
+ super().__init__()
293
+ self.conv1 = MobileViTV2ConvLayer(
294
+ config=config,
295
+ in_channels=embed_dim,
296
+ out_channels=ffn_latent_dim,
297
+ kernel_size=1,
298
+ stride=1,
299
+ bias=True,
300
+ use_normalization=False,
301
+ use_activation=True,
302
+ )
303
+ self.dropout1 = nn.Dropout(ffn_dropout)
304
+
305
+ self.conv2 = MobileViTV2ConvLayer(
306
+ config=config,
307
+ in_channels=ffn_latent_dim,
308
+ out_channels=embed_dim,
309
+ kernel_size=1,
310
+ stride=1,
311
+ bias=True,
312
+ use_normalization=False,
313
+ use_activation=False,
314
+ )
315
+ self.dropout2 = nn.Dropout(ffn_dropout)
316
+
317
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
318
+ hidden_states = self.conv1(hidden_states)
319
+ hidden_states = self.dropout1(hidden_states)
320
+ hidden_states = self.conv2(hidden_states)
321
+ hidden_states = self.dropout2(hidden_states)
322
+ return hidden_states
323
+
324
+
325
+ class MobileViTV2TransformerLayer(nn.Module):
326
+ def __init__(
327
+ self,
328
+ config: MobileViTV2Config,
329
+ embed_dim: int,
330
+ ffn_latent_dim: int,
331
+ dropout: float = 0.0,
332
+ ) -> None:
333
+ super().__init__()
334
+ self.layernorm_before = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
335
+ self.attention = MobileViTV2LinearSelfAttention(config, embed_dim)
336
+ self.dropout1 = nn.Dropout(p=dropout)
337
+ self.layernorm_after = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
338
+ self.ffn = MobileViTV2FFN(config, embed_dim, ffn_latent_dim, config.ffn_dropout)
339
+
340
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
341
+ layernorm_1_out = self.layernorm_before(hidden_states)
342
+ attention_output = self.attention(layernorm_1_out)
343
+ hidden_states = attention_output + hidden_states
344
+
345
+ layer_output = self.layernorm_after(hidden_states)
346
+ layer_output = self.ffn(layer_output)
347
+
348
+ layer_output = layer_output + hidden_states
349
+ return layer_output
350
+
351
+
352
+ class MobileViTV2Transformer(nn.Module):
353
+ def __init__(self, config: MobileViTV2Config, n_layers: int, d_model: int) -> None:
354
+ super().__init__()
355
+
356
+ ffn_multiplier = config.ffn_multiplier
357
+
358
+ ffn_dims = [ffn_multiplier * d_model] * n_layers
359
+
360
+ # ensure that dims are multiple of 16
361
+ ffn_dims = [int((d // 16) * 16) for d in ffn_dims]
362
+
363
+ self.layer = nn.ModuleList()
364
+ for block_idx in range(n_layers):
365
+ transformer_layer = MobileViTV2TransformerLayer(
366
+ config, embed_dim=d_model, ffn_latent_dim=ffn_dims[block_idx]
367
+ )
368
+ self.layer.append(transformer_layer)
369
+
370
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
371
+ for layer_module in self.layer:
372
+ hidden_states = layer_module(hidden_states)
373
+ return hidden_states
374
+
375
+
376
+ class MobileViTV2Layer(nn.Module):
377
+ """
378
+ MobileViTV2 layer: https://arxiv.org/abs/2206.02680
379
+ """
380
+
381
+ def __init__(
382
+ self,
383
+ config: MobileViTV2Config,
384
+ in_channels: int,
385
+ out_channels: int,
386
+ attn_unit_dim: int,
387
+ n_attn_blocks: int = 2,
388
+ dilation: int = 1,
389
+ stride: int = 2,
390
+ ) -> None:
391
+ super().__init__()
392
+ self.patch_width = config.patch_size
393
+ self.patch_height = config.patch_size
394
+
395
+ cnn_out_dim = attn_unit_dim
396
+
397
+ if stride == 2:
398
+ self.downsampling_layer = MobileViTV2InvertedResidual(
399
+ config,
400
+ in_channels=in_channels,
401
+ out_channels=out_channels,
402
+ stride=stride if dilation == 1 else 1,
403
+ dilation=dilation // 2 if dilation > 1 else 1,
404
+ )
405
+ in_channels = out_channels
406
+ else:
407
+ self.downsampling_layer = None
408
+
409
+ # Local representations
410
+ self.conv_kxk = MobileViTV2ConvLayer(
411
+ config,
412
+ in_channels=in_channels,
413
+ out_channels=in_channels,
414
+ kernel_size=config.conv_kernel_size,
415
+ groups=in_channels,
416
+ )
417
+ self.conv_1x1 = MobileViTV2ConvLayer(
418
+ config,
419
+ in_channels=in_channels,
420
+ out_channels=cnn_out_dim,
421
+ kernel_size=1,
422
+ use_normalization=False,
423
+ use_activation=False,
424
+ )
425
+
426
+ # Global representations
427
+ self.transformer = MobileViTV2Transformer(config, d_model=attn_unit_dim, n_layers=n_attn_blocks)
428
+
429
+ # self.layernorm = MobileViTV2LayerNorm2D(attn_unit_dim, eps=config.layer_norm_eps)
430
+ self.layernorm = nn.GroupNorm(num_groups=1, num_channels=attn_unit_dim, eps=config.layer_norm_eps)
431
+
432
+ # Fusion
433
+ self.conv_projection = MobileViTV2ConvLayer(
434
+ config,
435
+ in_channels=cnn_out_dim,
436
+ out_channels=in_channels,
437
+ kernel_size=1,
438
+ use_normalization=True,
439
+ use_activation=False,
440
+ )
441
+
442
+ def unfolding(self, feature_map: torch.Tensor) -> Tuple[torch.Tensor, Tuple[int, int]]:
443
+ batch_size, in_channels, img_height, img_width = feature_map.shape
444
+ patches = nn.functional.unfold(
445
+ feature_map,
446
+ kernel_size=(self.patch_height, self.patch_width),
447
+ stride=(self.patch_height, self.patch_width),
448
+ )
449
+ patches = patches.reshape(batch_size, in_channels, self.patch_height * self.patch_width, -1)
450
+
451
+ return patches, (img_height, img_width)
452
+
453
+ def folding(self, patches: torch.Tensor, output_size: Tuple[int, int]) -> torch.Tensor:
454
+ batch_size, in_dim, patch_size, n_patches = patches.shape
455
+ patches = patches.reshape(batch_size, in_dim * patch_size, n_patches)
456
+
457
+ feature_map = nn.functional.fold(
458
+ patches,
459
+ output_size=output_size,
460
+ kernel_size=(self.patch_height, self.patch_width),
461
+ stride=(self.patch_height, self.patch_width),
462
+ )
463
+
464
+ return feature_map
465
+
466
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
467
+ # reduce spatial dimensions if needed
468
+ if self.downsampling_layer:
469
+ features = self.downsampling_layer(features)
470
+
471
+ # local representation
472
+ features = self.conv_kxk(features)
473
+ features = self.conv_1x1(features)
474
+
475
+ # convert feature map to patches
476
+ patches, output_size = self.unfolding(features)
477
+
478
+ # learn global representations
479
+ patches = self.transformer(patches)
480
+ patches = self.layernorm(patches)
481
+
482
+ # convert patches back to feature maps
483
+ # [batch_size, channels, pixels_per_patch, num_patches] --> [batch_size, channels, orig_height, orig_width]
484
+ features = self.folding(patches, output_size)
485
+
486
+ features = self.conv_projection(features)
487
+ return features
488
+
489
+
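The `unfolding`/`folding` pair above is an exact round trip for non-overlapping patches; a small self-contained check with made-up sizes:

```python
import torch
from torch import nn

patch_h = patch_w = 2
feature_map = torch.randn(1, 8, 16, 16)  # (batch, channels, height, width)

patches = nn.functional.unfold(feature_map, kernel_size=(patch_h, patch_w), stride=(patch_h, patch_w))
patches = patches.reshape(1, 8, patch_h * patch_w, -1)  # (batch, channels, pixels_per_patch, num_patches)
assert patches.shape[-1] == (16 // patch_h) * (16 // patch_w)

restored = nn.functional.fold(
    patches.reshape(1, 8 * patch_h * patch_w, -1),
    output_size=(16, 16),
    kernel_size=(patch_h, patch_w),
    stride=(patch_h, patch_w),
)
assert torch.allclose(restored, feature_map)  # exact round trip, since the patches do not overlap
```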
490
+ class MobileViTV2Encoder(nn.Module):
491
+ def __init__(self, config: MobileViTV2Config) -> None:
492
+ super().__init__()
493
+ self.config = config
494
+
495
+ self.layer = nn.ModuleList()
496
+ self.gradient_checkpointing = False
497
+
498
+ # segmentation architectures like DeepLab and PSPNet modify the strides
499
+ # of the classification backbones
500
+ dilate_layer_4 = dilate_layer_5 = False
501
+ if config.output_stride == 8:
502
+ dilate_layer_4 = True
503
+ dilate_layer_5 = True
504
+ elif config.output_stride == 16:
505
+ dilate_layer_5 = True
506
+
507
+ dilation = 1
508
+
509
+ layer_0_dim = make_divisible(
510
+ clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16
511
+ )
512
+
513
+ layer_1_dim = make_divisible(64 * config.width_multiplier, divisor=16)
514
+ layer_2_dim = make_divisible(128 * config.width_multiplier, divisor=8)
515
+ layer_3_dim = make_divisible(256 * config.width_multiplier, divisor=8)
516
+ layer_4_dim = make_divisible(384 * config.width_multiplier, divisor=8)
517
+ layer_5_dim = make_divisible(512 * config.width_multiplier, divisor=8)
518
+
519
+ layer_1 = MobileViTV2MobileNetLayer(
520
+ config,
521
+ in_channels=layer_0_dim,
522
+ out_channels=layer_1_dim,
523
+ stride=1,
524
+ num_stages=1,
525
+ )
526
+ self.layer.append(layer_1)
527
+
528
+ layer_2 = MobileViTV2MobileNetLayer(
529
+ config,
530
+ in_channels=layer_1_dim,
531
+ out_channels=layer_2_dim,
532
+ stride=2,
533
+ num_stages=2,
534
+ )
535
+ self.layer.append(layer_2)
536
+
537
+ layer_3 = MobileViTV2Layer(
538
+ config,
539
+ in_channels=layer_2_dim,
540
+ out_channels=layer_3_dim,
541
+ attn_unit_dim=make_divisible(config.base_attn_unit_dims[0] * config.width_multiplier, divisor=8),
542
+ n_attn_blocks=config.n_attn_blocks[0],
543
+ )
544
+ self.layer.append(layer_3)
545
+
546
+ if dilate_layer_4:
547
+ dilation *= 2
548
+
549
+ layer_4 = MobileViTV2Layer(
550
+ config,
551
+ in_channels=layer_3_dim,
552
+ out_channels=layer_4_dim,
553
+ attn_unit_dim=make_divisible(config.base_attn_unit_dims[1] * config.width_multiplier, divisor=8),
554
+ n_attn_blocks=config.n_attn_blocks[1],
555
+ dilation=dilation,
556
+ )
557
+ self.layer.append(layer_4)
558
+
559
+ if dilate_layer_5:
560
+ dilation *= 2
561
+
562
+ layer_5 = MobileViTV2Layer(
563
+ config,
564
+ in_channels=layer_4_dim,
565
+ out_channels=layer_5_dim,
566
+ attn_unit_dim=make_divisible(config.base_attn_unit_dims[2] * config.width_multiplier, divisor=8),
567
+ n_attn_blocks=config.n_attn_blocks[2],
568
+ dilation=dilation,
569
+ )
570
+ self.layer.append(layer_5)
571
+
572
+ def forward(
573
+ self,
574
+ hidden_states: torch.Tensor,
575
+ output_hidden_states: bool = False,
576
+ return_dict: bool = True,
577
+ ) -> Union[tuple, BaseModelOutputWithNoAttention]:
578
+ all_hidden_states = () if output_hidden_states else None
579
+
580
+ for i, layer_module in enumerate(self.layer):
581
+ if self.gradient_checkpointing and self.training:
582
+ hidden_states = self._gradient_checkpointing_func(
583
+ layer_module.__call__,
584
+ hidden_states,
585
+ )
586
+ else:
587
+ hidden_states = layer_module(hidden_states)
588
+
589
+ if output_hidden_states:
590
+ all_hidden_states = all_hidden_states + (hidden_states,)
591
+
592
+ if not return_dict:
593
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
594
+
595
+ return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
596
+
597
+
598
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTPreTrainedModel with MobileViT->MobileViTV2,mobilevit->mobilevitv2
599
+ class MobileViTV2PreTrainedModel(PreTrainedModel):
600
+ """
601
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
602
+ models.
603
+ """
604
+
605
+ config_class = MobileViTV2Config
606
+ base_model_prefix = "mobilevitv2"
607
+ main_input_name = "pixel_values"
608
+ supports_gradient_checkpointing = True
609
+
610
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
611
+ """Initialize the weights"""
612
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
613
+ # Slightly different from the TF version which uses truncated_normal for initialization
614
+ # cf https://github.com/pytorch/pytorch/pull/5617
615
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
616
+ if module.bias is not None:
617
+ module.bias.data.zero_()
618
+ elif isinstance(module, nn.LayerNorm):
619
+ module.bias.data.zero_()
620
+ module.weight.data.fill_(1.0)
621
+
622
+
623
+ MOBILEVITV2_START_DOCSTRING = r"""
624
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
625
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
626
+ behavior.
627
+
628
+ Parameters:
629
+ config ([`MobileViTV2Config`]): Model configuration class with all the parameters of the model.
630
+ Initializing with a config file does not load the weights associated with the model, only the
631
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
632
+ """
633
+
634
+ MOBILEVITV2_INPUTS_DOCSTRING = r"""
635
+ Args:
636
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
637
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
638
+ [`MobileViTImageProcessor.__call__`] for details.
639
+ output_hidden_states (`bool`, *optional*):
640
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
641
+ more detail.
642
+ return_dict (`bool`, *optional*):
643
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
644
+ """
645
+
646
+
647
+ @add_start_docstrings(
648
+ "The bare MobileViTV2 model outputting raw hidden-states without any specific head on top.",
649
+ MOBILEVITV2_START_DOCSTRING,
650
+ )
651
+ class MobileViTV2Model(MobileViTV2PreTrainedModel):
652
+ def __init__(self, config: MobileViTV2Config, expand_output: bool = True):
653
+ super().__init__(config)
654
+ self.config = config
655
+ self.expand_output = expand_output
656
+
657
+ layer_0_dim = make_divisible(
658
+ clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16
659
+ )
660
+
661
+ self.conv_stem = MobileViTV2ConvLayer(
662
+ config,
663
+ in_channels=config.num_channels,
664
+ out_channels=layer_0_dim,
665
+ kernel_size=3,
666
+ stride=2,
667
+ use_normalization=True,
668
+ use_activation=True,
669
+ )
670
+ self.encoder = MobileViTV2Encoder(config)
671
+
672
+ # Initialize weights and apply final processing
673
+ self.post_init()
674
+
675
+ def _prune_heads(self, heads_to_prune):
676
+ """Prunes heads of the model.
677
+ heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
678
+ """
679
+ for layer_index, heads in heads_to_prune.items():
680
+ mobilevitv2_layer = self.encoder.layer[layer_index]
681
+ if isinstance(mobilevitv2_layer, MobileViTV2Layer):
682
+ for transformer_layer in mobilevitv2_layer.transformer.layer:
683
+ transformer_layer.attention.prune_heads(heads)
684
+
685
+ @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING)
686
+ @add_code_sample_docstrings(
687
+ checkpoint=_CHECKPOINT_FOR_DOC,
688
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
689
+ config_class=_CONFIG_FOR_DOC,
690
+ modality="vision",
691
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
692
+ )
693
+ def forward(
694
+ self,
695
+ pixel_values: Optional[torch.Tensor] = None,
696
+ output_hidden_states: Optional[bool] = None,
697
+ return_dict: Optional[bool] = None,
698
+ ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
699
+ output_hidden_states = (
700
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
701
+ )
702
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
703
+
704
+ if pixel_values is None:
705
+ raise ValueError("You have to specify pixel_values")
706
+
707
+ embedding_output = self.conv_stem(pixel_values)
708
+
709
+ encoder_outputs = self.encoder(
710
+ embedding_output,
711
+ output_hidden_states=output_hidden_states,
712
+ return_dict=return_dict,
713
+ )
714
+
715
+ if self.expand_output:
716
+ last_hidden_state = encoder_outputs[0]
717
+
718
+ # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
719
+ pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
720
+ else:
721
+ last_hidden_state = encoder_outputs[0]
722
+ pooled_output = None
723
+
724
+ if not return_dict:
725
+ output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
726
+ return output + encoder_outputs[1:]
727
+
728
+ return BaseModelOutputWithPoolingAndNoAttention(
729
+ last_hidden_state=last_hidden_state,
730
+ pooler_output=pooled_output,
731
+ hidden_states=encoder_outputs.hidden_states,
732
+ )
733
+
734
+
735
+ @add_start_docstrings(
736
+ """
737
+ MobileViTV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
738
+ ImageNet.
739
+ """,
740
+ MOBILEVITV2_START_DOCSTRING,
741
+ )
742
+ class MobileViTV2ForImageClassification(MobileViTV2PreTrainedModel):
743
+ def __init__(self, config: MobileViTV2Config) -> None:
744
+ super().__init__(config)
745
+
746
+ self.num_labels = config.num_labels
747
+ self.mobilevitv2 = MobileViTV2Model(config)
748
+
749
+ out_channels = make_divisible(512 * config.width_multiplier, divisor=8) # layer 5 output dimension
750
+ # Classifier head
751
+ self.classifier = (
752
+ nn.Linear(in_features=out_channels, out_features=config.num_labels)
753
+ if config.num_labels > 0
754
+ else nn.Identity()
755
+ )
756
+
757
+ # Initialize weights and apply final processing
758
+ self.post_init()
759
+
760
+ @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING)
761
+ @add_code_sample_docstrings(
762
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
763
+ output_type=ImageClassifierOutputWithNoAttention,
764
+ config_class=_CONFIG_FOR_DOC,
765
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
766
+ )
767
+ def forward(
768
+ self,
769
+ pixel_values: Optional[torch.Tensor] = None,
770
+ output_hidden_states: Optional[bool] = None,
771
+ labels: Optional[torch.Tensor] = None,
772
+ return_dict: Optional[bool] = None,
773
+ ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
774
+ r"""
775
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
776
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
777
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
778
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
779
+ """
780
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
781
+
782
+ outputs = self.mobilevitv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
783
+
784
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
785
+
786
+ logits = self.classifier(pooled_output)
787
+
788
+ loss = None
789
+ if labels is not None:
790
+ if self.config.problem_type is None:
791
+ if self.num_labels == 1:
792
+ self.config.problem_type = "regression"
793
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
794
+ self.config.problem_type = "single_label_classification"
795
+ else:
796
+ self.config.problem_type = "multi_label_classification"
797
+
798
+ if self.config.problem_type == "regression":
799
+ loss_fct = MSELoss()
800
+ if self.num_labels == 1:
801
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
802
+ else:
803
+ loss = loss_fct(logits, labels)
804
+ elif self.config.problem_type == "single_label_classification":
805
+ loss_fct = CrossEntropyLoss()
806
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
807
+ elif self.config.problem_type == "multi_label_classification":
808
+ loss_fct = BCEWithLogitsLoss()
809
+ loss = loss_fct(logits, labels)
810
+
811
+ if not return_dict:
812
+ output = (logits,) + outputs[2:]
813
+ return ((loss,) + output) if loss is not None else output
814
+
815
+ return ImageClassifierOutputWithNoAttention(
816
+ loss=loss,
817
+ logits=logits,
818
+ hidden_states=outputs.hidden_states,
819
+ )
820
+
821
+
822
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTASPPPooling with MobileViT->MobileViTV2
823
+ class MobileViTV2ASPPPooling(nn.Module):
824
+ def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int) -> None:
825
+ super().__init__()
826
+
827
+ self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)
828
+
829
+ self.conv_1x1 = MobileViTV2ConvLayer(
830
+ config,
831
+ in_channels=in_channels,
832
+ out_channels=out_channels,
833
+ kernel_size=1,
834
+ stride=1,
835
+ use_normalization=True,
836
+ use_activation="relu",
837
+ )
838
+
839
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
840
+ spatial_size = features.shape[-2:]
841
+ features = self.global_pool(features)
842
+ features = self.conv_1x1(features)
843
+ features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False)
844
+ return features
845
+
846
+
847
+ class MobileViTV2ASPP(nn.Module):
848
+ """
849
+ ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587
850
+ """
851
+
852
+ def __init__(self, config: MobileViTV2Config) -> None:
853
+ super().__init__()
854
+
855
+ encoder_out_channels = make_divisible(512 * config.width_multiplier, divisor=8) # layer 5 output dimension
856
+ in_channels = encoder_out_channels
857
+ out_channels = config.aspp_out_channels
858
+
859
+ if len(config.atrous_rates) != 3:
860
+ raise ValueError("Expected 3 values for atrous_rates")
861
+
862
+ self.convs = nn.ModuleList()
863
+
864
+ in_projection = MobileViTV2ConvLayer(
865
+ config,
866
+ in_channels=in_channels,
867
+ out_channels=out_channels,
868
+ kernel_size=1,
869
+ use_activation="relu",
870
+ )
871
+ self.convs.append(in_projection)
872
+
873
+ self.convs.extend(
874
+ [
875
+ MobileViTV2ConvLayer(
876
+ config,
877
+ in_channels=in_channels,
878
+ out_channels=out_channels,
879
+ kernel_size=3,
880
+ dilation=rate,
881
+ use_activation="relu",
882
+ )
883
+ for rate in config.atrous_rates
884
+ ]
885
+ )
886
+
887
+ pool_layer = MobileViTV2ASPPPooling(config, in_channels, out_channels)
888
+ self.convs.append(pool_layer)
889
+
890
+ self.project = MobileViTV2ConvLayer(
891
+ config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu"
892
+ )
893
+
894
+ self.dropout = nn.Dropout(p=config.aspp_dropout_prob)
895
+
896
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
897
+ pyramid = []
898
+ for conv in self.convs:
899
+ pyramid.append(conv(features))
900
+ pyramid = torch.cat(pyramid, dim=1)
901
+
902
+ pooled_features = self.project(pyramid)
903
+ pooled_features = self.dropout(pooled_features)
904
+ return pooled_features
905
+
906
+
907
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTDeepLabV3 with MobileViT->MobileViTV2
908
+ class MobileViTV2DeepLabV3(nn.Module):
909
+ """
910
+ DeepLabv3 architecture: https://arxiv.org/abs/1706.05587
911
+ """
912
+
913
+ def __init__(self, config: MobileViTV2Config) -> None:
914
+ super().__init__()
915
+ self.aspp = MobileViTV2ASPP(config)
916
+
917
+ self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
918
+
919
+ self.classifier = MobileViTV2ConvLayer(
920
+ config,
921
+ in_channels=config.aspp_out_channels,
922
+ out_channels=config.num_labels,
923
+ kernel_size=1,
924
+ use_normalization=False,
925
+ use_activation=False,
926
+ bias=True,
927
+ )
928
+
929
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
930
+ features = self.aspp(hidden_states[-1])
931
+ features = self.dropout(features)
932
+ features = self.classifier(features)
933
+ return features
934
+
935
+
936
+ @add_start_docstrings(
937
+ """
938
+ MobileViTV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.
939
+ """,
940
+ MOBILEVITV2_START_DOCSTRING,
941
+ )
942
+ class MobileViTV2ForSemanticSegmentation(MobileViTV2PreTrainedModel):
943
+ def __init__(self, config: MobileViTV2Config) -> None:
944
+ super().__init__(config)
945
+
946
+ self.num_labels = config.num_labels
947
+ self.mobilevitv2 = MobileViTV2Model(config, expand_output=False)
948
+ self.segmentation_head = MobileViTV2DeepLabV3(config)
949
+
950
+ # Initialize weights and apply final processing
951
+ self.post_init()
952
+
953
+ @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING)
954
+ @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
955
+ def forward(
956
+ self,
957
+ pixel_values: Optional[torch.Tensor] = None,
958
+ labels: Optional[torch.Tensor] = None,
959
+ output_hidden_states: Optional[bool] = None,
960
+ return_dict: Optional[bool] = None,
961
+ ) -> Union[tuple, SemanticSegmenterOutput]:
962
+ r"""
963
+ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
964
+ Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
965
+ config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
966
+
967
+ Returns:
968
+
969
+ Examples:
970
+
971
+ ```python
972
+ >>> import requests
973
+ >>> import torch
974
+ >>> from PIL import Image
975
+ >>> from transformers import AutoImageProcessor, MobileViTV2ForSemanticSegmentation
976
+
977
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
978
+ >>> image = Image.open(requests.get(url, stream=True).raw)
979
+
980
+ >>> image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
981
+ >>> model = MobileViTV2ForSemanticSegmentation.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
982
+
983
+ >>> inputs = image_processor(images=image, return_tensors="pt")
984
+
985
+ >>> with torch.no_grad():
986
+ ... outputs = model(**inputs)
987
+
988
+ >>> # logits are of shape (batch_size, num_labels, height, width)
989
+ >>> logits = outputs.logits
990
+ ```"""
991
+ output_hidden_states = (
992
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
993
+ )
994
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
995
+
996
+ outputs = self.mobilevitv2(
997
+ pixel_values,
998
+ output_hidden_states=True, # we need the intermediate hidden states
999
+ return_dict=return_dict,
1000
+ )
1001
+
1002
+ encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
1003
+
1004
+ logits = self.segmentation_head(encoder_hidden_states)
1005
+
1006
+ loss = None
1007
+ if labels is not None:
1008
+ if self.config.num_labels == 1:
1009
+ raise ValueError("The number of labels should be greater than one")
1010
+ else:
1011
+ # upsample logits to the images' original size
1012
+ upsampled_logits = nn.functional.interpolate(
1013
+ logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
1014
+ )
1015
+ loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
1016
+ loss = loss_fct(upsampled_logits, labels)
1017
+
1018
+ if not return_dict:
1019
+ if output_hidden_states:
1020
+ output = (logits,) + outputs[1:]
1021
+ else:
1022
+ output = (logits,) + outputs[2:]
1023
+ return ((loss,) + output) if loss is not None else output
1024
+
1025
+ return SemanticSegmenterOutput(
1026
+ loss=loss,
1027
+ logits=logits,
1028
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
1029
+ attentions=None,
1030
+ )
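For completeness, a classification counterpart to the segmentation example in the docstring above, using the checkpoint named in `_IMAGE_CLASS_CHECKPOINT` (it needs network access, `torch`, and `Pillow`):

```python
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, MobileViTV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])  # expected: "tabby, tabby cat"
```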
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__init__.py ADDED
@@ -0,0 +1,100 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ is_vision_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_owlvit": [
29
+ "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
30
+ "OwlViTConfig",
31
+ "OwlViTOnnxConfig",
32
+ "OwlViTTextConfig",
33
+ "OwlViTVisionConfig",
34
+ ],
35
+ "processing_owlvit": ["OwlViTProcessor"],
36
+ }
37
+
38
+
39
+ try:
40
+ if not is_vision_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
46
+ _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ _import_structure["modeling_owlvit"] = [
55
+ "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
56
+ "OwlViTModel",
57
+ "OwlViTPreTrainedModel",
58
+ "OwlViTTextModel",
59
+ "OwlViTVisionModel",
60
+ "OwlViTForObjectDetection",
61
+ ]
62
+
63
+ if TYPE_CHECKING:
64
+ from .configuration_owlvit import (
65
+ OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
66
+ OwlViTConfig,
67
+ OwlViTOnnxConfig,
68
+ OwlViTTextConfig,
69
+ OwlViTVisionConfig,
70
+ )
71
+ from .processing_owlvit import OwlViTProcessor
72
+
73
+ try:
74
+ if not is_vision_available():
75
+ raise OptionalDependencyNotAvailable()
76
+ except OptionalDependencyNotAvailable:
77
+ pass
78
+ else:
79
+ from .feature_extraction_owlvit import OwlViTFeatureExtractor
80
+ from .image_processing_owlvit import OwlViTImageProcessor
81
+
82
+ try:
83
+ if not is_torch_available():
84
+ raise OptionalDependencyNotAvailable()
85
+ except OptionalDependencyNotAvailable:
86
+ pass
87
+ else:
88
+ from .modeling_owlvit import (
89
+ OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
90
+ OwlViTForObjectDetection,
91
+ OwlViTModel,
92
+ OwlViTPreTrainedModel,
93
+ OwlViTTextModel,
94
+ OwlViTVisionModel,
95
+ )
96
+
97
+ else:
98
+ import sys
99
+
100
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
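The `_LazyModule` indirection above is what keeps `import transformers` cheap: submodules such as `modeling_owlvit` are only imported when one of their symbols is first requested. A minimal sketch of the same idea using PEP 562's module-level `__getattr__` (the submodule names here are reused only for illustration, and the real helper does considerably more):

```python
import importlib

_import_structure = {
    "configuration_owlvit": ["OwlViTConfig"],
    "processing_owlvit": ["OwlViTProcessor"],
}
# invert: symbol name -> submodule that defines it
_symbol_to_module = {sym: mod for mod, syms in _import_structure.items() for sym in syms}


def __getattr__(name):
    # called only when the attribute is missing from the package namespace, so
    # the submodule is imported on first access rather than at package import time
    if name in _symbol_to_module:
        module = importlib.import_module(f".{_symbol_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```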
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.56 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/configuration_owlvit.cpython-310.pyc ADDED
Binary file (14.6 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/convert_owlvit_original_flax_to_hf.cpython-310.pyc ADDED
Binary file (9.53 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/feature_extraction_owlvit.cpython-310.pyc ADDED
Binary file (1.02 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/image_processing_owlvit.cpython-310.pyc ADDED
Binary file (23 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/modeling_owlvit.cpython-310.pyc ADDED
Binary file (55.7 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/processing_owlvit.cpython-310.pyc ADDED
Binary file (9.52 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/configuration_owlvit.py ADDED
@@ -0,0 +1,383 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ OWL-ViT model configuration"""
16
+
17
+ import os
18
+ from collections import OrderedDict
19
+ from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...utils import TensorType
25
+
26
+ from ...configuration_utils import PretrainedConfig
27
+ from ...onnx import OnnxConfig
28
+ from ...utils import logging
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ from ..deprecated._archive_maps import OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
35
+
36
+
37
+ class OwlViTTextConfig(PretrainedConfig):
38
+ r"""
39
+ This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an
40
+ OwlViT text encoder according to the specified arguments, defining the model architecture. Instantiating a
41
+ configuration with the defaults will yield a similar configuration to that of the OwlViT
42
+ [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.
43
+
44
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
45
+ documentation from [`PretrainedConfig`] for more information.
46
+
47
+
48
+ Args:
49
+ vocab_size (`int`, *optional*, defaults to 49408):
50
+ Vocabulary size of the OWL-ViT text model. Defines the number of different tokens that can be represented
51
+ by the `inputs_ids` passed when calling [`OwlViTTextModel`].
52
+ hidden_size (`int`, *optional*, defaults to 512):
53
+ Dimensionality of the encoder layers and the pooler layer.
54
+ intermediate_size (`int`, *optional*, defaults to 2048):
55
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
56
+ num_hidden_layers (`int`, *optional*, defaults to 12):
57
+ Number of hidden layers in the Transformer encoder.
58
+ num_attention_heads (`int`, *optional*, defaults to 8):
59
+ Number of attention heads for each attention layer in the Transformer encoder.
60
+ max_position_embeddings (`int`, *optional*, defaults to 16):
61
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
62
+ just in case (e.g., 512 or 1024 or 2048).
63
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
64
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
65
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
66
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
67
+ The epsilon used by the layer normalization layers.
68
+ attention_dropout (`float`, *optional*, defaults to 0.0):
69
+ The dropout ratio for the attention probabilities.
70
+ initializer_range (`float`, *optional*, defaults to 0.02):
71
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
72
+ initializer_factor (`float`, *optional*, defaults to 1.0):
73
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
74
+ testing).
75
+ pad_token_id (`int`, *optional*, defaults to 0):
76
+ The id of the padding token in the input sequences.
77
+ bos_token_id (`int`, *optional*, defaults to 49406):
78
+ The id of the beginning-of-sequence token in the input sequences.
79
+ eos_token_id (`int`, *optional*, defaults to 49407):
80
+ The id of the end-of-sequence token in the input sequences.
81
+
82
+ Example:
83
+
84
+ ```python
85
+ >>> from transformers import OwlViTTextConfig, OwlViTTextModel
86
+
87
+ >>> # Initializing a OwlViTTextModel with google/owlvit-base-patch32 style configuration
88
+ >>> configuration = OwlViTTextConfig()
89
+
90
+ >>> # Initializing a OwlViTTextConfig from the google/owlvit-base-patch32 style configuration
91
+ >>> model = OwlViTTextModel(configuration)
92
+
93
+ >>> # Accessing the model configuration
94
+ >>> configuration = model.config
95
+ ```"""
96
+
97
+ model_type = "owlvit_text_model"
98
+
99
+ def __init__(
100
+ self,
101
+ vocab_size=49408,
102
+ hidden_size=512,
103
+ intermediate_size=2048,
104
+ num_hidden_layers=12,
105
+ num_attention_heads=8,
106
+ max_position_embeddings=16,
107
+ hidden_act="quick_gelu",
108
+ layer_norm_eps=1e-5,
109
+ attention_dropout=0.0,
110
+ initializer_range=0.02,
111
+ initializer_factor=1.0,
112
+ pad_token_id=0,
113
+ bos_token_id=49406,
114
+ eos_token_id=49407,
115
+ **kwargs,
116
+ ):
117
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
118
+
119
+ self.vocab_size = vocab_size
120
+ self.hidden_size = hidden_size
121
+ self.intermediate_size = intermediate_size
122
+ self.num_hidden_layers = num_hidden_layers
123
+ self.num_attention_heads = num_attention_heads
124
+ self.max_position_embeddings = max_position_embeddings
125
+ self.hidden_act = hidden_act
126
+ self.layer_norm_eps = layer_norm_eps
127
+ self.attention_dropout = attention_dropout
128
+ self.initializer_range = initializer_range
129
+ self.initializer_factor = initializer_factor
130
+
131
+ @classmethod
132
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
133
+ cls._set_token_in_kwargs(kwargs)
134
+
135
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
136
+
137
+ # get the text config dict if we are loading from OwlViTConfig
138
+ if config_dict.get("model_type") == "owlvit":
139
+ config_dict = config_dict["text_config"]
140
+
141
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
142
+ logger.warning(
143
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
144
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
145
+ )
146
+
147
+ return cls.from_dict(config_dict, **kwargs)
148
+
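+ # --- Illustrative usage sketch (editorial addition, not part of the upstream module) ---
+ # The `from_pretrained` override above also accepts a full OWL-ViT checkpoint and extracts its
+ # `text_config`. The repo id below is the public google/owlvit-base-patch32 checkpoint, so the
+ # snippet needs network access; it is a hedged example, not something executed in this module.
+ #
+ # >>> text_config = OwlViTTextConfig.from_pretrained("google/owlvit-base-patch32")
+ # >>> text_config.hidden_size
+ # 512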
149
+
150
+ class OwlViTVisionConfig(PretrainedConfig):
151
+ r"""
152
+ This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate
153
+ an OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a
154
+ configuration with the defaults will yield a similar configuration to that of the OWL-ViT
155
+ [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.
156
+
157
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
158
+ documentation from [`PretrainedConfig`] for more information.
159
+
160
+ Args:
161
+ hidden_size (`int`, *optional*, defaults to 768):
162
+ Dimensionality of the encoder layers and the pooler layer.
163
+ intermediate_size (`int`, *optional*, defaults to 3072):
164
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
165
+ num_hidden_layers (`int`, *optional*, defaults to 12):
166
+ Number of hidden layers in the Transformer encoder.
167
+ num_attention_heads (`int`, *optional*, defaults to 12):
168
+ Number of attention heads for each attention layer in the Transformer encoder.
169
+ num_channels (`int`, *optional*, defaults to 3):
170
+ Number of channels in the input images.
171
+ image_size (`int`, *optional*, defaults to 768):
172
+ The size (resolution) of each image.
173
+ patch_size (`int`, *optional*, defaults to 32):
174
+ The size (resolution) of each patch.
175
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
176
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
177
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
178
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
179
+ The epsilon used by the layer normalization layers.
180
+ attention_dropout (`float`, *optional*, defaults to 0.0):
181
+ The dropout ratio for the attention probabilities.
182
+ initializer_range (`float`, *optional*, defaults to 0.02):
183
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
184
+ initializer_factor (`float`, *optional*, defaults to 1.0):
185
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
186
+ testing).
187
+
188
+ Example:
189
+
190
+ ```python
191
+ >>> from transformers import OwlViTVisionConfig, OwlViTVisionModel
192
+
193
+ >>> # Initializing an OwlViTVisionConfig with google/owlvit-base-patch32 style configuration
194
+ >>> configuration = OwlViTVisionConfig()
195
+
196
+ >>> # Initializing an OwlViTVisionModel from the google/owlvit-base-patch32 style configuration
197
+ >>> model = OwlViTVisionModel(configuration)
198
+
199
+ >>> # Accessing the model configuration
200
+ >>> configuration = model.config
201
+ ```"""
202
+
203
+ model_type = "owlvit_vision_model"
204
+
205
+ def __init__(
206
+ self,
207
+ hidden_size=768,
208
+ intermediate_size=3072,
209
+ num_hidden_layers=12,
210
+ num_attention_heads=12,
211
+ num_channels=3,
212
+ image_size=768,
213
+ patch_size=32,
214
+ hidden_act="quick_gelu",
215
+ layer_norm_eps=1e-5,
216
+ attention_dropout=0.0,
217
+ initializer_range=0.02,
218
+ initializer_factor=1.0,
219
+ **kwargs,
220
+ ):
221
+ super().__init__(**kwargs)
222
+
223
+ self.hidden_size = hidden_size
224
+ self.intermediate_size = intermediate_size
225
+ self.num_hidden_layers = num_hidden_layers
226
+ self.num_attention_heads = num_attention_heads
227
+ self.num_channels = num_channels
228
+ self.image_size = image_size
229
+ self.patch_size = patch_size
230
+ self.hidden_act = hidden_act
231
+ self.layer_norm_eps = layer_norm_eps
232
+ self.attention_dropout = attention_dropout
233
+ self.initializer_range = initializer_range
234
+ self.initializer_factor = initializer_factor
235
+
236
+ @classmethod
237
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
238
+ cls._set_token_in_kwargs(kwargs)
239
+
240
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
241
+
242
+ # get the vision config dict if we are loading from OwlViTConfig
243
+ if config_dict.get("model_type") == "owlvit":
244
+ config_dict = config_dict["vision_config"]
245
+
246
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
247
+ logger.warning(
248
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
249
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
250
+ )
251
+
252
+ return cls.from_dict(config_dict, **kwargs)
253
+
254
+
255
+ class OwlViTConfig(PretrainedConfig):
256
+ r"""
257
+ [`OwlViTConfig`] is the configuration class to store the configuration of an [`OwlViTModel`]. It is used to
258
+ instantiate an OWL-ViT model according to the specified arguments, defining the text model and vision model
259
+ configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT
260
+ [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.
261
+
262
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
263
+ documentation from [`PretrainedConfig`] for more information.
264
+
265
+ Args:
266
+ text_config (`dict`, *optional*):
267
+ Dictionary of configuration options used to initialize [`OwlViTTextConfig`].
268
+ vision_config (`dict`, *optional*):
269
+ Dictionary of configuration options used to initialize [`OwlViTVisionConfig`].
270
+ projection_dim (`int`, *optional*, defaults to 512):
271
+ Dimensionality of text and vision projection layers.
272
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
273
+ The initial value of the *logit_scale* parameter. Default is used as per the original OWL-ViT
274
+ implementation.
275
+ return_dict (`bool`, *optional*, defaults to `True`):
276
+ Whether or not the model should return a dictionary. If `False`, returns a tuple.
277
+ kwargs (*optional*):
278
+ Dictionary of keyword arguments.
279
+ """
280
+
281
+ model_type = "owlvit"
282
+
283
+ def __init__(
284
+ self,
285
+ text_config=None,
286
+ vision_config=None,
287
+ projection_dim=512,
288
+ logit_scale_init_value=2.6592,
289
+ return_dict=True,
290
+ **kwargs,
291
+ ):
292
+ super().__init__(**kwargs)
293
+
294
+ if text_config is None:
295
+ text_config = {}
296
+ logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
297
+
298
+ if vision_config is None:
299
+ vision_config = {}
300
+ logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
301
+
302
+ self.text_config = OwlViTTextConfig(**text_config)
303
+ self.vision_config = OwlViTVisionConfig(**vision_config)
304
+
305
+ self.projection_dim = projection_dim
306
+ self.logit_scale_init_value = logit_scale_init_value
307
+ self.return_dict = return_dict
308
+ self.initializer_factor = 1.0
309
+
310
+ @classmethod
311
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
312
+ cls._set_token_in_kwargs(kwargs)
313
+
314
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
315
+
316
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
317
+ logger.warning(
318
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
319
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
320
+ )
321
+
322
+ return cls.from_dict(config_dict, **kwargs)
323
+
324
+ @classmethod
325
+ def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
326
+ r"""
327
+ Instantiate a [`OwlViTConfig`] (or a derived class) from owlvit text model configuration and owlvit vision
328
+ model configuration.
329
+
330
+ Returns:
331
+ [`OwlViTConfig`]: An instance of a configuration object
332
+ """
333
+ config_dict = {}
334
+ config_dict["text_config"] = text_config
335
+ config_dict["vision_config"] = vision_config
336
+
337
+ return cls.from_dict(config_dict, **kwargs)
338
+
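+ # --- Illustrative usage sketch (editorial addition, not part of the upstream module) ---
+ # Building a full OwlViTConfig out of the two sub-config classes defined above; only names
+ # from this module are used, and the expected `projection_dim` comes from the defaults.
+ #
+ # >>> text_config = OwlViTTextConfig()
+ # >>> vision_config = OwlViTVisionConfig()
+ # >>> config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
+ # >>> config.projection_dim
+ # 512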
339
+
340
+ class OwlViTOnnxConfig(OnnxConfig):
341
+ @property
342
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
343
+ return OrderedDict(
344
+ [
345
+ ("input_ids", {0: "batch", 1: "sequence"}),
346
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
347
+ ("attention_mask", {0: "batch", 1: "sequence"}),
348
+ ]
349
+ )
350
+
351
+ @property
352
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
353
+ return OrderedDict(
354
+ [
355
+ ("logits_per_image", {0: "batch"}),
356
+ ("logits_per_text", {0: "batch"}),
357
+ ("text_embeds", {0: "batch"}),
358
+ ("image_embeds", {0: "batch"}),
359
+ ]
360
+ )
361
+
362
+ @property
363
+ def atol_for_validation(self) -> float:
364
+ return 1e-4
365
+
366
+ def generate_dummy_inputs(
367
+ self,
368
+ processor: "ProcessorMixin",
369
+ batch_size: int = -1,
370
+ seq_length: int = -1,
371
+ framework: Optional["TensorType"] = None,
372
+ ) -> Mapping[str, Any]:
373
+ text_input_dict = super().generate_dummy_inputs(
374
+ processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
375
+ )
376
+ image_input_dict = super().generate_dummy_inputs(
377
+ processor.image_processor, batch_size=batch_size, framework=framework
378
+ )
379
+ return {**text_input_dict, **image_input_dict}
380
+
381
+ @property
382
+ def default_onnx_opset(self) -> int:
383
+ return 14
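+ # --- Illustrative usage sketch (editorial addition, not part of the upstream module) ---
+ # Inspecting the ONNX export metadata declared above; no checkpoint download is required
+ # because only classes defined in this module are instantiated.
+ #
+ # >>> onnx_config = OwlViTOnnxConfig(OwlViTConfig())
+ # >>> list(onnx_config.inputs)
+ # ['input_ids', 'pixel_values', 'attention_mask']
+ # >>> onnx_config.default_onnx_opset
+ # 14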
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py ADDED
@@ -0,0 +1,406 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert OWL-ViT checkpoints from the original repository. URL:
16
+ https://github.com/google-research/scenic/tree/main/scenic/projects/owl_vit"""
17
+
18
+ import argparse
19
+ import collections.abc  # `collections.abc` is imported explicitly; the top-level aliases were removed in Python 3.10
20
+
21
+ import jax
22
+ import jax.numpy as jnp
23
+ import torch
24
+ import torch.nn as nn
25
+ from clip.model import CLIP
26
+ from flax.training import checkpoints
27
+ from huggingface_hub import Repository
28
+
29
+ from transformers import (
30
+ CLIPTokenizer,
31
+ OwlViTConfig,
32
+ OwlViTForObjectDetection,
33
+ OwlViTImageProcessor,
34
+ OwlViTModel,
35
+ OwlViTProcessor,
36
+ )
37
+
38
+
39
+ CONFIGS = {
40
+ "vit_b32": {
41
+ "embed_dim": 512,
42
+ "image_resolution": 768,
43
+ "context_length": 16,
44
+ "vocab_size": 49408,
45
+ "vision_layers": 12,
46
+ "vision_width": 768,
47
+ "vision_patch_size": 32,
48
+ "transformer_width": 512,
49
+ "transformer_heads": 8,
50
+ "transformer_layers": 12,
51
+ },
52
+ "vit_b16": {
53
+ "embed_dim": 512,
54
+ "image_resolution": 768,
55
+ "context_length": 16,
56
+ "vocab_size": 49408,
57
+ "vision_layers": 12,
58
+ "vision_width": 768,
59
+ "vision_patch_size": 16,
60
+ "transformer_width": 512,
61
+ "transformer_heads": 8,
62
+ "transformer_layers": 12,
63
+ },
64
+ "vit_l14": {
65
+ "embed_dim": 768,
66
+ "image_resolution": 840,
67
+ "context_length": 16,
68
+ "vocab_size": 49408,
69
+ "vision_layers": 24,
70
+ "vision_width": 1024,
71
+ "vision_patch_size": 14,
72
+ "transformer_width": 768,
73
+ "transformer_heads": 12,
74
+ "transformer_layers": 12,
75
+ },
76
+ }
77
+
78
+
79
+ def flatten_nested_dict(params, parent_key="", sep="/"):
80
+ items = []
81
+
82
+ for k, v in params.items():
83
+ new_key = parent_key + sep + k if parent_key else k
84
+
85
+ if isinstance(v, collections.abc.MutableMapping):
86
+ items.extend(flatten_nested_dict(v, new_key, sep=sep).items())
87
+ else:
88
+ items.append((new_key, v))
89
+ return dict(items)
90
+
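+ # --- Illustrative sketch (editorial addition, not part of the upstream script) ---
+ # What the helper above produces for a small nested parameter dict:
+ #
+ # >>> flatten_nested_dict({"backbone": {"clip": {"scale": 1.0}}, "bias": 0.0})
+ # {'backbone/clip/scale': 1.0, 'bias': 0.0}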
91
+
92
+ def to_f32(params):
93
+ return jax.tree_util.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, params)
94
+
95
+
96
+ def copy_attn_layer(hf_attn_layer, pt_attn_layer):
97
+ q_proj, k_proj, v_proj = pt_attn_layer.in_proj_weight.chunk(3, dim=0)
98
+ q_proj_bias, k_proj_bias, v_proj_bias = pt_attn_layer.in_proj_bias.chunk(3, dim=0)
99
+
100
+ out_proj_weights = pt_attn_layer.out_proj.weight
101
+ out_proj_bias = pt_attn_layer.out_proj.bias
102
+
103
+ hf_attn_layer.q_proj.weight.data = q_proj
104
+ hf_attn_layer.q_proj.bias.data = q_proj_bias
105
+
106
+ hf_attn_layer.k_proj.weight.data = k_proj
107
+ hf_attn_layer.k_proj.bias.data = k_proj_bias
108
+
109
+ hf_attn_layer.v_proj.weight.data = v_proj
110
+ hf_attn_layer.v_proj.bias.data = v_proj_bias
111
+
112
+ hf_attn_layer.out_proj.weight = out_proj_weights
113
+ hf_attn_layer.out_proj.bias = out_proj_bias
114
+
115
+
116
+ def copy_mlp(hf_mlp, pt_mlp):
117
+ copy_linear(hf_mlp.fc1, pt_mlp.c_fc)
118
+ copy_linear(hf_mlp.fc2, pt_mlp.c_proj)
119
+
120
+
121
+ def copy_linear(hf_linear, pt_linear):
122
+ hf_linear.weight = pt_linear.weight
123
+ hf_linear.bias = pt_linear.bias
124
+
125
+
126
+ def copy_layer(hf_layer, pt_layer):
127
+ # copy layer norms
128
+ copy_linear(hf_layer.layer_norm1, pt_layer.ln_1)
129
+ copy_linear(hf_layer.layer_norm2, pt_layer.ln_2)
130
+
131
+ # copy MLP
132
+ copy_mlp(hf_layer.mlp, pt_layer.mlp)
133
+
134
+ # copy attn
135
+ copy_attn_layer(hf_layer.self_attn, pt_layer.attn)
136
+
137
+
138
+ def copy_layers(hf_layers, pt_layers):
139
+ for hf_layer, pt_layer in zip(hf_layers, pt_layers):
140
+ copy_layer(hf_layer, pt_layer)
141
+
142
+
143
+ def copy_encoder(hf_encoder, pt_model):
144
+ # copy embeds
145
+ hf_encoder.embeddings.token_embedding.weight = pt_model.token_embedding.weight
146
+ hf_encoder.embeddings.position_embedding.weight.data = pt_model.positional_embedding
147
+
148
+ # copy layer norm
149
+ copy_linear(hf_encoder.final_layer_norm, pt_model.ln_final)
150
+
151
+ # copy hidden layers
152
+ copy_layers(hf_encoder.encoder.layers, pt_model.transformer.resblocks)
153
+
154
+
155
+ def copy_text_model_and_projection(hf_model, pt_model):
156
+ # copy projection
157
+ hf_model.text_projection.weight.data = pt_model.text_projection.data.T
158
+
159
+ # copy text encoder
160
+ copy_encoder(hf_model.text_model, pt_model)
161
+
162
+
163
+ def copy_vision_model_and_projection(hf_model, pt_model):
164
+ # copy projection
165
+ hf_model.visual_projection.weight.data = pt_model.visual.proj.data.T
166
+
167
+ # copy layer norms
168
+ copy_linear(hf_model.vision_model.pre_layernorm, pt_model.visual.ln_pre)
169
+ copy_linear(hf_model.vision_model.post_layernorm, pt_model.visual.ln_post)
170
+
171
+ # copy embeds
172
+ hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_model.visual.conv1.weight.data
173
+ hf_model.vision_model.embeddings.class_embedding = pt_model.visual.class_embedding
174
+ hf_model.vision_model.embeddings.position_embedding.weight.data = pt_model.visual.positional_embedding.data
175
+
176
+ # copy encoder
177
+ copy_layers(hf_model.vision_model.encoder.layers, pt_model.visual.transformer.resblocks)
178
+
179
+
180
+ def copy_class_merge_token(hf_model, flax_params):
181
+ flax_class_token_params = flatten_nested_dict(flax_params["backbone"]["merged_class_token"])
182
+
183
+ weight = torch.from_numpy(flax_class_token_params["scale"])
184
+ bias = torch.from_numpy(flax_class_token_params["bias"])
185
+ hf_model.layer_norm.weight = nn.Parameter(weight)
186
+ hf_model.layer_norm.bias = nn.Parameter(bias)
187
+
188
+
189
+ def copy_class_box_heads(hf_model, flax_params):
190
+ pt_params = hf_model.state_dict()
191
+ new_params = {}
192
+
193
+ # Rename class prediction head flax params to pytorch HF
194
+ flax_class_params = flatten_nested_dict(flax_params["class_head"])
195
+
196
+ for flax_key, v in flax_class_params.items():
197
+ torch_key = flax_key.replace("/", ".")
198
+ torch_key = torch_key.replace(".kernel", ".weight")
199
+ torch_key = torch_key.replace("Dense_0", "dense0")
200
+ torch_key = "class_head." + torch_key
201
+
202
+ if "weight" in torch_key and v.ndim == 2:
203
+ v = v.T
204
+
205
+ new_params[torch_key] = nn.Parameter(torch.from_numpy(v))
206
+
207
+ # Rename box prediction box flax params to pytorch HF
208
+ flax_box_params = flatten_nested_dict(flax_params["obj_box_head"])
209
+
210
+ for flax_key, v in flax_box_params.items():
211
+ torch_key = flax_key.replace("/", ".")
212
+ torch_key = torch_key.replace(".kernel", ".weight")
213
+ torch_key = torch_key.replace("_", "").lower()
214
+ torch_key = "box_head." + torch_key
215
+
216
+ if "weight" in torch_key and v.ndim == 2:
217
+ v = v.T
218
+
219
+ new_params[torch_key] = nn.Parameter(torch.from_numpy(v))
220
+
221
+ # Copy flax params to PyTorch params
222
+ for name, param in new_params.items():
223
+ if name in pt_params.keys():
224
+ pt_params[name].copy_(param)
225
+
226
+
227
+ def copy_flax_attn_params(hf_backbone, flax_attn_params):
228
+ for k, v in flax_attn_params.items():
229
+ if k.startswith("transformer"):
230
+ torch_key = k.replace("transformer.resblocks", "text_model.encoder.layers")
231
+ else:
232
+ torch_key = k.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
233
+
234
+ torch_key = torch_key.replace("attn", "self_attn")
235
+ torch_key = torch_key.replace("key", "k_proj")
236
+ torch_key = torch_key.replace("value", "v_proj")
237
+ torch_key = torch_key.replace("query", "q_proj")
238
+ torch_key = torch_key.replace("out", "out_proj")
239
+
240
+ if "bias" in torch_key and v.ndim == 2:
241
+ shape = v.shape[0] * v.shape[1]
242
+ v = v.reshape(shape)
243
+
244
+ if "weight" in torch_key and "out" in torch_key:
245
+ shape = (v.shape[0] * v.shape[1], v.shape[2])
246
+ v = v.reshape(shape).T
247
+
248
+ if "weight" in torch_key and "out" not in torch_key:
249
+ shape = (v.shape[0], v.shape[1] * v.shape[2])
250
+ v = v.reshape(shape).T
251
+
252
+ # Copy flax CLIP attn params to HF PyTorch params
253
+ v = torch.from_numpy(v)
254
+ hf_backbone.state_dict()[torch_key].copy_(v)
255
+
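+ # --- Illustrative sketch (editorial addition, not part of the upstream script) ---
+ # The reshapes above fold Flax's per-head attention kernels into the 2D matrices the HF
+ # PyTorch layers expect. A minimal stand-alone illustration with numpy (shapes chosen to
+ # match the base text encoder: 8 heads x 64 dims = 512):
+ #
+ # >>> import numpy as np
+ # >>> v = np.zeros((512, 8, 64))  # a q/k/v kernel of shape (hidden_dim, num_heads, head_dim)
+ # >>> v.reshape(v.shape[0], v.shape[1] * v.shape[2]).T.shape
+ # (512, 512)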
256
+
257
+ def _convert_attn_layers(params):
258
+ new_params = {}
259
+ processed_attn_layers = []
260
+
261
+ for k, v in params.items():
262
+ if "attn." in k:
263
+ base = k[: k.rindex("attn.") + 5]
264
+ if base in processed_attn_layers:
265
+ continue
266
+
267
+ processed_attn_layers.append(base)
268
+ dim = params[base + "out.weight"].shape[-1]
269
+ new_params[base + "out_proj.weight"] = params[base + "out.weight"].reshape(dim, dim).T
270
+ new_params[base + "out_proj.bias"] = params[base + "out.bias"]
271
+ else:
272
+ new_params[k] = v
273
+ return new_params
274
+
275
+
276
+ def convert_clip_backbone(flax_params, torch_config):
277
+ torch_model = CLIP(**torch_config)
278
+ torch_model.eval()
279
+ torch_clip_params = torch_model.state_dict()
280
+
281
+ flax_clip_params = flatten_nested_dict(flax_params["backbone"]["clip"])
282
+ new_torch_params = {}
283
+
284
+ for flax_key, v in flax_clip_params.items():
285
+ torch_key = flax_key.replace("/", ".")
286
+ torch_key = torch_key.replace("text.token_embedding.embedding", "token_embedding.kernel")
287
+
288
+ if (
289
+ torch_key.startswith("text.transformer")
290
+ or torch_key.startswith("text.text_projection")
291
+ or torch_key.startswith("text.ln_final")
292
+ or torch_key.startswith("text.positional_embedding")
293
+ ):
294
+ torch_key = torch_key[5:]
295
+
296
+ torch_key = torch_key.replace("text_projection.kernel", "text_projection")
297
+ torch_key = torch_key.replace("visual.proj.kernel", "visual.proj")
298
+ torch_key = torch_key.replace(".scale", ".weight")
299
+ torch_key = torch_key.replace(".kernel", ".weight")
300
+
301
+ if "conv" in torch_key or "downsample.0.weight" in torch_key:
302
+ v = v.transpose(3, 2, 0, 1)
303
+
304
+ elif "weight" in torch_key and v.ndim == 2 and "embedding" not in torch_key:
305
+ # Fully connected layers are transposed, embeddings are not
306
+ v = v.T
307
+
308
+ new_torch_params[torch_key] = v
309
+
310
+ attn_params = _convert_attn_layers(new_torch_params)
311
+ new_torch_params.update(attn_params)
312
+ attn_params = {}
313
+
314
+ # Copy flax CLIP backbone params to PyTorch params
315
+ for name, param in new_torch_params.items():
316
+ if name in torch_clip_params.keys():
317
+ new_param = torch.from_numpy(new_torch_params[name])
318
+ torch_clip_params[name].copy_(new_param)
319
+ else:
320
+ attn_params[name] = param
321
+
322
+ return torch_clip_params, torch_model, attn_params
323
+
324
+
325
+ @torch.no_grad()
326
+ def convert_owlvit_checkpoint(pt_backbone, flax_params, attn_params, pytorch_dump_folder_path, config_path=None):
327
+ """
328
+ Copy/paste/tweak model's weights to transformers design.
329
+ """
330
+ repo = Repository(pytorch_dump_folder_path, clone_from=f"google/{pytorch_dump_folder_path}")
331
+ repo.git_pull()
332
+
333
+ if config_path is not None:
334
+ config = OwlViTConfig.from_pretrained(config_path)
335
+ else:
336
+ config = OwlViTConfig()
337
+
338
+ hf_backbone = OwlViTModel(config).eval()
339
+ hf_model = OwlViTForObjectDetection(config).eval()
340
+
341
+ copy_text_model_and_projection(hf_backbone, pt_backbone)
342
+ copy_vision_model_and_projection(hf_backbone, pt_backbone)
343
+ hf_backbone.logit_scale = pt_backbone.logit_scale
344
+ copy_flax_attn_params(hf_backbone, attn_params)
345
+
346
+ hf_model.owlvit = hf_backbone
347
+ copy_class_merge_token(hf_model, flax_params)
348
+ copy_class_box_heads(hf_model, flax_params)
349
+
350
+ # Save HF model
351
+ hf_model.save_pretrained(repo.local_dir)
352
+
353
+ # Initialize image processor
354
+ image_processor = OwlViTImageProcessor(
355
+ size=config.vision_config.image_size, crop_size=config.vision_config.image_size
356
+ )
357
+ # Initialize tokenizer
358
+ tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32", pad_token="!", model_max_length=16)
359
+
360
+ # Initialize processor
361
+ processor = OwlViTProcessor(image_processor=image_processor, tokenizer=tokenizer)
362
+ image_processor.save_pretrained(repo.local_dir)
363
+ processor.save_pretrained(repo.local_dir)
364
+
365
+ repo.git_add()
366
+ repo.git_commit("Upload model and processor")
367
+ repo.git_push()
368
+
369
+
370
+ if __name__ == "__main__":
371
+ parser = argparse.ArgumentParser()
372
+ # Required parameters
373
+ parser.add_argument(
374
+ "--owlvit_version",
375
+ default=None,
376
+ type=str,
377
+ required=True,
378
+ help="OWL-ViT model name [clip_b16, clip_b32, clip_l14].",
379
+ )
380
+ parser.add_argument(
381
+ "--owlvit_checkpoint", default=None, type=str, required=True, help="Path to flax model checkpoint."
382
+ )
383
+ parser.add_argument("--hf_config", default=None, type=str, required=True, help="Path to HF model config.")
384
+ parser.add_argument(
385
+ "--pytorch_dump_folder_path", default="hf_model", type=str, help="Path to the output PyTorch model."
386
+ )
387
+ args = parser.parse_args()
388
+
389
+ # Initialize PyTorch CLIP model
390
+ model_name = args.owlvit_version
391
+ if model_name == "clip_b16":
392
+ torch_config = CONFIGS["vit_b16"]
393
+ elif model_name == "clip_b32":
394
+ torch_config = CONFIGS["vit_b32"]
395
+ elif model_name == "clip_l14":
396
+ torch_config = CONFIGS["vit_l14"]
397
+
398
+ # Load from checkpoint and convert params to float-32
399
+ variables = checkpoints.restore_checkpoint(args.owlvit_checkpoint, target=None)["optimizer"]["target"]
400
+ flax_params = jax.tree_util.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, variables)
401
+ del variables
402
+
403
+ # Convert CLIP backbone
404
+ pt_backbone_params, clip_pt, attn_params = convert_clip_backbone(flax_params, torch_config)
405
+
406
+ convert_owlvit_checkpoint(clip_pt, flax_params, attn_params, args.pytorch_dump_folder_path, args.hf_config)
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/feature_extraction_owlvit.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for OwlViT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_owlvit import OwlViTImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class OwlViTFeatureExtractor(OwlViTImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
30
+ " use OwlViTImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/image_processing_owlvit.py ADDED
@@ -0,0 +1,611 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for OwlViT"""
16
+
17
+ import warnings
18
+ from typing import Dict, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
23
+ from ...image_transforms import (
24
+ center_crop,
25
+ center_to_corners_format,
26
+ rescale,
27
+ resize,
28
+ to_channel_dimension_format,
29
+ )
30
+ from ...image_utils import (
31
+ OPENAI_CLIP_MEAN,
32
+ OPENAI_CLIP_STD,
33
+ ChannelDimension,
34
+ ImageInput,
35
+ PILImageResampling,
36
+ infer_channel_dimension_format,
37
+ is_scaled_image,
38
+ make_list_of_images,
39
+ to_numpy_array,
40
+ valid_images,
41
+ validate_kwargs,
42
+ validate_preprocess_arguments,
43
+ )
44
+ from ...utils import TensorType, is_torch_available, logging
45
+
46
+
47
+ if is_torch_available():
48
+ import torch
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+
54
+ def _upcast(t):
55
+ # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
56
+ if t.is_floating_point():
57
+ return t if t.dtype in (torch.float32, torch.float64) else t.float()
58
+ else:
59
+ return t if t.dtype in (torch.int32, torch.int64) else t.int()
60
+
61
+
62
+ def box_area(boxes):
63
+ """
64
+ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.
65
+
66
+ Args:
67
+ boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
68
+ Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
69
+ < x2` and `0 <= y1 < y2`.
70
+ Returns:
71
+ `torch.FloatTensor`: a tensor containing the area for each box.
72
+ """
73
+ boxes = _upcast(boxes)
74
+ return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
75
+
76
+
77
+ def box_iou(boxes1, boxes2):
78
+ area1 = box_area(boxes1)
79
+ area2 = box_area(boxes2)
80
+
81
+ left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
82
+ right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
83
+
84
+ width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2]
85
+ inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M]
86
+
87
+ union = area1[:, None] + area2 - inter
88
+
89
+ iou = inter / union
90
+ return iou, union
91
+
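+ # --- Illustrative sketch (editorial addition, not part of the upstream module) ---
+ # A quick sanity check of the IoU helper above on two unit squares overlapping by half:
+ #
+ # >>> boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
+ # >>> boxes2 = torch.tensor([[0.5, 0.0, 1.5, 1.0]])
+ # >>> iou, union = box_iou(boxes1, boxes2)
+ # >>> round(iou.item(), 4), union.item()
+ # (0.3333, 1.5)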
92
+
93
+ class OwlViTImageProcessor(BaseImageProcessor):
94
+ r"""
95
+ Constructs an OWL-ViT image processor.
96
+
97
+ This image processor inherits from [`ImageProcessingMixin`] which contains most of the main methods. Users should
98
+ refer to this superclass for more information regarding those methods.
99
+
100
+ Args:
101
+ do_resize (`bool`, *optional*, defaults to `True`):
102
+ Whether to resize the shorter edge of the input to a certain `size`.
103
+ size (`Dict[str, int]`, *optional*, defaults to {"height": 768, "width": 768}):
104
+ The size to use for resizing the image. Only has an effect if `do_resize` is set to `True`. If `size` is a
105
+ sequence like (h, w), output size will be matched to this. If `size` is an int, then image will be resized
106
+ to (size, size).
107
+ resample (`int`, *optional*, defaults to `Resampling.BICUBIC`):
108
+ An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
109
+ `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
110
+ `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
111
+ to `True`.
112
+ do_center_crop (`bool`, *optional*, defaults to `False`):
113
+ Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
114
+ image is padded with 0's and then center cropped.
115
+ crop_size (`int`, *optional*, defaults to {"height": 768, "width": 768}):
116
+ The size to use for center cropping the image. Only has an effect if `do_center_crop` is set to `True`.
117
+ do_rescale (`bool`, *optional*, defaults to `True`):
118
+ Whether to rescale the input by a certain factor.
119
+ rescale_factor (`float`, *optional*, defaults to `1/255`):
120
+ The factor to use for rescaling the image. Only has an effect if `do_rescale` is set to `True`.
121
+ do_normalize (`bool`, *optional*, defaults to `True`):
122
+ Whether or not to normalize the input with `image_mean` and `image_std`.
124
+ image_mean (`List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
125
+ The sequence of means for each channel, to be used when normalizing images.
126
+ image_std (`List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
127
+ The sequence of standard deviations for each channel, to be used when normalizing images.
128
+ """
129
+
130
+ model_input_names = ["pixel_values"]
131
+
132
+ def __init__(
133
+ self,
134
+ do_resize=True,
135
+ size=None,
136
+ resample=PILImageResampling.BICUBIC,
137
+ do_center_crop=False,
138
+ crop_size=None,
139
+ do_rescale=True,
140
+ rescale_factor=1 / 255,
141
+ do_normalize=True,
142
+ image_mean=None,
143
+ image_std=None,
144
+ **kwargs,
145
+ ):
146
+ size = size if size is not None else {"height": 768, "width": 768}
147
+ size = get_size_dict(size, default_to_square=True)
148
+
149
+ crop_size = crop_size if crop_size is not None else {"height": 768, "width": 768}
150
+ crop_size = get_size_dict(crop_size, default_to_square=True)
151
+
152
+ # Early versions of the OWL-ViT config on the hub had "rescale" as a flag. This clashes with the
153
+ # vision image processor method `rescale` as it would be set as an attribute during the super().__init__
154
+ # call. This is for backwards compatibility.
155
+ if "rescale" in kwargs:
156
+ rescale_val = kwargs.pop("rescale")
157
+ kwargs["do_rescale"] = rescale_val
158
+
159
+ super().__init__(**kwargs)
160
+ self.do_resize = do_resize
161
+ self.size = size
162
+ self.resample = resample
163
+ self.do_center_crop = do_center_crop
164
+ self.crop_size = crop_size
165
+ self.do_rescale = do_rescale
166
+ self.rescale_factor = rescale_factor
167
+ self.do_normalize = do_normalize
168
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
169
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
170
+ self._valid_processor_keys = [
171
+ "images",
172
+ "do_resize",
173
+ "size",
174
+ "resample",
175
+ "do_center_crop",
176
+ "crop_size",
177
+ "do_rescale",
178
+ "rescale_factor",
179
+ "do_normalize",
180
+ "image_mean",
181
+ "image_std",
182
+ "return_tensors",
183
+ "data_format",
184
+ "input_data_format",
185
+ ]
186
+
187
+ def resize(
188
+ self,
189
+ image: np.ndarray,
190
+ size: Dict[str, int],
191
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
192
+ data_format: Optional[Union[str, ChannelDimension]] = None,
193
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
194
+ **kwargs,
195
+ ) -> np.ndarray:
196
+ """
197
+ Resize an image to a certain size.
198
+
199
+ Args:
200
+ image (`np.ndarray`):
201
+ Image to resize.
202
+ size (`Dict[str, int]`):
203
+ The size to resize the image to. Must contain height and width keys.
204
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
205
+ The resampling filter to use when resizing the input.
206
+ data_format (`str` or `ChannelDimension`, *optional*):
207
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
208
+ image is used.
209
+ input_data_format (`str` or `ChannelDimension`, *optional*):
210
+ The channel dimension format of the input image. If not provided, it will be inferred.
211
+ """
212
+ size = get_size_dict(size, default_to_square=True)
213
+ if "height" not in size or "width" not in size:
214
+ raise ValueError("size dictionary must contain height and width keys")
215
+
216
+ return resize(
217
+ image,
218
+ (size["height"], size["width"]),
219
+ resample=resample,
220
+ data_format=data_format,
221
+ input_data_format=input_data_format,
222
+ **kwargs,
223
+ )
224
+
225
+ def center_crop(
226
+ self,
227
+ image: np.ndarray,
228
+ crop_size: Dict[str, int],
229
+ data_format: Optional[Union[str, ChannelDimension]] = None,
230
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
231
+ **kwargs,
232
+ ) -> np.ndarray:
233
+ """
234
+ Center crop an image to a certain size.
235
+
236
+ Args:
237
+ image (`np.ndarray`):
238
+ Image to center crop.
239
+ crop_size (`Dict[str, int]`):
240
+ The size to center crop the image to. Must contain height and width keys.
241
+ data_format (`str` or `ChannelDimension`, *optional*):
242
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
243
+ image is used.
244
+ input_data_format (`str` or `ChannelDimension`, *optional*):
245
+ The channel dimension format of the input image. If not provided, it will be inferred.
246
+ """
247
+ crop_size = get_size_dict(crop_size, default_to_square=True)
248
+ if "height" not in crop_size or "width" not in crop_size:
249
+ raise ValueError("crop_size dictionary must contain height and width keys")
250
+
251
+ return center_crop(
252
+ image,
253
+ (crop_size["height"], crop_size["width"]),
254
+ data_format=data_format,
255
+ input_data_format=input_data_format,
256
+ **kwargs,
257
+ )
258
+
259
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
260
+ def rescale(
261
+ self,
262
+ image: np.ndarray,
263
+ rescale_factor: float,
264
+ data_format: Optional[Union[str, ChannelDimension]] = None,
265
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
266
+ ) -> np.ndarray:
267
+ """
268
+ Rescale the image by the given factor. image = image * rescale_factor.
269
+
270
+ Args:
271
+ image (`np.ndarray`):
272
+ Image to rescale.
273
+ rescale_factor (`float`):
274
+ The value to use for rescaling.
275
+ data_format (`str` or `ChannelDimension`, *optional*):
276
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
277
+ image is used. Can be one of:
278
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
279
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
280
+ input_data_format (`str` or `ChannelDimension`, *optional*):
281
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
282
+ one of:
283
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
284
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
285
+ """
286
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
287
+
288
+ def preprocess(
289
+ self,
290
+ images: ImageInput,
291
+ do_resize: Optional[bool] = None,
292
+ size: Optional[Dict[str, int]] = None,
293
+ resample: PILImageResampling = None,
294
+ do_center_crop: Optional[bool] = None,
295
+ crop_size: Optional[Dict[str, int]] = None,
296
+ do_rescale: Optional[bool] = None,
297
+ rescale_factor: Optional[float] = None,
298
+ do_normalize: Optional[bool] = None,
299
+ image_mean: Optional[Union[float, List[float]]] = None,
300
+ image_std: Optional[Union[float, List[float]]] = None,
301
+ return_tensors: Optional[Union[TensorType, str]] = None,
302
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
303
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
304
+ **kwargs,
305
+ ) -> BatchFeature:
306
+ """
307
+ Prepares an image or batch of images for the model.
308
+
309
+ Args:
310
+ images (`ImageInput`):
311
+ The image or batch of images to be prepared. Expects a single or batch of images with pixel values
312
+ ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
313
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
314
+ Whether or not to resize the input. If `True`, will resize the input to the size specified by `size`.
315
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
316
+ The size to resize the input to. Only has an effect if `do_resize` is set to `True`.
317
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
318
+ The resampling filter to use when resizing the input. Only has an effect if `do_resize` is set to
319
+ `True`.
320
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
321
+ Whether or not to center crop the input. If `True`, will center crop the input to the size specified by
322
+ `crop_size`.
323
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
324
+ The size to center crop the input to. Only has an effect if `do_center_crop` is set to `True`.
325
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
326
+ Whether or not to rescale the input. If `True`, will rescale the input by dividing it by
327
+ `rescale_factor`.
328
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
329
+ The factor to rescale the input by. Only has an effect if `do_rescale` is set to `True`.
330
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
331
+ Whether or not to normalize the input. If `True`, will normalize the input by subtracting `image_mean`
332
+ and dividing by `image_std`.
333
+ image_mean (`Union[float, List[float]]`, *optional*, defaults to `self.image_mean`):
334
+ The mean to subtract from the input when normalizing. Only has an effect if `do_normalize` is set to
335
+ `True`.
336
+ image_std (`Union[float, List[float]]`, *optional*, defaults to `self.image_std`):
337
+ The standard deviation to divide the input by when normalizing. Only has an effect if `do_normalize` is
338
+ set to `True`.
339
+ return_tensors (`str` or `TensorType`, *optional*):
340
+ The type of tensors to return. Can be one of:
341
+ - Unset: Return a list of `np.ndarray`.
342
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
343
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
344
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
345
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
346
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
347
+ The channel dimension format for the output image. Can be one of:
348
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
349
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
350
+ - Unset: defaults to the channel dimension format of the input image.
351
+ input_data_format (`ChannelDimension` or `str`, *optional*):
352
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
353
+ from the input image. Can be one of:
354
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
355
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
356
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
357
+ """
358
+ do_resize = do_resize if do_resize is not None else self.do_resize
359
+ size = size if size is not None else self.size
360
+ resample = resample if resample is not None else self.resample
361
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
362
+ crop_size = crop_size if crop_size is not None else self.crop_size
363
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
364
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
365
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
366
+ image_mean = image_mean if image_mean is not None else self.image_mean
367
+ image_std = image_std if image_std is not None else self.image_std
368
+
369
+ images = make_list_of_images(images)
370
+
371
+ if not valid_images(images):
372
+ raise ValueError(
373
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
374
+ "torch.Tensor, tf.Tensor or jax.ndarray."
375
+ )
376
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
377
+
378
+ validate_preprocess_arguments(
379
+ do_rescale=do_rescale,
380
+ rescale_factor=rescale_factor,
381
+ do_normalize=do_normalize,
382
+ image_mean=image_mean,
383
+ image_std=image_std,
384
+ do_center_crop=do_center_crop,
385
+ crop_size=crop_size,
386
+ do_resize=do_resize,
387
+ size=size,
388
+ resample=resample,
389
+ )
390
+
391
+ # All transformations expect numpy arrays
392
+ images = [to_numpy_array(image) for image in images]
393
+
394
+ if is_scaled_image(images[0]) and do_rescale:
395
+ logger.warning_once(
396
+ "It looks like you are trying to rescale already rescaled images. If the input"
397
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
398
+ )
399
+
400
+ if input_data_format is None:
401
+ # We assume that all images have the same channel dimension format.
402
+ input_data_format = infer_channel_dimension_format(images[0])
403
+
404
+ if do_resize:
405
+ images = [
406
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
407
+ for image in images
408
+ ]
409
+
410
+ if do_center_crop:
411
+ images = [
412
+ self.center_crop(image, crop_size=crop_size, input_data_format=input_data_format) for image in images
413
+ ]
414
+
415
+ if do_rescale:
416
+ images = [
417
+ self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format)
418
+ for image in images
419
+ ]
420
+
421
+ if do_normalize:
422
+ images = [
423
+ self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
424
+ for image in images
425
+ ]
426
+
427
+ images = [
428
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
429
+ ]
430
+ encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
431
+ return encoded_inputs
432
+
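+ # --- Illustrative usage sketch (editorial addition, not part of the upstream module) ---
+ # Running the preprocessing pipeline above on a dummy PIL image; `Image` comes from Pillow,
+ # everything else is defined in this module, and the expected shape follows from the
+ # default 768x768 `size`.
+ #
+ # >>> from PIL import Image
+ # >>> import numpy as np
+ # >>> image_processor = OwlViTImageProcessor()
+ # >>> image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
+ # >>> inputs = image_processor(images=image, return_tensors="pt")
+ # >>> inputs["pixel_values"].shape
+ # torch.Size([1, 3, 768, 768])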
433
+ def post_process(self, outputs, target_sizes):
434
+ """
435
+ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
436
+ bottom_right_x, bottom_right_y) format.
437
+
438
+ Args:
439
+ outputs ([`OwlViTObjectDetectionOutput`]):
440
+ Raw outputs of the model.
441
+ target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
442
+ Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original
443
+ image size (before any data augmentation). For visualization, this should be the image size after data
444
+ augmentation, but before padding.
445
+ Returns:
446
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
447
+ in the batch as predicted by the model.
448
+ """
449
+ # TODO: (amy) add support for other frameworks
450
+ warnings.warn(
451
+ "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
452
+ " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
453
+ FutureWarning,
454
+ )
455
+
456
+ logits, boxes = outputs.logits, outputs.pred_boxes
457
+
458
+ if len(logits) != len(target_sizes):
459
+ raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
460
+ if target_sizes.shape[1] != 2:
461
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
462
+
463
+ probs = torch.max(logits, dim=-1)
464
+ scores = torch.sigmoid(probs.values)
465
+ labels = probs.indices
466
+
467
+ # Convert to [x0, y0, x1, y1] format
468
+ boxes = center_to_corners_format(boxes)
469
+
470
+ # Convert from relative [0, 1] to absolute [0, height] coordinates
471
+ img_h, img_w = target_sizes.unbind(1)
472
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
473
+ boxes = boxes * scale_fct[:, None, :]
474
+
475
+ results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
476
+
477
+ return results
478
+
479
+ def post_process_object_detection(
480
+ self, outputs, threshold: float = 0.1, target_sizes: Union[TensorType, List[Tuple]] = None
481
+ ):
482
+ """
483
+ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
484
+ bottom_right_x, bottom_right_y) format.
485
+
486
+ Args:
487
+ outputs ([`OwlViTObjectDetectionOutput`]):
488
+ Raw outputs of the model.
489
+ threshold (`float`, *optional*):
490
+ Score threshold to keep object detection predictions.
491
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
492
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
493
+ `(height, width)` of each image in the batch. If unset, predictions will not be resized.
494
+ Returns:
495
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
496
+ in the batch as predicted by the model.
497
+ """
498
+ # TODO: (amy) add support for other frameworks
499
+ logits, boxes = outputs.logits, outputs.pred_boxes
500
+
501
+ if target_sizes is not None:
502
+ if len(logits) != len(target_sizes):
503
+ raise ValueError(
504
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
505
+ )
506
+
507
+ probs = torch.max(logits, dim=-1)
508
+ scores = torch.sigmoid(probs.values)
509
+ labels = probs.indices
510
+
511
+ # Convert to [x0, y0, x1, y1] format
512
+ boxes = center_to_corners_format(boxes)
513
+
514
+ # Convert from relative [0, 1] to absolute [0, height] coordinates
515
+ if target_sizes is not None:
516
+ if isinstance(target_sizes, List):
517
+ img_h = torch.Tensor([i[0] for i in target_sizes])
518
+ img_w = torch.Tensor([i[1] for i in target_sizes])
519
+ else:
520
+ img_h, img_w = target_sizes.unbind(1)
521
+
522
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
523
+ boxes = boxes * scale_fct[:, None, :]
524
+
525
+ results = []
526
+ for s, l, b in zip(scores, labels, boxes):
527
+ score = s[s > threshold]
528
+ label = l[s > threshold]
529
+ box = b[s > threshold]
530
+ results.append({"scores": score, "labels": label, "boxes": box})
531
+
532
+ return results
533
+
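+ # --- Illustrative usage sketch (editorial addition, not part of the upstream module) ---
+ # Typical end-to-end use of `post_process_object_detection`; the repo id is the public
+ # google/owlvit-base-patch32 checkpoint and `image` is any PIL image (e.g. the dummy image
+ # from the sketch after `preprocess` above), so this is a hedged example rather than a test.
+ #
+ # >>> from transformers import OwlViTProcessor, OwlViTForObjectDetection
+ # >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
+ # >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
+ # >>> inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
+ # >>> with torch.no_grad():
+ # ...     outputs = model(**inputs)
+ # >>> target_sizes = torch.tensor([image.size[::-1]])  # (height, width) per image
+ # >>> results = processor.image_processor.post_process_object_detection(
+ # ...     outputs, threshold=0.1, target_sizes=target_sizes
+ # ... )
+ # >>> sorted(results[0].keys())
+ # ['boxes', 'labels', 'scores']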
534
+ # TODO: (Amy) Make compatible with other frameworks
535
+ def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None):
536
+ """
537
+ Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO
538
+ api.
539
+
540
+ Args:
541
+ outputs ([`OwlViTImageGuidedObjectDetectionOutput`]):
542
+ Raw outputs of the model.
543
+ threshold (`float`, *optional*, defaults to 0.0):
544
+ Minimum confidence threshold to use to filter out predicted boxes.
545
+ nms_threshold (`float`, *optional*, defaults to 0.3):
546
+ IoU threshold for non-maximum suppression of overlapping boxes.
547
+ target_sizes (`torch.Tensor`, *optional*):
548
+ Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
549
+ the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to
550
+ None, predictions will not be unnormalized.
551
+
552
+ Returns:
553
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
554
+ in the batch as predicted by the model. All labels are set to None as
555
+ `OwlViTForObjectDetection.image_guided_detection` performs one-shot object detection.
556
+ """
557
+ logits, target_boxes = outputs.logits, outputs.target_pred_boxes
558
+
559
+ if len(logits) != len(target_sizes):
560
+ raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
561
+ if target_sizes.shape[1] != 2:
562
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
563
+
564
+ probs = torch.max(logits, dim=-1)
565
+ scores = torch.sigmoid(probs.values)
566
+
567
+ # Convert to [x0, y0, x1, y1] format
568
+ target_boxes = center_to_corners_format(target_boxes)
569
+
570
+ # Apply non-maximum suppression (NMS)
571
+ if nms_threshold < 1.0:
572
+ for idx in range(target_boxes.shape[0]):
573
+ for i in torch.argsort(-scores[idx]):
574
+ if not scores[idx][i]:
575
+ continue
576
+
577
+ ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0]
578
+ ious[i] = -1.0 # Mask self-IoU.
579
+ scores[idx][ious > nms_threshold] = 0.0
580
+
581
+ # Convert from relative [0, 1] to absolute [0, height] coordinates
582
+ img_h, img_w = target_sizes.unbind(1)
583
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(target_boxes.device)
584
+ target_boxes = target_boxes * scale_fct[:, None, :]
585
+
586
+ # Compute box display alphas based on prediction scores
587
+ results = []
588
+ alphas = torch.zeros_like(scores)
589
+
590
+ for idx in range(target_boxes.shape[0]):
591
+ # Select scores for boxes matching the current query:
592
+ query_scores = scores[idx]
593
+ if not query_scores.nonzero().numel():
594
+ continue
595
+
596
+ # Apply threshold on scores before scaling
597
+ query_scores[query_scores < threshold] = 0.0
598
+
599
+ # Scale box alpha such that the best box for each query has alpha 1.0 and the worst box has alpha 0.1.
600
+ # All other boxes will either belong to a different query, or will not be shown.
601
+ max_score = torch.max(query_scores) + 1e-6
602
+ query_alphas = (query_scores - (max_score * 0.1)) / (max_score * 0.9)
603
+ query_alphas = torch.clip(query_alphas, 0.0, 1.0)
604
+ alphas[idx] = query_alphas
605
+
606
+ mask = alphas[idx] > 0
607
+ box_scores = alphas[idx][mask]
608
+ boxes = target_boxes[idx][mask]
609
+ results.append({"scores": box_scores, "labels": None, "boxes": boxes})
610
+
611
+ return results
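+ # --- Illustrative usage sketch (editorial addition, not part of the upstream module) ---
+ # One-shot, image-guided detection replaces the text queries with a query image; this reuses
+ # `processor`, `model` and `image` from the sketch above plus a second PIL image
+ # `query_image`, so it is a hedged example only.
+ #
+ # >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")
+ # >>> with torch.no_grad():
+ # ...     outputs = model.image_guided_detection(**inputs)
+ # >>> target_sizes = torch.tensor([image.size[::-1]])
+ # >>> results = processor.image_processor.post_process_image_guided_detection(
+ # ...     outputs, threshold=0.6, nms_threshold=0.3, target_sizes=target_sizes
+ # ... )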
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/modeling_owlvit.py ADDED
@@ -0,0 +1,1685 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Google AI and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch OWL-ViT model."""
16
+
17
+ import warnings
18
+ from dataclasses import dataclass
19
+ from functools import lru_cache
20
+ from typing import Any, Dict, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import Tensor, nn
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
28
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import (
31
+ ModelOutput,
32
+ add_start_docstrings,
33
+ add_start_docstrings_to_model_forward,
34
+ is_vision_available,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
39
+
40
+
41
+ if is_vision_available():
42
+ from transformers.image_transforms import center_to_corners_format
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CHECKPOINT_FOR_DOC = "google/owlvit-base-patch32"
48
+
49
+ # See all OwlViT models at https://huggingface.co/models?filter=owlvit
50
+
51
+ from ..deprecated._archive_maps import OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
52
+
53
+
54
+ # Copied from transformers.models.clip.modeling_clip.contrastive_loss with clip->owlvit
55
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
56
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
57
+
58
+
59
+ # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->owlvit
60
+ def owlvit_loss(similarity: torch.Tensor) -> torch.Tensor:
61
+ caption_loss = contrastive_loss(similarity)
62
+ image_loss = contrastive_loss(similarity.t())
63
+ return (caption_loss + image_loss) / 2.0
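To make the symmetric contrastive loss above concrete, a standalone toy example (plain PyTorch, independent of the diff):

```python
import torch
from torch import nn

# Cross-entropy against the "matching index" targets 0..N-1, as in contrastive_loss above.
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))

# Toy 2x2 text-image similarity matrix: diagonal entries are the matching pairs.
similarity = torch.tensor([[5.0, 0.1],
                           [0.2, 4.0]])
caption_loss = contrastive_loss(similarity)    # text -> image direction (rows)
image_loss = contrastive_loss(similarity.t())  # image -> text direction (columns)
print((caption_loss + image_loss) / 2.0)       # small, since the diagonal dominates
```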
64
+
65
+
66
+ @dataclass
67
+ class OwlViTOutput(ModelOutput):
68
+ """
69
+ Args:
70
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
71
+ Contrastive loss for image-text similarity.
72
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
73
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
74
+ similarity scores.
75
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
76
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
77
+ similarity scores.
78
+ text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim)`):
79
+ The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
80
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
81
+ The image embeddings obtained by applying the projection layer to the pooled output of
82
+ [`OwlViTVisionModel`].
83
+ text_model_output (Tuple[`BaseModelOutputWithPooling`]):
84
+ The output of the [`OwlViTTextModel`].
85
+ vision_model_output (`BaseModelOutputWithPooling`):
86
+ The output of the [`OwlViTVisionModel`].
87
+ """
88
+
89
+ loss: Optional[torch.FloatTensor] = None
90
+ logits_per_image: torch.FloatTensor = None
91
+ logits_per_text: torch.FloatTensor = None
92
+ text_embeds: torch.FloatTensor = None
93
+ image_embeds: torch.FloatTensor = None
94
+ text_model_output: BaseModelOutputWithPooling = None
95
+ vision_model_output: BaseModelOutputWithPooling = None
96
+
97
+ def to_tuple(self) -> Tuple[Any]:
98
+ return tuple(
99
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
100
+ for k in self.keys()
101
+ )
102
+
103
+
104
+ # Copied from transformers.models.detr.modeling_detr._upcast
105
+ def _upcast(t: Tensor) -> Tensor:
106
+ # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
107
+ if t.is_floating_point():
108
+ return t if t.dtype in (torch.float32, torch.float64) else t.float()
109
+ else:
110
+ return t if t.dtype in (torch.int32, torch.int64) else t.int()
111
+
112
+
113
+ # Copied from transformers.models.detr.modeling_detr.box_area
114
+ def box_area(boxes: Tensor) -> Tensor:
115
+ """
116
+ Computes the area of a set of bounding boxes, which are specified by their (x1, y1, x2, y2) coordinates.
117
+
118
+ Args:
119
+ boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
120
+ Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
121
+ < x2` and `0 <= y1 < y2`.
122
+
123
+ Returns:
124
+ `torch.FloatTensor`: a tensor containing the area for each box.
125
+ """
126
+ boxes = _upcast(boxes)
127
+ return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
128
+
129
+
130
+ # Copied from transformers.models.detr.modeling_detr.box_iou
131
+ def box_iou(boxes1, boxes2):
132
+ area1 = box_area(boxes1)
133
+ area2 = box_area(boxes2)
134
+
135
+ left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
136
+ right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
137
+
138
+ width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2]
139
+ inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M]
140
+
141
+ union = area1[:, None] + area2 - inter
142
+
143
+ iou = inter / union
144
+ return iou, union
145
+
146
+
147
+ # Copied from transformers.models.detr.modeling_detr.generalized_box_iou
148
+ def generalized_box_iou(boxes1, boxes2):
149
+ """
150
+ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.
151
+
152
+ Returns:
153
+ `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
154
+ """
155
+ # degenerate boxes gives inf / nan results
156
+ # so do an early check
157
+ if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
158
+ raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
159
+ if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
160
+ raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")
161
+ iou, union = box_iou(boxes1, boxes2)
162
+
163
+ top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
164
+ bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
165
+
166
+ width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2]
167
+ area = width_height[:, :, 0] * width_height[:, :, 1]
168
+
169
+ return iou - (area - union) / area
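As a sanity check on the IoU arithmetic, a self-contained restatement of `box_iou` on a toy pair of boxes (not part of the diff):

```python
import torch

def box_area(boxes):
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

def box_iou(boxes1, boxes2):
    area1, area2 = box_area(boxes1), box_area(boxes2)
    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
    width_height = (right_bottom - left_top).clamp(min=0)
    inter = width_height[:, :, 0] * width_height[:, :, 1]
    union = area1[:, None] + area2 - inter
    return inter / union, union

boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])  # 2x2 box at the origin
boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])  # 2x2 box shifted by (1, 1)
iou, union = box_iou(boxes1, boxes2)
print(iou)  # 1 / 7 ≈ 0.1429: intersection area 1, union 4 + 4 - 1 = 7
```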
170
+
171
+
172
+ @dataclass
173
+ class OwlViTObjectDetectionOutput(ModelOutput):
174
+ """
175
+ Output type of [`OwlViTForObjectDetection`].
176
+
177
+ Args:
178
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
179
+ Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
180
+ bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
181
+ scale-invariant IoU loss.
182
+ loss_dict (`Dict`, *optional*):
183
+ A dictionary containing the individual losses. Useful for logging.
184
+ logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
185
+ Classification logits (including no-object) for all queries.
186
+ pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
187
+ Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
188
+ values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
189
+ possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to retrieve the
190
+ unnormalized bounding boxes.
191
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim)`):
192
+ The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
193
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
194
+ Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
195
+ image embeddings for each patch.
196
+ class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
197
+ Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
198
+ number of patches is (image_size / patch_size)**2.
199
+ text_model_output (Tuple[`BaseModelOutputWithPooling`]):
200
+ The output of the [`OwlViTTextModel`].
201
+ vision_model_output (`BaseModelOutputWithPooling`):
202
+ The output of the [`OwlViTVisionModel`].
203
+ """
204
+
205
+ loss: Optional[torch.FloatTensor] = None
206
+ loss_dict: Optional[Dict] = None
207
+ logits: torch.FloatTensor = None
208
+ pred_boxes: torch.FloatTensor = None
209
+ text_embeds: torch.FloatTensor = None
210
+ image_embeds: torch.FloatTensor = None
211
+ class_embeds: torch.FloatTensor = None
212
+ text_model_output: BaseModelOutputWithPooling = None
213
+ vision_model_output: BaseModelOutputWithPooling = None
214
+
215
+ def to_tuple(self) -> Tuple[Any]:
216
+ return tuple(
217
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
218
+ for k in self.keys()
219
+ )
220
+
221
+
222
+ @dataclass
223
+ class OwlViTImageGuidedObjectDetectionOutput(ModelOutput):
224
+ """
225
+ Output type of [`OwlViTForObjectDetection.image_guided_detection`].
226
+
227
+ Args:
228
+ logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
229
+ Classification logits (including no-object) for all queries.
230
+ target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
231
+ Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
232
+ values are normalized in [0, 1], relative to the size of each individual target image in the batch
233
+ (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
234
+ retrieve the unnormalized bounding boxes.
235
+ query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
236
+ Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
237
+ values are normalized in [0, 1], relative to the size of each individual query image in the batch
238
+ (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
239
+ retrieve the unnormalized bounding boxes.
240
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
241
+ Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
242
+ image embeddings for each patch.
243
+ query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
244
+ Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
245
+ image embeddings for each patch.
246
+ class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
247
+ Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
248
+ number of patches is (image_size / patch_size)**2.
249
+ text_model_output (Tuple[`BaseModelOutputWithPooling`]):
250
+ The output of the [`OwlViTTextModel`].
251
+ vision_model_output (`BaseModelOutputWithPooling`):
252
+ The output of the [`OwlViTVisionModel`].
253
+ """
254
+
255
+ logits: torch.FloatTensor = None
256
+ image_embeds: torch.FloatTensor = None
257
+ query_image_embeds: torch.FloatTensor = None
258
+ target_pred_boxes: torch.FloatTensor = None
259
+ query_pred_boxes: torch.FloatTensor = None
260
+ class_embeds: torch.FloatTensor = None
261
+ text_model_output: BaseModelOutputWithPooling = None
262
+ vision_model_output: BaseModelOutputWithPooling = None
263
+
264
+ def to_tuple(self) -> Tuple[Any]:
265
+ return tuple(
266
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
267
+ for k in self.keys()
268
+ )
269
+
270
+
271
+ class OwlViTVisionEmbeddings(nn.Module):
272
+ def __init__(self, config: OwlViTVisionConfig):
273
+ super().__init__()
274
+ self.config = config
275
+ self.embed_dim = config.hidden_size
276
+ self.class_embedding = nn.Parameter(torch.randn(config.hidden_size))
277
+
278
+ self.patch_embedding = nn.Conv2d(
279
+ in_channels=config.num_channels,
280
+ out_channels=self.embed_dim,
281
+ kernel_size=config.patch_size,
282
+ stride=config.patch_size,
283
+ bias=False,
284
+ )
285
+
286
+ self.num_patches = (config.image_size // config.patch_size) ** 2
287
+ self.num_positions = self.num_patches + 1
288
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
289
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
290
+
291
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
292
+ batch_size = pixel_values.shape[0]
293
+ patch_embeds = self.patch_embedding(pixel_values) # shape = [batch_size, num_channels, height, width]
294
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
295
+
296
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
297
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
298
+ embeddings = embeddings + self.position_embedding(self.position_ids)
299
+
300
+ return embeddings
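The token count produced by `OwlViTVisionEmbeddings` follows directly from the patch grid. A small sketch, assuming the `OwlViTVisionConfig` defaults used by `google/owlvit-base-patch32` (`image_size=768`, `patch_size=32`; treat these values as assumptions):

```python
image_size, patch_size = 768, 32
num_patches = (image_size // patch_size) ** 2  # 24 * 24 = 576 patch tokens
num_positions = num_patches + 1                # +1 for the learned class embedding
print(num_patches, num_positions)              # 576 577
# forward(): pixel_values (B, 3, 768, 768) -> patch_embeds (B, 576, hidden)
#            -> prepend class token -> (B, 577, hidden) -> add position embeddings
```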
301
+
302
+
303
+ class OwlViTTextEmbeddings(nn.Module):
304
+ def __init__(self, config: OwlViTTextConfig):
305
+ super().__init__()
306
+ self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
307
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)
308
+
309
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
310
+ self.register_buffer(
311
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
312
+ )
313
+
314
+ def forward(
315
+ self,
316
+ input_ids: Optional[torch.LongTensor] = None,
317
+ position_ids: Optional[torch.LongTensor] = None,
318
+ inputs_embeds: Optional[torch.FloatTensor] = None,
319
+ ) -> torch.Tensor:
320
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
321
+
322
+ if position_ids is None:
323
+ position_ids = self.position_ids[:, :seq_length]
324
+
325
+ if inputs_embeds is None:
326
+ inputs_embeds = self.token_embedding(input_ids)
327
+
328
+ position_embeddings = self.position_embedding(position_ids)
329
+ embeddings = inputs_embeds + position_embeddings
330
+
331
+ return embeddings
332
+
333
+
334
+ class OwlViTAttention(nn.Module):
335
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
336
+
337
+ def __init__(self, config):
338
+ super().__init__()
339
+ self.config = config
340
+ self.embed_dim = config.hidden_size
341
+ self.num_heads = config.num_attention_heads
342
+ self.head_dim = self.embed_dim // self.num_heads
343
+ if self.head_dim * self.num_heads != self.embed_dim:
344
+ raise ValueError(
345
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
346
+ f" {self.num_heads})."
347
+ )
348
+ self.scale = self.head_dim**-0.5
349
+ self.dropout = config.attention_dropout
350
+
351
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
352
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
353
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
354
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
355
+
356
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
357
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
358
+
359
+ def forward(
360
+ self,
361
+ hidden_states: torch.Tensor,
362
+ attention_mask: Optional[torch.Tensor] = None,
363
+ causal_attention_mask: Optional[torch.Tensor] = None,
364
+ output_attentions: Optional[bool] = False,
365
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
366
+ """Input shape: Batch x Time x Channel"""
367
+
368
+ bsz, tgt_len, embed_dim = hidden_states.size()
369
+
370
+ # get query proj
371
+ query_states = self.q_proj(hidden_states) * self.scale
372
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
373
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
374
+
375
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
376
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
377
+ key_states = key_states.view(*proj_shape)
378
+ value_states = value_states.view(*proj_shape)
379
+
380
+ src_len = key_states.size(1)
381
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
382
+
383
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
384
+ raise ValueError(
385
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
386
+ f" {attn_weights.size()}"
387
+ )
388
+
389
+ # apply the causal_attention_mask first
390
+ if causal_attention_mask is not None:
391
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
392
+ raise ValueError(
393
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
394
+ f" {causal_attention_mask.size()}"
395
+ )
396
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
397
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
398
+
399
+ if attention_mask is not None:
400
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
401
+ raise ValueError(
402
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
403
+ )
404
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
405
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
406
+
407
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
408
+
409
+ if output_attentions:
410
+ # this operation is a bit awkward, but it's required to
411
+ # make sure that attn_weights keeps its gradient.
412
+ # In order to do so, attn_weights have to be reshaped
413
+ # twice and have to be reused in the following
414
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
415
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
416
+ else:
417
+ attn_weights_reshaped = None
418
+
419
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
420
+
421
+ # For int8 compatibility, sometimes the `attn_probs` are in `fp32`
422
+ attn_probs = attn_probs.to(value_states.dtype)
423
+
424
+ attn_output = torch.bmm(attn_probs, value_states)
425
+
426
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
427
+ raise ValueError(
428
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
429
+ f" {attn_output.size()}"
430
+ )
431
+
432
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
433
+ attn_output = attn_output.transpose(1, 2)
434
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
435
+
436
+ attn_output = self.out_proj(attn_output)
437
+
438
+ return attn_output, attn_weights_reshaped
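A shape walkthrough of the batched-matmul attention above, using toy sizes rather than instantiating `OwlViTAttention` (standalone sketch):

```python
import torch

bsz, tgt_len, num_heads, head_dim = 2, 5, 4, 8
embed_dim = num_heads * head_dim

# States as they look after _shape() + view(*proj_shape): (bsz * num_heads, seq_len, head_dim)
query_states = torch.randn(bsz * num_heads, tgt_len, head_dim)
key_states = torch.randn(bsz * num_heads, tgt_len, head_dim)
value_states = torch.randn(bsz * num_heads, tgt_len, head_dim)

attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))  # (bsz*num_heads, tgt_len, src_len)
attn_probs = attn_weights.softmax(dim=-1)
attn_output = torch.bmm(attn_probs, value_states)                   # (bsz*num_heads, tgt_len, head_dim)
attn_output = (
    attn_output.view(bsz, num_heads, tgt_len, head_dim).transpose(1, 2).reshape(bsz, tgt_len, embed_dim)
)
print(attn_output.shape)  # torch.Size([2, 5, 32])
```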
439
+
440
+
441
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->OwlViT
442
+ class OwlViTMLP(nn.Module):
443
+ def __init__(self, config):
444
+ super().__init__()
445
+ self.config = config
446
+ self.activation_fn = ACT2FN[config.hidden_act]
447
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
448
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
449
+
450
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
451
+ hidden_states = self.fc1(hidden_states)
452
+ hidden_states = self.activation_fn(hidden_states)
453
+ hidden_states = self.fc2(hidden_states)
454
+ return hidden_states
455
+
456
+
457
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->OwlViT
458
+ class OwlViTEncoderLayer(nn.Module):
459
+ def __init__(self, config: OwlViTConfig):
460
+ super().__init__()
461
+ self.embed_dim = config.hidden_size
462
+ self.self_attn = OwlViTAttention(config)
463
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
464
+ self.mlp = OwlViTMLP(config)
465
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
466
+
467
+ def forward(
468
+ self,
469
+ hidden_states: torch.Tensor,
470
+ attention_mask: torch.Tensor,
471
+ causal_attention_mask: torch.Tensor,
472
+ output_attentions: Optional[bool] = False,
473
+ ) -> Tuple[torch.FloatTensor]:
474
+ """
475
+ Args:
476
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
477
+ attention_mask (`torch.FloatTensor`): attention mask of size
478
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
480
+ output_attentions (`bool`, *optional*):
481
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
482
+ returned tensors for more detail.
483
+ """
484
+ residual = hidden_states
485
+
486
+ hidden_states = self.layer_norm1(hidden_states)
487
+ hidden_states, attn_weights = self.self_attn(
488
+ hidden_states=hidden_states,
489
+ attention_mask=attention_mask,
490
+ causal_attention_mask=causal_attention_mask,
491
+ output_attentions=output_attentions,
492
+ )
493
+ hidden_states = residual + hidden_states
494
+
495
+ residual = hidden_states
496
+ hidden_states = self.layer_norm2(hidden_states)
497
+ hidden_states = self.mlp(hidden_states)
498
+ hidden_states = residual + hidden_states
499
+
500
+ outputs = (hidden_states,)
501
+
502
+ if output_attentions:
503
+ outputs += (attn_weights,)
504
+
505
+ return outputs
506
+
507
+
508
+ class OwlViTPreTrainedModel(PreTrainedModel):
509
+ """
510
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
511
+ models.
512
+ """
513
+
514
+ config_class = OwlViTConfig
515
+ base_model_prefix = "owlvit"
516
+ supports_gradient_checkpointing = True
517
+ _no_split_modules = ["OwlViTEncoderLayer"]
518
+
519
+ def _init_weights(self, module):
520
+ """Initialize the weights"""
521
+ factor = self.config.initializer_factor
522
+ if isinstance(module, OwlViTTextEmbeddings):
523
+ module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
524
+ module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
525
+ elif isinstance(module, OwlViTVisionEmbeddings):
526
+ factor = self.config.initializer_factor
527
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
528
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
529
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
530
+ elif isinstance(module, OwlViTAttention):
531
+ factor = self.config.initializer_factor
532
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
533
+ out_proj_std = (module.embed_dim**-0.5) * factor
534
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
535
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
536
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
537
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
538
+ elif isinstance(module, OwlViTMLP):
539
+ factor = self.config.initializer_factor
540
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
541
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
542
+ nn.init.normal_(module.fc1.weight, std=fc_std)
543
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
544
+ elif isinstance(module, OwlViTModel):
545
+ nn.init.normal_(
546
+ module.text_projection.weight,
547
+ std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
548
+ )
549
+ nn.init.normal_(
550
+ module.visual_projection.weight,
551
+ std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
552
+ )
553
+ if isinstance(module, nn.LayerNorm):
554
+ module.bias.data.zero_()
555
+ module.weight.data.fill_(1.0)
556
+ if isinstance(module, nn.Linear) and module.bias is not None:
557
+ module.bias.data.zero_()
558
+
559
+
560
+ OWLVIT_START_DOCSTRING = r"""
561
+
562
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
563
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
564
+ etc.)
565
+
566
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
567
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
568
+ and behavior.
569
+
570
+ Parameters:
571
+ config ([`OwlViTConfig`]): Model configuration class with all the parameters of the model.
572
+ Initializing with a config file does not load the weights associated with the model, only the
573
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
574
+ """
575
+
576
+ OWLVIT_TEXT_INPUTS_DOCSTRING = r"""
577
+ Args:
578
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
579
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
580
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
581
+ IDs?](../glossary#input-ids)
582
+ attention_mask (`torch.Tensor` of shape `(batch_size, num_max_text_queries, sequence_length)`, *optional*):
583
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
584
+ - 1 for tokens that are **not masked**,
585
+ - 0 for tokens that are **masked**.
586
+ [What are attention masks?](../glossary#attention-mask)
587
+ output_attentions (`bool`, *optional*):
588
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
589
+ tensors for more detail.
590
+ output_hidden_states (`bool`, *optional*):
591
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
592
+ more detail.
593
+ return_dict (`bool`, *optional*):
594
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
595
+ """
596
+
597
+ OWLVIT_VISION_INPUTS_DOCSTRING = r"""
598
+ Args:
599
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
600
+ Pixel values.
601
+ output_attentions (`bool`, *optional*):
602
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
603
+ tensors for more detail.
604
+ output_hidden_states (`bool`, *optional*):
605
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
606
+ more detail.
607
+ return_dict (`bool`, *optional*):
608
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
609
+ """
610
+
611
+ OWLVIT_INPUTS_DOCSTRING = r"""
612
+ Args:
613
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
614
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
615
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
616
+ IDs?](../glossary#input-ids)
617
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
618
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
619
+ - 1 for tokens that are **not masked**,
620
+ - 0 for tokens that are **masked**.
621
+ [What are attention masks?](../glossary#attention-mask)
622
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
623
+ Pixel values.
624
+ return_loss (`bool`, *optional*):
625
+ Whether or not to return the contrastive loss.
626
+ output_attentions (`bool`, *optional*):
627
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
628
+ tensors for more detail.
629
+ output_hidden_states (`bool`, *optional*):
630
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
631
+ more detail.
632
+ return_dict (`bool`, *optional*):
633
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
634
+ """
635
+
636
+ OWLVIT_OBJECT_DETECTION_INPUTS_DOCSTRING = r"""
637
+ Args:
638
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
639
+ Pixel values.
640
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`, *optional*):
641
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
642
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
643
+ IDs?](../glossary#input-ids).
644
+ attention_mask (`torch.Tensor` of shape `(batch_size, num_max_text_queries, sequence_length)`, *optional*):
645
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
646
+ - 1 for tokens that are **not masked**,
647
+ - 0 for tokens that are **masked**.
648
+ [What are attention masks?](../glossary#attention-mask)
649
+ output_hidden_states (`bool`, *optional*):
650
+ Whether or not to return the last hidden state. See `text_model_last_hidden_state` and
651
+ `vision_model_last_hidden_state` under returned tensors for more detail.
652
+ return_dict (`bool`, *optional*):
653
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
654
+ """
655
+
656
+ OWLVIT_IMAGE_GUIDED_OBJECT_DETECTION_INPUTS_DOCSTRING = r"""
657
+ Args:
658
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
659
+ Pixel values.
660
+ query_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
661
+ Pixel values of query image(s) to be detected. Pass in one query image per target image.
662
+ output_attentions (`bool`, *optional*):
663
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
664
+ tensors for more detail.
665
+ output_hidden_states (`bool`, *optional*):
666
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
667
+ more detail.
668
+ return_dict (`bool`, *optional*):
669
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
670
+ """
671
+
672
+
673
+ class OwlViTEncoder(nn.Module):
674
+ """
675
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
676
+ [`OwlViTEncoderLayer`].
677
+
678
+ Args:
679
+ config: OwlViTConfig
680
+ """
681
+
682
+ def __init__(self, config: OwlViTConfig):
683
+ super().__init__()
684
+ self.layers = nn.ModuleList([OwlViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
685
+ self.gradient_checkpointing = False
686
+
687
+ def forward(
688
+ self,
689
+ inputs_embeds,
690
+ attention_mask: Optional[torch.Tensor] = None,
691
+ causal_attention_mask: Optional[torch.Tensor] = None,
692
+ output_attentions: Optional[bool] = None,
693
+ output_hidden_states: Optional[bool] = None,
694
+ return_dict: Optional[bool] = None,
695
+ ) -> Union[Tuple, BaseModelOutput]:
696
+ r"""
697
+ Args:
698
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`).
699
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
700
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
701
+ - 1 for tokens that are **not masked**,
702
+ - 0 for tokens that are **masked**.
703
+ [What are attention masks?](../glossary#attention-mask)
704
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
705
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
706
+ - 1 for tokens that are **not masked**,
707
+ - 0 for tokens that are **masked**.
708
+ [What are attention masks?](../glossary#attention-mask)
709
+ output_attentions (`bool`, *optional*):
710
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
711
+ returned tensors for more detail.
712
+ output_hidden_states (`bool`, *optional*):
713
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
714
+ for more detail.
715
+ return_dict (`bool`, *optional*):
716
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
717
+ """
718
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
719
+ output_hidden_states = (
720
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
721
+ )
722
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
723
+
724
+ encoder_states = () if output_hidden_states else None
725
+ all_attentions = () if output_attentions else None
726
+
727
+ hidden_states = inputs_embeds
728
+ for encoder_layer in self.layers:
729
+ if output_hidden_states:
730
+ encoder_states = encoder_states + (hidden_states,)
731
+ if self.gradient_checkpointing and self.training:
732
+ layer_outputs = self._gradient_checkpointing_func(
733
+ encoder_layer.__call__,
734
+ hidden_states,
735
+ attention_mask,
736
+ causal_attention_mask,
737
+ output_attentions,
738
+ )
739
+ else:
740
+ layer_outputs = encoder_layer(
741
+ hidden_states,
742
+ attention_mask,
743
+ causal_attention_mask,
744
+ output_attentions=output_attentions,
745
+ )
746
+
747
+ hidden_states = layer_outputs[0]
748
+
749
+ if output_attentions:
750
+ all_attentions = all_attentions + (layer_outputs[1],)
751
+
752
+ if output_hidden_states:
753
+ encoder_states = encoder_states + (hidden_states,)
754
+
755
+ if not return_dict:
756
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
757
+ return BaseModelOutput(
758
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
759
+ )
760
+
761
+
762
+ class OwlViTTextTransformer(nn.Module):
763
+ def __init__(self, config: OwlViTTextConfig):
764
+ super().__init__()
765
+ self.config = config
766
+ embed_dim = config.hidden_size
767
+ self.embeddings = OwlViTTextEmbeddings(config)
768
+ self.encoder = OwlViTEncoder(config)
769
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
770
+
771
+ @add_start_docstrings_to_model_forward(OWLVIT_TEXT_INPUTS_DOCSTRING)
772
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=OwlViTTextConfig)
773
+ def forward(
774
+ self,
775
+ input_ids: torch.Tensor,
776
+ attention_mask: Optional[torch.Tensor] = None,
777
+ position_ids: Optional[torch.Tensor] = None,
778
+ output_attentions: Optional[bool] = None,
779
+ output_hidden_states: Optional[bool] = None,
780
+ return_dict: Optional[bool] = None,
781
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
782
+ r"""
783
+ Returns:
784
+ """
785
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
786
+ output_hidden_states = (
787
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
788
+ )
789
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
790
+
791
+ input_shape = input_ids.size()
792
+ input_ids = input_ids.view(-1, input_shape[-1])
793
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
794
+
795
+ # num_samples, seq_len = input_shape where num_samples = batch_size * num_max_text_queries
796
+ # OWLVIT's text model uses causal mask, prepare it here.
797
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
798
+ causal_attention_mask = _create_4d_causal_attention_mask(
799
+ input_shape, hidden_states.dtype, device=hidden_states.device
800
+ )
801
+ # expand attention_mask
802
+ if attention_mask is not None:
803
+ # [num_samples, seq_len] -> [num_samples, 1, tgt_seq_len, src_seq_len]
804
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
805
+
806
+ encoder_outputs = self.encoder(
807
+ inputs_embeds=hidden_states,
808
+ attention_mask=attention_mask,
809
+ causal_attention_mask=causal_attention_mask,
810
+ output_attentions=output_attentions,
811
+ output_hidden_states=output_hidden_states,
812
+ return_dict=return_dict,
813
+ )
814
+
815
+ last_hidden_state = encoder_outputs[0]
816
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
817
+
818
+ # take features from the end-of-text token embedding (the EOT token has the highest token id in each sequence)
819
+ # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
820
+ pooled_output = last_hidden_state[
821
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
822
+ input_ids.to(torch.int).argmax(dim=-1).to(last_hidden_state.device),
823
+ ]
824
+
825
+ if not return_dict:
826
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
827
+
828
+ return BaseModelOutputWithPooling(
829
+ last_hidden_state=last_hidden_state,
830
+ pooler_output=pooled_output,
831
+ hidden_states=encoder_outputs.hidden_states,
832
+ attentions=encoder_outputs.attentions,
833
+ )
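The EOS-token pooling above relies on the CLIP convention that the end-of-text token has the largest id in each sequence. A standalone sketch with toy tensors (the token ids 49406/49407 are the usual CLIP BOS/EOS ids and are an assumption here):

```python
import torch

last_hidden_state = torch.randn(2, 4, 8)                 # (num_samples, seq_len, hidden_size)
input_ids = torch.tensor([[49406, 320, 2368, 49407],     # BOS ... EOS
                          [49406, 1929, 49407, 0]])      # shorter sequence, padded with 0 (assumption)
eos_positions = input_ids.to(torch.int).argmax(dim=-1)   # tensor([3, 2]) -- EOS has the highest id
pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), eos_positions]
print(pooled_output.shape)                               # torch.Size([2, 8])
```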
834
+
835
+
836
+ class OwlViTTextModel(OwlViTPreTrainedModel):
837
+ config_class = OwlViTTextConfig
838
+
839
+ def __init__(self, config: OwlViTTextConfig):
840
+ super().__init__(config)
841
+ self.text_model = OwlViTTextTransformer(config)
842
+ # Initialize weights and apply final processing
843
+ self.post_init()
844
+
845
+ def get_input_embeddings(self) -> nn.Module:
846
+ return self.text_model.embeddings.token_embedding
847
+
848
+ def set_input_embeddings(self, value):
849
+ self.text_model.embeddings.token_embedding = value
850
+
851
+ @add_start_docstrings_to_model_forward(OWLVIT_TEXT_INPUTS_DOCSTRING)
852
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=OwlViTTextConfig)
853
+ def forward(
854
+ self,
855
+ input_ids: torch.Tensor,
856
+ attention_mask: Optional[torch.Tensor] = None,
857
+ output_attentions: Optional[bool] = None,
858
+ output_hidden_states: Optional[bool] = None,
859
+ return_dict: Optional[bool] = None,
860
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
861
+ r"""
862
+ Returns:
863
+
864
+ Examples:
865
+ ```python
866
+ >>> from transformers import AutoProcessor, OwlViTTextModel
867
+
868
+ >>> model = OwlViTTextModel.from_pretrained("google/owlvit-base-patch32")
869
+ >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
870
+ >>> inputs = processor(
871
+ ... text=[["a photo of a cat", "a photo of a dog"], ["photo of a astranaut"]], return_tensors="pt"
872
+ ... )
873
+ >>> outputs = model(**inputs)
874
+ >>> last_hidden_state = outputs.last_hidden_state
875
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
876
+ ```"""
877
+
878
+ # Get embeddings for all text queries in all batch samples
879
+ return self.text_model(
880
+ input_ids=input_ids,
881
+ attention_mask=attention_mask,
882
+ output_attentions=output_attentions,
883
+ output_hidden_states=output_hidden_states,
884
+ return_dict=return_dict,
885
+ )
886
+
887
+
888
+ class OwlViTVisionTransformer(nn.Module):
889
+ def __init__(self, config: OwlViTVisionConfig):
890
+ super().__init__()
891
+ self.config = config
892
+
893
+ self.embeddings = OwlViTVisionEmbeddings(config)
894
+ self.pre_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
895
+ self.encoder = OwlViTEncoder(config)
896
+ self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
897
+
898
+ @add_start_docstrings_to_model_forward(OWLVIT_VISION_INPUTS_DOCSTRING)
899
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=OwlViTVisionConfig)
900
+ def forward(
901
+ self,
902
+ pixel_values: torch.FloatTensor,
903
+ output_attentions: Optional[bool] = None,
904
+ output_hidden_states: Optional[bool] = None,
905
+ return_dict: Optional[bool] = None,
906
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
907
+ r"""
908
+ Returns:
909
+ """
910
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
911
+ output_hidden_states = (
912
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
913
+ )
914
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
915
+
916
+ # Cast the input to the expected `dtype`
917
+ expected_input_dtype = self.embeddings.patch_embedding.weight.dtype
918
+ pixel_values = pixel_values.to(expected_input_dtype)
919
+
920
+ hidden_states = self.embeddings(pixel_values)
921
+ hidden_states = self.pre_layernorm(hidden_states)
922
+
923
+ encoder_outputs = self.encoder(
924
+ inputs_embeds=hidden_states,
925
+ output_attentions=output_attentions,
926
+ output_hidden_states=output_hidden_states,
927
+ return_dict=return_dict,
928
+ )
929
+
930
+ last_hidden_state = encoder_outputs[0]
931
+ pooled_output = last_hidden_state[:, 0, :]
932
+
933
+ pooled_output = self.post_layernorm(pooled_output)
934
+
935
+ if not return_dict:
936
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
937
+
938
+ return BaseModelOutputWithPooling(
939
+ last_hidden_state=last_hidden_state,
940
+ pooler_output=pooled_output,
941
+ hidden_states=encoder_outputs.hidden_states,
942
+ attentions=encoder_outputs.attentions,
943
+ )
944
+
945
+
946
+ class OwlViTVisionModel(OwlViTPreTrainedModel):
947
+ config_class = OwlViTVisionConfig
948
+ main_input_name = "pixel_values"
949
+
950
+ def __init__(self, config: OwlViTVisionConfig):
951
+ super().__init__(config)
952
+ self.vision_model = OwlViTVisionTransformer(config)
953
+ # Initialize weights and apply final processing
954
+ self.post_init()
955
+
956
+ def get_input_embeddings(self) -> nn.Module:
957
+ return self.vision_model.embeddings.patch_embedding
958
+
959
+ @add_start_docstrings_to_model_forward(OWLVIT_VISION_INPUTS_DOCSTRING)
960
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=OwlViTVisionConfig)
961
+ def forward(
962
+ self,
963
+ pixel_values: Optional[torch.FloatTensor] = None,
964
+ output_attentions: Optional[bool] = None,
965
+ output_hidden_states: Optional[bool] = None,
966
+ return_dict: Optional[bool] = None,
967
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
968
+ r"""
969
+ Returns:
970
+
971
+ Examples:
972
+ ```python
973
+ >>> from PIL import Image
974
+ >>> import requests
975
+ >>> from transformers import AutoProcessor, OwlViTVisionModel
976
+
977
+ >>> model = OwlViTVisionModel.from_pretrained("google/owlvit-base-patch32")
978
+ >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
979
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
980
+ >>> image = Image.open(requests.get(url, stream=True).raw)
981
+
982
+ >>> inputs = processor(images=image, return_tensors="pt")
983
+
984
+ >>> outputs = model(**inputs)
985
+ >>> last_hidden_state = outputs.last_hidden_state
986
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
987
+ ```"""
988
+ return self.vision_model(
989
+ pixel_values=pixel_values,
990
+ output_attentions=output_attentions,
991
+ output_hidden_states=output_hidden_states,
992
+ return_dict=return_dict,
993
+ )
994
+
995
+
996
+ @add_start_docstrings(OWLVIT_START_DOCSTRING)
997
+ class OwlViTModel(OwlViTPreTrainedModel):
998
+ config_class = OwlViTConfig
999
+
1000
+ def __init__(self, config: OwlViTConfig):
1001
+ super().__init__(config)
1002
+
1003
+ if not isinstance(config.text_config, OwlViTTextConfig):
1004
+ raise ValueError(
1005
+ "config.text_config is expected to be of type OwlViTTextConfig but is of type"
1006
+ f" {type(config.text_config)}."
1007
+ )
1008
+
1009
+ if not isinstance(config.vision_config, OwlViTVisionConfig):
1010
+ raise ValueError(
1011
+ "config.vision_config is expected to be of type OwlViTVisionConfig but is of type"
1012
+ f" {type(config.vision_config)}."
1013
+ )
1014
+
1015
+ text_config = config.text_config
1016
+ vision_config = config.vision_config
1017
+
1018
+ self.projection_dim = config.projection_dim
1019
+ self.text_embed_dim = text_config.hidden_size
1020
+ self.vision_embed_dim = vision_config.hidden_size
1021
+
1022
+ self.text_model = OwlViTTextTransformer(text_config)
1023
+ self.vision_model = OwlViTVisionTransformer(vision_config)
1024
+
1025
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
1026
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
1027
+ self.logit_scale = nn.Parameter(torch.tensor(config.logit_scale_init_value))
1028
+
1029
+ # Initialize weights and apply final processing
1030
+ self.post_init()
1031
+
1032
+ @add_start_docstrings_to_model_forward(OWLVIT_TEXT_INPUTS_DOCSTRING)
1033
+ def get_text_features(
1034
+ self,
1035
+ input_ids: Optional[torch.Tensor] = None,
1036
+ attention_mask: Optional[torch.Tensor] = None,
1037
+ output_attentions: Optional[bool] = None,
1038
+ output_hidden_states: Optional[bool] = None,
1039
+ return_dict: Optional[bool] = None,
1040
+ ) -> torch.FloatTensor:
1041
+ r"""
1042
+ Returns:
1043
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1044
+ applying the projection layer to the pooled output of [`OwlViTTextModel`].
1045
+
1046
+ Examples:
1047
+ ```python
1048
+ >>> from transformers import AutoProcessor, OwlViTModel
1049
+
1050
+ >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
1051
+ >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
1052
+ >>> inputs = processor(
1053
+ ... text=[["a photo of a cat", "a photo of a dog"], ["photo of a astranaut"]], return_tensors="pt"
1054
+ ... )
1055
+ >>> text_features = model.get_text_features(**inputs)
1056
+ ```"""
1057
+ # Use OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
1058
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1059
+
1060
+ # Get embeddings for all text queries in all batch samples
1061
+ text_output = self.text_model(input_ids=input_ids, attention_mask=attention_mask, return_dict=return_dict)
1062
+ pooled_output = text_output[1]
1063
+ text_features = self.text_projection(pooled_output)
1064
+
1065
+ return text_features
1066
+
1067
+ @add_start_docstrings_to_model_forward(OWLVIT_VISION_INPUTS_DOCSTRING)
1068
+ def get_image_features(
1069
+ self,
1070
+ pixel_values: Optional[torch.FloatTensor] = None,
1071
+ output_attentions: Optional[bool] = None,
1072
+ output_hidden_states: Optional[bool] = None,
1073
+ return_dict: Optional[bool] = None,
1074
+ ) -> torch.FloatTensor:
1075
+ r"""
1076
+ Returns:
1077
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1078
+ applying the projection layer to the pooled output of [`OwlViTVisionModel`].
1079
+
1080
+ Examples:
1081
+ ```python
1082
+ >>> from PIL import Image
1083
+ >>> import requests
1084
+ >>> from transformers import AutoProcessor, OwlViTModel
1085
+
1086
+ >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
1087
+ >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
1088
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1089
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1090
+ >>> inputs = processor(images=image, return_tensors="pt")
1091
+ >>> image_features = model.get_image_features(**inputs)
1092
+ ```"""
1093
+ # Use OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
1094
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1095
+ output_hidden_states = (
1096
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1097
+ )
1098
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1099
+
1100
+ vision_outputs = self.vision_model(
1101
+ pixel_values=pixel_values,
1102
+ output_attentions=output_attentions,
1103
+ output_hidden_states=output_hidden_states,
1104
+ return_dict=return_dict,
1105
+ )
1106
+
1107
+ pooled_output = vision_outputs[1]
1108
+ image_features = self.visual_projection(pooled_output)
1109
+
1110
+ return image_features
1111
+
1112
+ @add_start_docstrings_to_model_forward(OWLVIT_INPUTS_DOCSTRING)
1113
+ @replace_return_docstrings(output_type=OwlViTOutput, config_class=OwlViTConfig)
1114
+ def forward(
1115
+ self,
1116
+ input_ids: Optional[torch.LongTensor] = None,
1117
+ pixel_values: Optional[torch.FloatTensor] = None,
1118
+ attention_mask: Optional[torch.Tensor] = None,
1119
+ return_loss: Optional[bool] = None,
1120
+ output_attentions: Optional[bool] = None,
1121
+ output_hidden_states: Optional[bool] = None,
1122
+ return_base_image_embeds: Optional[bool] = None,
1123
+ return_dict: Optional[bool] = None,
1124
+ ) -> Union[Tuple, OwlViTOutput]:
1125
+ r"""
1126
+ Returns:
1127
+
1128
+ Examples:
1129
+ ```python
1130
+ >>> from PIL import Image
1131
+ >>> import requests
1132
+ >>> from transformers import AutoProcessor, OwlViTModel
1133
+
1134
+ >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
1135
+ >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
1136
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1137
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1138
+ >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
1139
+ >>> outputs = model(**inputs)
1140
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1141
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1142
+ ```"""
1143
+ # Use OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
1144
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1145
+ output_hidden_states = (
1146
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1147
+ )
1148
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1149
+
1150
+ vision_outputs = self.vision_model(
1151
+ pixel_values=pixel_values,
1152
+ output_attentions=output_attentions,
1153
+ output_hidden_states=output_hidden_states,
1154
+ return_dict=return_dict,
1155
+ )
1156
+
1157
+ # Get embeddings for all text queries in all batch samples
1158
+ text_outputs = self.text_model(
1159
+ input_ids=input_ids,
1160
+ attention_mask=attention_mask,
1161
+ output_attentions=output_attentions,
1162
+ output_hidden_states=output_hidden_states,
1163
+ return_dict=return_dict,
1164
+ )
1165
+
1166
+ text_embeds = text_outputs[1]
1167
+ text_embeds = self.text_projection(text_embeds)
1168
+ image_embeds = vision_outputs[1]
1169
+ image_embeds = self.visual_projection(image_embeds)
1170
+
1171
+ # normalized features
1172
+ image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
1173
+ text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)
1174
+
1175
+ # cosine similarity as logits and set it on the correct device
1176
+ logit_scale = self.logit_scale.exp().to(image_embeds.device)
1177
+
1178
+ logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale
1179
+ logits_per_image = logits_per_text.t()
1180
+
1181
+ loss = None
1182
+ if return_loss:
1183
+ loss = owlvit_loss(logits_per_text)
1184
+
1185
+ if return_base_image_embeds:
1186
+ warnings.warn(
1187
+ "`return_base_image_embeds` is deprecated and will be removed in v4.27 of Transformers, one can"
1188
+ " obtain the base (unprojected) image embeddings from outputs.vision_model_output.",
1189
+ FutureWarning,
1190
+ )
1191
+ last_hidden_state = vision_outputs[0]
1192
+ image_embeds = self.vision_model.post_layernorm(last_hidden_state)
1193
+ else:
1194
+ text_embeds = text_embeds_norm
1195
+
1196
+ if not return_dict:
1197
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1198
+ return ((loss,) + output) if loss is not None else output
1199
+
1200
+ return OwlViTOutput(
1201
+ loss=loss,
1202
+ logits_per_image=logits_per_image,
1203
+ logits_per_text=logits_per_text,
1204
+ text_embeds=text_embeds,
1205
+ image_embeds=image_embeds,
1206
+ text_model_output=text_outputs,
1207
+ vision_model_output=vision_outputs,
1208
+ )
1209
+
1210
+
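As a quick illustration of the contrastive scoring in the `forward` above, the following standalone sketch reproduces the L2-normalization and temperature-scaled cosine similarity with random tensors; the `2.6592` logit-scale value is an illustrative assumption, not something read from a real checkpoint.

```python
import torch

image_embeds = torch.randn(2, 512)  # stand-in for the projected image embeddings
text_embeds = torch.randn(4, 512)   # stand-in for the projected text-query embeddings

# L2-normalize, then scale the cosine similarities by the exponentiated logit scale
image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
text_embeds = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)
logit_scale = torch.tensor(2.6592).exp()  # assumed initial temperature, ln(1/0.07)

logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale  # [4, 2]
logits_per_image = logits_per_text.t()                                       # [2, 4]
print(logits_per_image.softmax(dim=1))  # per-image probabilities over the text queries
```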
1211
+ class OwlViTBoxPredictionHead(nn.Module):
1212
+ def __init__(self, config: OwlViTConfig, out_dim: int = 4):
1213
+ super().__init__()
1214
+
1215
+ width = config.vision_config.hidden_size
1216
+ self.dense0 = nn.Linear(width, width)
1217
+ self.dense1 = nn.Linear(width, width)
1218
+ self.gelu = nn.GELU()
1219
+ self.dense2 = nn.Linear(width, out_dim)
1220
+
1221
+ def forward(self, image_features: torch.Tensor) -> torch.FloatTensor:
1222
+ output = self.dense0(image_features)
1223
+ output = self.gelu(output)
1224
+ output = self.dense1(output)
1225
+ output = self.gelu(output)
1226
+ output = self.dense2(output)
1227
+ return output
1228
+
1229
+
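`OwlViTBoxPredictionHead` above is a plain two-hidden-layer MLP applied to every patch token; a minimal shape-level sketch with a hypothetical hidden width of 768 and dummy features:

```python
import torch
from torch import nn

width = 768  # hypothetical vision hidden size
box_head = nn.Sequential(
    nn.Linear(width, width), nn.GELU(),
    nn.Linear(width, width), nn.GELU(),
    nn.Linear(width, 4),  # one (cx, cy, w, h) prediction per patch, before bias + sigmoid
)

image_feats = torch.randn(1, 576, width)  # [batch, num_patches, width]
print(box_head(image_feats).shape)        # torch.Size([1, 576, 4])
```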
1230
+ class OwlViTClassPredictionHead(nn.Module):
1231
+ def __init__(self, config: OwlViTConfig):
1232
+ super().__init__()
1233
+
1234
+ out_dim = config.text_config.hidden_size
1235
+ self.query_dim = config.vision_config.hidden_size
1236
+
1237
+ self.dense0 = nn.Linear(self.query_dim, out_dim)
1238
+ self.logit_shift = nn.Linear(self.query_dim, 1)
1239
+ self.logit_scale = nn.Linear(self.query_dim, 1)
1240
+ self.elu = nn.ELU()
1241
+
1242
+ def forward(
1243
+ self,
1244
+ image_embeds: torch.FloatTensor,
1245
+ query_embeds: Optional[torch.FloatTensor],
1246
+ query_mask: Optional[torch.Tensor],
1247
+ ) -> Tuple[torch.FloatTensor]:
1248
+ image_class_embeds = self.dense0(image_embeds)
1249
+ if query_embeds is None:
1250
+ device = image_class_embeds.device
1251
+ batch_size, num_patches = image_class_embeds.shape[:2]
1252
+ pred_logits = torch.zeros((batch_size, num_patches, self.query_dim)).to(device)
1253
+ return (pred_logits, image_class_embeds)
1254
+
1255
+ # Normalize image and text features
1256
+ image_class_embeds = image_class_embeds / (torch.linalg.norm(image_class_embeds, dim=-1, keepdim=True) + 1e-6)
1257
+ query_embeds = query_embeds / (torch.linalg.norm(query_embeds, dim=-1, keepdim=True) + 1e-6)
1258
+
1259
+ # Get class predictions
1260
+ pred_logits = torch.einsum("...pd,...qd->...pq", image_class_embeds, query_embeds)
1261
+
1262
+ # Apply a learnable shift and scale to logits
1263
+ logit_shift = self.logit_shift(image_embeds)
1264
+ logit_scale = self.logit_scale(image_embeds)
1265
+ logit_scale = self.elu(logit_scale) + 1
1266
+ pred_logits = (pred_logits + logit_shift) * logit_scale
1267
+
1268
+ if query_mask is not None:
1269
+ if query_mask.ndim > 1:
1270
+ query_mask = torch.unsqueeze(query_mask, dim=-2)
1271
+
1272
+ pred_logits = pred_logits.to(torch.float64)
1273
+ pred_logits = torch.where(query_mask == 0, -1e6, pred_logits)
1274
+ pred_logits = pred_logits.to(torch.float32)
1275
+
1276
+ return (pred_logits, image_class_embeds)
1277
+
1278
+
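A standalone sketch of the class-prediction scoring above: per-patch image embeddings and per-query text embeddings are L2-normalized, compared with an einsum, and then shifted and scaled per patch. Sizes and tensors below are made up for illustration.

```python
import torch

batch, num_patches, num_queries, dim = 1, 6, 2, 32
image_class_embeds = torch.randn(batch, num_patches, dim)
query_embeds = torch.randn(batch, num_queries, dim)

image_class_embeds = image_class_embeds / (torch.linalg.norm(image_class_embeds, dim=-1, keepdim=True) + 1e-6)
query_embeds = query_embeds / (torch.linalg.norm(query_embeds, dim=-1, keepdim=True) + 1e-6)

# One cosine-similarity logit per (patch, query) pair
pred_logits = torch.einsum("...pd,...qd->...pq", image_class_embeds, query_embeds)

# Stand-ins for the learned per-patch shift and the (ELU + 1, hence positive) scale
logit_shift = torch.zeros(batch, num_patches, 1)
logit_scale = torch.ones(batch, num_patches, 1)
pred_logits = (pred_logits + logit_shift) * logit_scale
print(pred_logits.shape)  # torch.Size([1, 6, 2])
```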
1279
+ class OwlViTForObjectDetection(OwlViTPreTrainedModel):
1280
+ config_class = OwlViTConfig
1281
+
1282
+ def __init__(self, config: OwlViTConfig):
1283
+ super().__init__(config)
1284
+
1285
+ self.owlvit = OwlViTModel(config)
1286
+ self.class_head = OwlViTClassPredictionHead(config)
1287
+ self.box_head = OwlViTBoxPredictionHead(config)
1288
+
1289
+ self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps)
1290
+ self.sigmoid = nn.Sigmoid()
1291
+
1292
+ self.sqrt_num_patches = config.vision_config.image_size // config.vision_config.patch_size
1293
+ self.box_bias = self.compute_box_bias(self.sqrt_num_patches)
1294
+
1295
+ @staticmethod
1296
+ def normalize_grid_corner_coordinates(num_patches: int) -> torch.Tensor:
1297
+ # Create grid coordinates using torch
1298
+ x_coordinates = torch.arange(1, num_patches + 1, dtype=torch.float32)
1299
+ y_coordinates = torch.arange(1, num_patches + 1, dtype=torch.float32)
1300
+ xx, yy = torch.meshgrid(x_coordinates, y_coordinates, indexing="xy")
1301
+
1302
+ # Stack the coordinates and divide by num_patches
1303
+ box_coordinates = torch.stack((xx, yy), dim=-1)
1304
+ box_coordinates /= num_patches
1305
+
1306
+ # Flatten (h, w, 2) -> (h*w, 2)
1307
+ box_coordinates = box_coordinates.view(-1, 2)
1308
+
1309
+ return box_coordinates
1310
+
1311
+ @lru_cache(maxsize=2)
1312
+ def compute_box_bias(self, num_patches: int, feature_map: Optional[torch.FloatTensor] = None) -> torch.Tensor:
1313
+ if feature_map is not None:
1314
+ raise ValueError("feature_map has been deprecated as an input. Please pass in num_patches instead")
1315
+ # The box center is biased to its position on the feature grid
1316
+ box_coordinates = self.normalize_grid_corner_coordinates(num_patches)
1317
+ box_coordinates = torch.clip(box_coordinates, 0.0, 1.0)
1318
+
1319
+ # Unnormalize xy
1320
+ box_coord_bias = torch.log(box_coordinates + 1e-4) - torch.log1p(-box_coordinates + 1e-4)
1321
+
1322
+ # The box size is biased to the patch size
1323
+ box_size = torch.full_like(box_coord_bias, 1.0 / num_patches)
1324
+ box_size_bias = torch.log(box_size + 1e-4) - torch.log1p(-box_size + 1e-4)
1325
+
1326
+ # Compute box bias
1327
+ box_bias = torch.cat([box_coord_bias, box_size_bias], dim=-1)
1328
+ return box_bias
1329
+
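The bias computed above is an inverse sigmoid (logit) of each patch's normalized grid position and of the patch size, so that a zero raw prediction passed through `sigmoid` recovers a box roughly centered on that patch. A small numeric check of just the formula:

```python
import torch

# Normalized grid positions, as produced by normalize_grid_corner_coordinates
p = torch.tensor([0.25, 0.50, 0.75])

# Inverse sigmoid with the same 1e-4 stabilizer used in compute_box_bias
bias = torch.log(p + 1e-4) - torch.log1p(-p + 1e-4)

# sigmoid(0 + bias) gives back (approximately) the original grid positions
print(torch.sigmoid(bias))  # ~tensor([0.25, 0.50, 0.75])
```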
1330
+ def box_predictor(
1331
+ self,
1332
+ image_feats: torch.FloatTensor,
1333
+ feature_map: torch.FloatTensor,
1334
+ ) -> torch.FloatTensor:
1335
+ """
1336
+ Args:
1337
+ image_feats:
1338
+ Features extracted from the image, returned by the `image_text_embedder` method.
1339
+ feature_map:
1340
+ A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
1341
+ Returns:
1342
+ pred_boxes:
1343
+ List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.
1344
+ """
1345
+ # Bounding box detection head [batch_size, num_boxes, 4].
1346
+ pred_boxes = self.box_head(image_feats)
1347
+
1348
+ # Compute the location of each token on the grid and use it to compute a bias for the bbox prediction
1349
+ box_bias = self.box_bias.to(feature_map.device)
1350
+ pred_boxes += box_bias
1351
+ pred_boxes = self.sigmoid(pred_boxes)
1352
+ return pred_boxes
1353
+
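Put together, `box_predictor` amounts to `sigmoid(box_head(image_feats) + box_bias)`, i.e. the MLP predicts an offset in logit space relative to the grid-derived bias. A shape-level sketch with dummy tensors (sizes are illustrative):

```python
import torch

num_patches = 576
raw_boxes = torch.randn(1, num_patches, 4)  # stand-in for box_head(image_feats)
box_bias = torch.randn(num_patches, 4)      # stand-in for the cached self.box_bias

pred_boxes = torch.sigmoid(raw_boxes + box_bias)  # cxcywh, normalized to [0, 1]
print(pred_boxes.shape)  # torch.Size([1, 576, 4])
```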
1354
+ def class_predictor(
1355
+ self,
1356
+ image_feats: torch.FloatTensor,
1357
+ query_embeds: Optional[torch.FloatTensor] = None,
1358
+ query_mask: Optional[torch.Tensor] = None,
1359
+ ) -> Tuple[torch.FloatTensor]:
1360
+ """
1361
+ Args:
1362
+ image_feats:
1363
+ Features extracted from the `image_text_embedder`.
1364
+ query_embeds:
1365
+ Text query embeddings.
1366
+ query_mask:
1367
+ Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
1368
+ """
1369
+ (pred_logits, image_class_embeds) = self.class_head(image_feats, query_embeds, query_mask)
1370
+
1371
+ return (pred_logits, image_class_embeds)
1372
+
1373
+ def image_text_embedder(
1374
+ self,
1375
+ input_ids: torch.Tensor,
1376
+ pixel_values: torch.FloatTensor,
1377
+ attention_mask: torch.Tensor,
1378
+ output_attentions: Optional[bool] = None,
1379
+ output_hidden_states: Optional[bool] = None,
1380
+ ) -> Tuple[torch.FloatTensor]:
1381
+ # Encode text and image
1382
+ outputs = self.owlvit(
1383
+ pixel_values=pixel_values,
1384
+ input_ids=input_ids,
1385
+ attention_mask=attention_mask,
1386
+ output_attentions=output_attentions,
1387
+ output_hidden_states=output_hidden_states,
1388
+ return_dict=True,
1389
+ )
1390
+
1391
+ # Get image embeddings
1392
+ last_hidden_state = outputs.vision_model_output[0]
1393
+ image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state)
1394
+
1395
+ # Resize class token
1396
+ class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)
1397
+
1398
+ # Merge image embedding with class tokens
1399
+ image_embeds = image_embeds[:, 1:, :] * class_token_out
1400
+ image_embeds = self.layer_norm(image_embeds)
1401
+
1402
+ # Resize to [batch_size, num_patches, num_patches, hidden_size]
1403
+ new_size = (
1404
+ image_embeds.shape[0],
1405
+ self.sqrt_num_patches,
1406
+ self.sqrt_num_patches,
1407
+ image_embeds.shape[-1],
1408
+ )
1409
+ image_embeds = image_embeds.reshape(new_size)
1410
+ text_embeds = outputs[-4]
1411
+
1412
+ return (text_embeds, image_embeds, outputs)
1413
+
1414
+ def image_embedder(
1415
+ self,
1416
+ pixel_values: torch.FloatTensor,
1417
+ output_attentions: Optional[bool] = None,
1418
+ output_hidden_states: Optional[bool] = None,
1419
+ ) -> Tuple[torch.FloatTensor]:
1420
+ # Get OwlViTModel vision embeddings (same as CLIP)
1421
+ vision_outputs = self.owlvit.vision_model(pixel_values=pixel_values, return_dict=True)
1422
+
1423
+ # Apply post_layernorm to last_hidden_state, return non-projected output
1424
+ last_hidden_state = vision_outputs[0]
1425
+ image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state)
1426
+
1427
+ # Resize class token
1428
+ class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)
1429
+
1430
+ # Merge image embedding with class tokens
1431
+ image_embeds = image_embeds[:, 1:, :] * class_token_out
1432
+ image_embeds = self.layer_norm(image_embeds)
1433
+
1434
+ # Resize to [batch_size, num_patches, num_patches, hidden_size]
1435
+ new_size = (
1436
+ image_embeds.shape[0],
1437
+ self.sqrt_num_patches,
1438
+ self.sqrt_num_patches,
1439
+ image_embeds.shape[-1],
1440
+ )
1441
+ image_embeds = image_embeds.reshape(new_size)
1442
+
1443
+ return (image_embeds, vision_outputs)
1444
+
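Both embedder methods above turn the ViT token sequence into a square feature map: the class token is broadcast over the patch tokens, multiplied in, layer-normed, and reshaped to `[batch, num_patches, num_patches, hidden]`. A shape-only sketch with dummy tensors (sizes assume 32x32 patches on a 768x768 input, purely for illustration):

```python
import torch
from torch import nn

batch, sqrt_num_patches, hidden = 1, 24, 768
seq_len = sqrt_num_patches**2 + 1                    # patch tokens + 1 class token
image_embeds = torch.randn(batch, seq_len, hidden)   # stand-in for post_layernorm(last_hidden_state)

# Broadcast the class token over the patch positions and merge it in
class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)
image_embeds = image_embeds[:, 1:, :] * class_token_out
image_embeds = nn.LayerNorm(hidden)(image_embeds)

# [batch, seq_len - 1, hidden] -> [batch, sqrt_num_patches, sqrt_num_patches, hidden]
feature_map = image_embeds.reshape(batch, sqrt_num_patches, sqrt_num_patches, hidden)
print(feature_map.shape)  # torch.Size([1, 24, 24, 768])
```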
1445
+ def embed_image_query(
1446
+ self, query_image_features: torch.FloatTensor, query_feature_map: torch.FloatTensor
1447
+ ) -> torch.FloatTensor:
1448
+ _, class_embeds = self.class_predictor(query_image_features)
1449
+ pred_boxes = self.box_predictor(query_image_features, query_feature_map)
1450
+ pred_boxes_as_corners = center_to_corners_format(pred_boxes)
1451
+
1452
+ # Loop over query images
1453
+ best_class_embeds = []
1454
+ best_box_indices = []
1455
+ pred_boxes_device = pred_boxes_as_corners.device
1456
+
1457
+ for i in range(query_image_features.shape[0]):
1458
+ each_query_box = torch.tensor([[0, 0, 1, 1]], device=pred_boxes_device)
1459
+ each_query_pred_boxes = pred_boxes_as_corners[i]
1460
+ ious, _ = box_iou(each_query_box, each_query_pred_boxes)
1461
+
1462
+ # If there are no overlapping boxes, fall back to generalized IoU
1463
+ if torch.all(ious[0] == 0.0):
1464
+ ious = generalized_box_iou(each_query_box, each_query_pred_boxes)
1465
+
1466
+ # Use an adaptive threshold to include all boxes within 80% of the best IoU
1467
+ iou_threshold = torch.max(ious) * 0.8
1468
+
1469
+ selected_inds = (ious[0] >= iou_threshold).nonzero()
1470
+ if selected_inds.numel():
1471
+ selected_embeddings = class_embeds[i][selected_inds.squeeze(1)]
1472
+ mean_embeds = torch.mean(class_embeds[i], axis=0)
1473
+ mean_sim = torch.einsum("d,id->i", mean_embeds, selected_embeddings)
1474
+ best_box_ind = selected_inds[torch.argmin(mean_sim)]
1475
+ best_class_embeds.append(class_embeds[i][best_box_ind])
1476
+ best_box_indices.append(best_box_ind)
1477
+
1478
+ if best_class_embeds:
1479
+ query_embeds = torch.stack(best_class_embeds)
1480
+ box_indices = torch.stack(best_box_indices)
1481
+ else:
1482
+ query_embeds, box_indices = None, None
1483
+
1484
+ return query_embeds, box_indices, pred_boxes
1485
+
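The selection logic in `embed_image_query` above keeps every predicted box whose IoU with the full-image query box is within 80% of the best IoU, then picks the embedding least similar to the mean of all box embeddings. A sketch of just that selection step with hand-made IoUs and random embeddings:

```python
import torch

ious = torch.tensor([[0.10, 0.72, 0.68, 0.05]])  # IoU of each predicted box against the query box
class_embeds = torch.randn(4, 16)                # one class embedding per predicted box

iou_threshold = torch.max(ious) * 0.8
selected_inds = (ious[0] >= iou_threshold).nonzero()   # here: boxes 1 and 2
selected_embeddings = class_embeds[selected_inds.squeeze(1)]

mean_embeds = torch.mean(class_embeds, dim=0)
mean_sim = torch.einsum("d,id->i", mean_embeds, selected_embeddings)
best_box_ind = selected_inds[torch.argmin(mean_sim)]   # least similar to the mean, as in the code above
print(best_box_ind)
```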
1486
+ @add_start_docstrings_to_model_forward(OWLVIT_IMAGE_GUIDED_OBJECT_DETECTION_INPUTS_DOCSTRING)
1487
+ @replace_return_docstrings(output_type=OwlViTImageGuidedObjectDetectionOutput, config_class=OwlViTConfig)
1488
+ def image_guided_detection(
1489
+ self,
1490
+ pixel_values: torch.FloatTensor,
1491
+ query_pixel_values: Optional[torch.FloatTensor] = None,
1492
+ output_attentions: Optional[bool] = None,
1493
+ output_hidden_states: Optional[bool] = None,
1494
+ return_dict: Optional[bool] = None,
1495
+ ) -> OwlViTImageGuidedObjectDetectionOutput:
1496
+ r"""
1497
+ Returns:
1498
+
1499
+ Examples:
1500
+ ```python
1501
+ >>> import requests
1502
+ >>> from PIL import Image
1503
+ >>> import torch
1504
+ >>> from transformers import AutoProcessor, OwlViTForObjectDetection
1505
+
1506
+ >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch16")
1507
+ >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch16")
1508
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1509
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1510
+ >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg"
1511
+ >>> query_image = Image.open(requests.get(query_url, stream=True).raw)
1512
+ >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")
1513
+ >>> with torch.no_grad():
1514
+ ... outputs = model.image_guided_detection(**inputs)
1515
+ >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
1516
+ >>> target_sizes = torch.Tensor([image.size[::-1]])
1517
+ >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
1518
+ >>> results = processor.post_process_image_guided_detection(
1519
+ ... outputs=outputs, threshold=0.6, nms_threshold=0.3, target_sizes=target_sizes
1520
+ ... )
1521
+ >>> i = 0 # Retrieve predictions for the first image
1522
+ >>> boxes, scores = results[i]["boxes"], results[i]["scores"]
1523
+ >>> for box, score in zip(boxes, scores):
1524
+ ... box = [round(i, 2) for i in box.tolist()]
1525
+ ... print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}")
1526
+ Detected similar object with confidence 0.856 at location [10.94, 50.4, 315.8, 471.39]
1527
+ Detected similar object with confidence 1.0 at location [334.84, 25.33, 636.16, 374.71]
1528
+ ```"""
1529
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1530
+ output_hidden_states = (
1531
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1532
+ )
1533
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1534
+
1535
+ # Compute feature maps for the input and query images
1536
+ query_feature_map = self.image_embedder(pixel_values=query_pixel_values)[0]
1537
+ feature_map, vision_outputs = self.image_embedder(
1538
+ pixel_values=pixel_values,
1539
+ output_attentions=output_attentions,
1540
+ output_hidden_states=output_hidden_states,
1541
+ )
1542
+
1543
+ batch_size, num_patches, num_patches, hidden_dim = feature_map.shape
1544
+ image_feats = torch.reshape(feature_map, (batch_size, num_patches * num_patches, hidden_dim))
1545
+
1546
+ batch_size, num_patches, num_patches, hidden_dim = query_feature_map.shape
1547
+ query_image_feats = torch.reshape(query_feature_map, (batch_size, num_patches * num_patches, hidden_dim))
1548
+ # Get top class embedding and best box index for each query image in batch
1549
+ query_embeds, best_box_indices, query_pred_boxes = self.embed_image_query(query_image_feats, query_feature_map)
1550
+
1551
+ # Predict object classes [batch_size, num_patches, num_queries+1]
1552
+ (pred_logits, class_embeds) = self.class_predictor(image_feats=image_feats, query_embeds=query_embeds)
1553
+
1554
+ # Predict object boxes
1555
+ target_pred_boxes = self.box_predictor(image_feats, feature_map)
1556
+
1557
+ if not return_dict:
1558
+ output = (
1559
+ feature_map,
1560
+ query_feature_map,
1561
+ target_pred_boxes,
1562
+ query_pred_boxes,
1563
+ pred_logits,
1564
+ class_embeds,
1565
+ vision_outputs.to_tuple(),
1566
+ )
1567
+ output = tuple(x for x in output if x is not None)
1568
+ return output
1569
+
1570
+ return OwlViTImageGuidedObjectDetectionOutput(
1571
+ image_embeds=feature_map,
1572
+ query_image_embeds=query_feature_map,
1573
+ target_pred_boxes=target_pred_boxes,
1574
+ query_pred_boxes=query_pred_boxes,
1575
+ logits=pred_logits,
1576
+ class_embeds=class_embeds,
1577
+ text_model_output=None,
1578
+ vision_model_output=vision_outputs,
1579
+ )
1580
+
1581
+ @add_start_docstrings_to_model_forward(OWLVIT_OBJECT_DETECTION_INPUTS_DOCSTRING)
1582
+ @replace_return_docstrings(output_type=OwlViTObjectDetectionOutput, config_class=OwlViTConfig)
1583
+ def forward(
1584
+ self,
1585
+ input_ids: torch.Tensor,
1586
+ pixel_values: torch.FloatTensor,
1587
+ attention_mask: Optional[torch.Tensor] = None,
1588
+ output_attentions: Optional[bool] = None,
1589
+ output_hidden_states: Optional[bool] = None,
1590
+ return_dict: Optional[bool] = None,
1591
+ ) -> OwlViTObjectDetectionOutput:
1592
+ r"""
1593
+ Returns:
1594
+
1595
+ Examples:
1596
+ ```python
1597
+ >>> import requests
1598
+ >>> from PIL import Image
1599
+ >>> import torch
1600
+ >>> from transformers import AutoProcessor, OwlViTForObjectDetection
1601
+
1602
+ >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
1603
+ >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
1604
+
1605
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1606
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1607
+ >>> texts = [["a photo of a cat", "a photo of a dog"]]
1608
+ >>> inputs = processor(text=texts, images=image, return_tensors="pt")
1609
+ >>> outputs = model(**inputs)
1610
+
1611
+ >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
1612
+ >>> target_sizes = torch.Tensor([image.size[::-1]])
1613
+ >>> # Convert outputs (bounding boxes and class logits) to final bounding boxes and scores
1614
+ >>> results = processor.post_process_object_detection(
1615
+ ... outputs=outputs, threshold=0.1, target_sizes=target_sizes
1616
+ ... )
1617
+
1618
+ >>> i = 0 # Retrieve predictions for the first image for the corresponding text queries
1619
+ >>> text = texts[i]
1620
+ >>> boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"]
1621
+
1622
+ >>> for box, score, label in zip(boxes, scores, labels):
1623
+ ... box = [round(i, 2) for i in box.tolist()]
1624
+ ... print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}")
1625
+ Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29]
1626
+ Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17]
1627
+ ```"""
1628
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1629
+ output_hidden_states = (
1630
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1631
+ )
1632
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1633
+
1634
+ # Embed images and text queries
1635
+ query_embeds, feature_map, outputs = self.image_text_embedder(
1636
+ input_ids=input_ids,
1637
+ pixel_values=pixel_values,
1638
+ attention_mask=attention_mask,
1639
+ output_attentions=output_attentions,
1640
+ output_hidden_states=output_hidden_states,
1641
+ )
1642
+
1643
+ # Text and vision model outputs
1644
+ text_outputs = outputs.text_model_output
1645
+ vision_outputs = outputs.vision_model_output
1646
+
1647
+ batch_size, num_patches, num_patches, hidden_dim = feature_map.shape
1648
+ image_feats = torch.reshape(feature_map, (batch_size, num_patches * num_patches, hidden_dim))
1649
+
1650
+ # Reshape from [batch_size * max_text_queries, hidden_dim] -> [batch_size, max_text_queries, hidden_dim]
1651
+ max_text_queries = input_ids.shape[0] // batch_size
1652
+ query_embeds = query_embeds.reshape(batch_size, max_text_queries, query_embeds.shape[-1])
1653
+
1654
+ # If first token is 0, then this is a padded query [batch_size, num_queries].
1655
+ input_ids = input_ids.reshape(batch_size, max_text_queries, input_ids.shape[-1])
1656
+ query_mask = input_ids[..., 0] > 0
1657
+
1658
+ # Predict object classes [batch_size, num_patches, num_queries+1]
1659
+ (pred_logits, class_embeds) = self.class_predictor(image_feats, query_embeds, query_mask)
1660
+
1661
+ # Predict object boxes
1662
+ pred_boxes = self.box_predictor(image_feats, feature_map)
1663
+
1664
+ if not return_dict:
1665
+ output = (
1666
+ pred_logits,
1667
+ pred_boxes,
1668
+ query_embeds,
1669
+ feature_map,
1670
+ class_embeds,
1671
+ text_outputs.to_tuple(),
1672
+ vision_outputs.to_tuple(),
1673
+ )
1674
+ output = tuple(x for x in output if x is not None)
1675
+ return output
1676
+
1677
+ return OwlViTObjectDetectionOutput(
1678
+ image_embeds=feature_map,
1679
+ text_embeds=query_embeds,
1680
+ pred_boxes=pred_boxes,
1681
+ logits=pred_logits,
1682
+ class_embeds=class_embeds,
1683
+ text_model_output=text_outputs,
1684
+ vision_model_output=vision_outputs,
1685
+ )
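One detail of the detection `forward` above worth spelling out: the processor flattens all text queries into `[batch_size * max_text_queries, seq_len]`, and the model recovers the per-image grouping and a padding mask from the first token id. A sketch with made-up token ids:

```python
import torch

batch_size, max_text_queries, seq_len = 2, 3, 4
# Flattened input ids as handed to the model; all-zero rows stand in for padded queries
input_ids = torch.tensor([
    [49406, 320, 2368, 49407],   # image 0, query 0
    [49406, 320, 1929, 49407],   # image 0, query 1
    [0, 0, 0, 0],                # image 0, padded query
    [49406, 320, 2368, 49407],   # image 1, query 0
    [0, 0, 0, 0],                # image 1, padded query
    [0, 0, 0, 0],                # image 1, padded query
])

input_ids = input_ids.reshape(batch_size, max_text_queries, seq_len)
query_mask = input_ids[..., 0] > 0  # True for real queries, False for padding
print(query_mask)
# tensor([[ True,  True, False],
#         [ True, False, False]])
```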
llmeval-env/lib/python3.10/site-packages/transformers/models/owlvit/processing_owlvit.py ADDED
@@ -0,0 +1,224 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for OWL-ViT
17
+ """
18
+
19
+ import warnings
20
+ from typing import List
21
+
22
+ import numpy as np
23
+
24
+ from ...processing_utils import ProcessorMixin
25
+ from ...tokenization_utils_base import BatchEncoding
26
+ from ...utils import is_flax_available, is_tf_available, is_torch_available
27
+
28
+
29
+ class OwlViTProcessor(ProcessorMixin):
30
+ r"""
31
+ Constructs an OWL-ViT processor which wraps [`OwlViTImageProcessor`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`]
32
+ into a single processor that interits both the image processor and tokenizer functionalities. See the
33
+ [`~OwlViTProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information.
34
+
35
+ Args:
36
+ image_processor ([`OwlViTImageProcessor`], *optional*):
37
+ The image processor is a required input.
38
+ tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`], *optional*):
39
+ The tokenizer is a required input.
40
+ """
41
+
42
+ attributes = ["image_processor", "tokenizer"]
43
+ image_processor_class = "OwlViTImageProcessor"
44
+ tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
45
+
46
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
47
+ feature_extractor = None
48
+ if "feature_extractor" in kwargs:
49
+ warnings.warn(
50
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
51
+ " instead.",
52
+ FutureWarning,
53
+ )
54
+ feature_extractor = kwargs.pop("feature_extractor")
55
+
56
+ image_processor = image_processor if image_processor is not None else feature_extractor
57
+ if image_processor is None:
58
+ raise ValueError("You need to specify an `image_processor`.")
59
+ if tokenizer is None:
60
+ raise ValueError("You need to specify a `tokenizer`.")
61
+
62
+ super().__init__(image_processor, tokenizer)
63
+
64
+ def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
65
+ """
66
+ Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and
67
+ `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode:
68
+ the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to
69
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring
70
+ of the above two methods for more information.
71
+
72
+ Args:
73
+ text (`str`, `List[str]`, `List[List[str]]`):
74
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
75
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
76
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
77
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
78
+ `List[torch.Tensor]`):
79
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
80
+ tensor. Both channels-first and channels-last formats are supported.
81
+ query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
82
+ The query image to be prepared, one query image is expected per target image to be queried. Each image
83
+ can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image
84
+ should be of shape (C, H, W), where C is the number of channels and H and W are the image height and width.
85
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
86
+ If set, will return tensors of a particular framework. Acceptable values are:
87
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
88
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
89
+ - `'np'`: Return NumPy `np.ndarray` objects.
90
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
91
+ Returns:
92
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
93
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
94
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
95
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
96
+ `None`).
97
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
98
+ """
99
+
100
+ if text is None and query_images is None and images is None:
101
+ raise ValueError(
102
+ "You have to specify at least one text or query image or image. All three cannot be none."
103
+ )
104
+
105
+ if text is not None:
106
+ if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
107
+ encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
108
+
109
+ elif isinstance(text, List) and isinstance(text[0], List):
110
+ encodings = []
111
+
112
+ # Maximum number of queries across batch
113
+ max_num_queries = max([len(t) for t in text])
114
+
115
+ # Pad all batch samples to max number of text queries
116
+ for t in text:
117
+ if len(t) != max_num_queries:
118
+ t = t + [" "] * (max_num_queries - len(t))
119
+
120
+ encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
121
+ encodings.append(encoding)
122
+ else:
123
+ raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
124
+
125
+ if return_tensors == "np":
126
+ input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
127
+ attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
128
+
129
+ elif return_tensors == "jax" and is_flax_available():
130
+ import jax.numpy as jnp
131
+
132
+ input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
133
+ attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
134
+
135
+ elif return_tensors == "pt" and is_torch_available():
136
+ import torch
137
+
138
+ input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
139
+ attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
140
+
141
+ elif return_tensors == "tf" and is_tf_available():
142
+ import tensorflow as tf
143
+
144
+ input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
145
+ attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
146
+
147
+ else:
148
+ raise ValueError("Target return tensor type could not be returned")
149
+
150
+ encoding = BatchEncoding()
151
+ encoding["input_ids"] = input_ids
152
+ encoding["attention_mask"] = attention_mask
153
+
154
+ if query_images is not None:
155
+ encoding = BatchEncoding()
156
+ query_pixel_values = self.image_processor(
157
+ query_images, return_tensors=return_tensors, **kwargs
158
+ ).pixel_values
159
+ encoding["query_pixel_values"] = query_pixel_values
160
+
161
+ if images is not None:
162
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
163
+
164
+ if text is not None and images is not None:
165
+ encoding["pixel_values"] = image_features.pixel_values
166
+ return encoding
167
+ elif query_images is not None and images is not None:
168
+ encoding["pixel_values"] = image_features.pixel_values
169
+ return encoding
170
+ elif text is not None or query_images is not None:
171
+ return encoding
172
+ else:
173
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
174
+
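The nested-list branch of `__call__` above pads every sample's query list to the batch-wide maximum before tokenizing, then concatenates the per-sample encodings along the batch axis. A minimal illustration of just the padding step (plain Python, no tokenizer involved):

```python
# Pad each sample's text queries to the maximum query count in the batch,
# mirroring the loop in OwlViTProcessor.__call__ above.
text = [["a photo of a cat", "a photo of a dog"], ["a remote control"]]

max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
print(padded)
# [['a photo of a cat', 'a photo of a dog'], ['a remote control', ' ']]
```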
175
+ def post_process(self, *args, **kwargs):
176
+ """
177
+ This method forwards all its arguments to [`OwlViTImageProcessor.post_process`]. Please refer to the docstring
178
+ of this method for more information.
179
+ """
180
+ return self.image_processor.post_process(*args, **kwargs)
181
+
182
+ def post_process_object_detection(self, *args, **kwargs):
183
+ """
184
+ This method forwards all its arguments to [`OwlViTImageProcessor.post_process_object_detection`]. Please refer
185
+ to the docstring of this method for more information.
186
+ """
187
+ return self.image_processor.post_process_object_detection(*args, **kwargs)
188
+
189
+ def post_process_image_guided_detection(self, *args, **kwargs):
190
+ """
191
+ This method forwards all its arguments to [`OwlViTImageProcessor.post_process_image_guided_detection`].
192
+ Please refer to the docstring of this method for more information.
193
+ """
194
+ return self.image_processor.post_process_image_guided_detection(*args, **kwargs)
195
+
196
+ def batch_decode(self, *args, **kwargs):
197
+ """
198
+ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
199
+ refer to the docstring of this method for more information.
200
+ """
201
+ return self.tokenizer.batch_decode(*args, **kwargs)
202
+
203
+ def decode(self, *args, **kwargs):
204
+ """
205
+ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
206
+ the docstring of this method for more information.
207
+ """
208
+ return self.tokenizer.decode(*args, **kwargs)
209
+
210
+ @property
211
+ def feature_extractor_class(self):
212
+ warnings.warn(
213
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
214
+ FutureWarning,
215
+ )
216
+ return self.image_processor_class
217
+
218
+ @property
219
+ def feature_extractor(self):
220
+ warnings.warn(
221
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
222
+ FutureWarning,
223
+ )
224
+ return self.image_processor
llmeval-env/lib/python3.10/site-packages/transformers/models/persimmon/__init__.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright 2023 AdeptAI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_persimmon": ["PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP", "PersimmonConfig"],
25
+ }
26
+
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_persimmon"] = [
35
+ "PersimmonForCausalLM",
36
+ "PersimmonModel",
37
+ "PersimmonPreTrainedModel",
38
+ "PersimmonForSequenceClassification",
39
+ ]
40
+
41
+
42
+ if TYPE_CHECKING:
43
+ from .configuration_persimmon import PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP, PersimmonConfig
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ from .modeling_persimmon import (
52
+ PersimmonForCausalLM,
53
+ PersimmonForSequenceClassification,
54
+ PersimmonModel,
55
+ PersimmonPreTrainedModel,
56
+ )
57
+
58
+
59
+ else:
60
+ import sys
61
+
62
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
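For context, the `_LazyModule` replacement above defers importing `modeling_persimmon` (and its torch dependency) until one of its symbols is first accessed. A simplified sketch of the same idea using a module-level `__getattr__` (PEP 562); this illustrates the pattern only and is not transformers' actual `_LazyModule` implementation:

```python
# hypothetical lazy_pkg/__init__.py illustrating deferred submodule imports
import importlib

_import_structure = {
    "configuration_persimmon": ["PersimmonConfig"],
    "modeling_persimmon": ["PersimmonModel", "PersimmonForCausalLM"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Import the submodule only when one of its symbols is first requested
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```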
llmeval-env/lib/python3.10/site-packages/transformers/models/persimmon/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (959 Bytes).