applied-ai-018 commited on
Commit
1e1c2c5
·
verified ·
1 Parent(s): 132bf9e

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__init__.py +71 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/configuration_altclip.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/modeling_altclip.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/processing_altclip.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/configuration_altclip.py +402 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/modeling_altclip.py +1693 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/processing_altclip.py +131 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py +65 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/__init__.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/configuration_decision_transformer.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/configuration_decision_transformer.py +157 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py +937 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__init__.py +73 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/convert_deta_resnet_to_pytorch.py +320 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/convert_deta_swin_to_pytorch.py +327 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/image_processing_deta.py +1174 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/modeling_deta.py +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__init__.py +83 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/__init__.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/configuration_hubert.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_hubert.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_tf_hubert.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/configuration_hubert.py +261 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py +223 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py +69 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/modeling_hubert.py +1386 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/modeling_tf_hubert.py +1676 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__init__.py +74 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/__init__.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/configuration_llava_next.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/convert_llava_next_weights_to_hf.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/image_processing_llava_next.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/modeling_llava_next.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/processing_llava_next.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/configuration_llava_next.py +141 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/convert_llava_next_weights_to_hf.py +342 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/image_processing_llava_next.py +608 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/modeling_llava_next.py +698 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/processing_llava_next.py +135 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__init__.py +111 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/__init__.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/configuration_regnet.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_seer_10b_to_pytorch.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_to_pytorch.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_flax_regnet.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__init__.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_altclip": [
21
+ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "AltCLIPConfig",
23
+ "AltCLIPTextConfig",
24
+ "AltCLIPVisionConfig",
25
+ ],
26
+ "processing_altclip": ["AltCLIPProcessor"],
27
+ }
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_altclip"] = [
36
+ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "AltCLIPPreTrainedModel",
38
+ "AltCLIPModel",
39
+ "AltCLIPTextModel",
40
+ "AltCLIPVisionModel",
41
+ ]
42
+
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_altclip import (
46
+ ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
47
+ AltCLIPConfig,
48
+ AltCLIPTextConfig,
49
+ AltCLIPVisionConfig,
50
+ )
51
+ from .processing_altclip import AltCLIPProcessor
52
+
53
+ try:
54
+ if not is_torch_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ pass
58
+ else:
59
+ from .modeling_altclip import (
60
+ ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
61
+ AltCLIPModel,
62
+ AltCLIPPreTrainedModel,
63
+ AltCLIPTextModel,
64
+ AltCLIPVisionModel,
65
+ )
66
+
67
+
68
+ else:
69
+ import sys
70
+
71
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/configuration_altclip.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/modeling_altclip.cpython-310.pyc ADDED
Binary file (50.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/processing_altclip.cpython-310.pyc ADDED
Binary file (5.64 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/configuration_altclip.py ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 WenXiang ZhongzhiCheng LedellWu LiuGuang BoWenZhang and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ AltCLIP model configuration"""
16
+ import os
17
+ from typing import Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class AltCLIPTextConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a
32
+ AltCLIP text model according to the specified arguments, defining the model architecture. Instantiating a
33
+ configuration with the defaults will yield a similar configuration to that of the AltCLIP
34
+ [BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 250002):
42
+ Vocabulary size of the AltCLIP model. Defines the number of different tokens that can be represented by the
43
+ `inputs_ids` passed when calling [`AltCLIPTextModel`].
44
+ hidden_size (`int`, *optional*, defaults to 1024):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ num_hidden_layers (`int`, *optional*, defaults to 24):
47
+ Number of hidden layers in the Transformer encoder.
48
+ num_attention_heads (`int`, *optional*, defaults to 16):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ intermediate_size (`int`, *optional*, defaults to 4096):
51
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
52
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
53
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
54
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
55
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
57
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout ratio for the attention probabilities.
59
+ max_position_embeddings (`int`, *optional*, defaults to 514):
60
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
61
+ just in case (e.g., 512 or 1024 or 2048).
62
+ type_vocab_size (`int`, *optional*, defaults to 1):
63
+ The vocabulary size of the `token_type_ids` passed when calling [`AltCLIPTextModel`]
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ initializer_factor (`float`, *optional*, defaults to 0.02):
67
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
68
+ testing).
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
70
+ The epsilon used by the layer normalization layers.
71
+ pad_token_id (`int`, *optional*, defaults to 1): The id of the *padding* token.
72
+ bos_token_id (`int`, *optional*, defaults to 0): The id of the *beginning-of-sequence* token.
73
+ eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2):
74
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
75
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
76
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
77
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
78
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
79
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
80
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
81
+ use_cache (`bool`, *optional*, defaults to `True`):
82
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
83
+ relevant if `config.is_decoder=True`.
84
+ project_dim (`int`, *optional*, defaults to 768):
85
+ The dimentions of the teacher model before the mapping layer.
86
+
87
+ Examples:
88
+
89
+ ```python
90
+ >>> from transformers import AltCLIPTextModel, AltCLIPTextConfig
91
+
92
+ >>> # Initializing a AltCLIPTextConfig with BAAI/AltCLIP style configuration
93
+ >>> configuration = AltCLIPTextConfig()
94
+
95
+ >>> # Initializing a AltCLIPTextModel (with random weights) from the BAAI/AltCLIP style configuration
96
+ >>> model = AltCLIPTextModel(configuration)
97
+
98
+ >>> # Accessing the model configuration
99
+ >>> configuration = model.config
100
+ ```"""
101
+
102
+ model_type = "altclip_text_model"
103
+
104
+ def __init__(
105
+ self,
106
+ vocab_size=250002,
107
+ hidden_size=1024,
108
+ num_hidden_layers=24,
109
+ num_attention_heads=16,
110
+ intermediate_size=4096,
111
+ hidden_act="gelu",
112
+ hidden_dropout_prob=0.1,
113
+ attention_probs_dropout_prob=0.1,
114
+ max_position_embeddings=514,
115
+ type_vocab_size=1,
116
+ initializer_range=0.02,
117
+ initializer_factor=0.02,
118
+ layer_norm_eps=1e-05,
119
+ pad_token_id=1,
120
+ bos_token_id=0,
121
+ eos_token_id=2,
122
+ position_embedding_type="absolute",
123
+ use_cache=True,
124
+ project_dim=768,
125
+ **kwargs,
126
+ ):
127
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
128
+
129
+ self.vocab_size = vocab_size
130
+ self.hidden_size = hidden_size
131
+ self.num_hidden_layers = num_hidden_layers
132
+ self.num_attention_heads = num_attention_heads
133
+ self.hidden_act = hidden_act
134
+ self.intermediate_size = intermediate_size
135
+ self.hidden_dropout_prob = hidden_dropout_prob
136
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
137
+ self.max_position_embeddings = max_position_embeddings
138
+ self.type_vocab_size = type_vocab_size
139
+ self.initializer_range = initializer_range
140
+ self.initializer_factor = initializer_factor
141
+ self.layer_norm_eps = layer_norm_eps
142
+ self.position_embedding_type = position_embedding_type
143
+ self.use_cache = use_cache
144
+ self.project_dim = project_dim
145
+
146
+
147
+ class AltCLIPVisionConfig(PretrainedConfig):
148
+ r"""
149
+ This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
150
+ AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
151
+ with the defaults will yield a similar configuration to that of the AltCLIP
152
+ [BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
153
+
154
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
155
+ documentation from [`PretrainedConfig`] for more information.
156
+
157
+
158
+ Args:
159
+ hidden_size (`int`, *optional*, defaults to 768):
160
+ Dimensionality of the encoder layers and the pooler layer.
161
+ intermediate_size (`int`, *optional*, defaults to 3072):
162
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
163
+ projection_dim (`int`, *optional*, defaults to 512):
164
+ Dimentionality of text and vision projection layers.
165
+ num_hidden_layers (`int`, *optional*, defaults to 12):
166
+ Number of hidden layers in the Transformer encoder.
167
+ num_attention_heads (`int`, *optional*, defaults to 12):
168
+ Number of attention heads for each attention layer in the Transformer encoder.
169
+ num_channels (`int`, *optional*, defaults to 3):
170
+ The number of input channels.
171
+ image_size (`int`, *optional*, defaults to 224):
172
+ The size (resolution) of each image.
173
+ patch_size (`int`, *optional*, defaults to 32):
174
+ The size (resolution) of each patch.
175
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
176
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
177
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
178
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
179
+ The epsilon used by the layer normalization layers.
180
+ attention_dropout (`float`, *optional*, defaults to 0.0):
181
+ The dropout ratio for the attention probabilities.
182
+ initializer_range (`float`, *optional*, defaults to 0.02):
183
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
184
+ initializer_factor (`float`, *optional*, defaults to 1.0):
185
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
186
+ testing).
187
+
188
+ Example:
189
+
190
+ ```python
191
+ >>> from transformers import AltCLIPVisionConfig, AltCLIPVisionModel
192
+
193
+ >>> # Initializing a AltCLIPVisionConfig with BAAI/AltCLIP style configuration
194
+ >>> configuration = AltCLIPVisionConfig()
195
+
196
+ >>> # Initializing a AltCLIPVisionModel (with random weights) from the BAAI/AltCLIP style configuration
197
+ >>> model = AltCLIPVisionModel(configuration)
198
+
199
+ >>> # Accessing the model configuration
200
+ >>> configuration = model.config
201
+ ```"""
202
+
203
+ model_type = "altclip_vision_model"
204
+
205
+ def __init__(
206
+ self,
207
+ hidden_size=768,
208
+ intermediate_size=3072,
209
+ projection_dim=512,
210
+ num_hidden_layers=12,
211
+ num_attention_heads=12,
212
+ num_channels=3,
213
+ image_size=224,
214
+ patch_size=32,
215
+ hidden_act="quick_gelu",
216
+ layer_norm_eps=1e-5,
217
+ attention_dropout=0.0,
218
+ initializer_range=0.02,
219
+ initializer_factor=1.0,
220
+ **kwargs,
221
+ ):
222
+ super().__init__(**kwargs)
223
+
224
+ self.hidden_size = hidden_size
225
+ self.intermediate_size = intermediate_size
226
+ self.projection_dim = projection_dim
227
+ self.num_hidden_layers = num_hidden_layers
228
+ self.num_attention_heads = num_attention_heads
229
+ self.num_channels = num_channels
230
+ self.patch_size = patch_size
231
+ self.image_size = image_size
232
+ self.initializer_range = initializer_range
233
+ self.initializer_factor = initializer_factor
234
+ self.attention_dropout = attention_dropout
235
+ self.layer_norm_eps = layer_norm_eps
236
+ self.hidden_act = hidden_act
237
+
238
+ @classmethod
239
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
240
+ cls._set_token_in_kwargs(kwargs)
241
+
242
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
243
+
244
+ # get the vision config dict if we are loading from AltCLIPConfig
245
+ if config_dict.get("model_type") == "altclip":
246
+ config_dict = config_dict["vision_config"]
247
+
248
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
249
+ logger.warning(
250
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
251
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
252
+ )
253
+
254
+ return cls.from_dict(config_dict, **kwargs)
255
+
256
+
257
+ class AltCLIPConfig(PretrainedConfig):
258
+ r"""
259
+ This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an
260
+ AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration
261
+ with the defaults will yield a similar configuration to that of the AltCLIP
262
+ [BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
263
+
264
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
265
+ documentation from [`PretrainedConfig`] for more information.
266
+
267
+ Args:
268
+ text_config (`dict`, *optional*):
269
+ Dictionary of configuration options used to initialize [`AltCLIPTextConfig`].
270
+ vision_config (`dict`, *optional*):
271
+ Dictionary of configuration options used to initialize [`AltCLIPVisionConfig`].
272
+ projection_dim (`int`, *optional*, defaults to 768):
273
+ Dimentionality of text and vision projection layers.
274
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
275
+ The inital value of the *logit_scale* paramter. Default is used as per the original CLIP implementation.
276
+ kwargs (*optional*):
277
+ Dictionary of keyword arguments.
278
+
279
+ Example:
280
+
281
+ ```python
282
+ >>> from transformers import AltCLIPConfig, AltCLIPModel
283
+
284
+ >>> # Initializing a AltCLIPConfig with BAAI/AltCLIP style configuration
285
+ >>> configuration = AltCLIPConfig()
286
+
287
+ >>> # Initializing a AltCLIPModel (with random weights) from the BAAI/AltCLIP style configuration
288
+ >>> model = AltCLIPModel(configuration)
289
+
290
+ >>> # Accessing the model configuration
291
+ >>> configuration = model.config
292
+
293
+ >>> # We can also initialize a AltCLIPConfig from a AltCLIPTextConfig and a AltCLIPVisionConfig
294
+
295
+ >>> # Initializing a AltCLIPText and AltCLIPVision configuration
296
+ >>> config_text = AltCLIPTextConfig()
297
+ >>> config_vision = AltCLIPVisionConfig()
298
+
299
+ >>> config = AltCLIPConfig.from_text_vision_configs(config_text, config_vision)
300
+ ```"""
301
+
302
+ model_type = "altclip"
303
+
304
+ def __init__(
305
+ self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
306
+ ):
307
+ # If `_config_dict` exist, we use them for the backward compatibility.
308
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
309
+ # of confusion!).
310
+ text_config_dict = kwargs.pop("text_config_dict", None)
311
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
312
+
313
+ super().__init__(**kwargs)
314
+
315
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
316
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
317
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
318
+ if text_config_dict is not None:
319
+ if text_config is None:
320
+ text_config = {}
321
+
322
+ # This is the complete result when using `text_config_dict`.
323
+ _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
324
+
325
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
326
+ for key, value in _text_config_dict.items():
327
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
328
+ # If specified in `text_config_dict`
329
+ if key in text_config_dict:
330
+ message = (
331
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
332
+ f'The value `text_config_dict["{key}"]` will be used instead.'
333
+ )
334
+ # If inferred from default argument values (just to be super careful)
335
+ else:
336
+ message = (
337
+ f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
338
+ f'value `text_config["{key}"]` will be overriden.'
339
+ )
340
+ logger.info(message)
341
+
342
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
343
+ text_config.update(_text_config_dict)
344
+
345
+ if vision_config_dict is not None:
346
+ if vision_config is None:
347
+ vision_config = {}
348
+
349
+ # This is the complete result when using `vision_config_dict`.
350
+ _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
351
+ # convert keys to string instead of integer
352
+ if "id2label" in _vision_config_dict:
353
+ _vision_config_dict["id2label"] = {
354
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
355
+ }
356
+
357
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
358
+ for key, value in _vision_config_dict.items():
359
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
360
+ # If specified in `vision_config_dict`
361
+ if key in vision_config_dict:
362
+ message = (
363
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
364
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
365
+ )
366
+ # If inferred from default argument values (just to be super careful)
367
+ else:
368
+ message = (
369
+ f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
370
+ f'The value `vision_config["{key}"]` will be overriden.'
371
+ )
372
+ logger.info(message)
373
+
374
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
375
+ vision_config.update(_vision_config_dict)
376
+
377
+ if text_config is None:
378
+ text_config = {}
379
+ logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
380
+
381
+ if vision_config is None:
382
+ vision_config = {}
383
+ logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
384
+
385
+ self.text_config = AltCLIPTextConfig(**text_config)
386
+ self.vision_config = AltCLIPVisionConfig(**vision_config)
387
+
388
+ self.projection_dim = projection_dim
389
+ self.logit_scale_init_value = logit_scale_init_value
390
+ self.initializer_factor = 1.0
391
+
392
+ @classmethod
393
+ def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
394
+ r"""
395
+ Instantiate a [`AltCLIPConfig`] (or a derived class) from altclip text model configuration and altclip vision
396
+ model configuration.
397
+
398
+ Returns:
399
+ [`AltCLIPConfig`]: An instance of a configuration object
400
+ """
401
+
402
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/modeling_altclip.py ADDED
@@ -0,0 +1,1693 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The BAAI Teams Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch AltCLIP model."""
16
+ import math
17
+ from dataclasses import dataclass
18
+ from typing import Any, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.utils.checkpoint
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BaseModelOutput,
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ BaseModelOutputWithPooling,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ BaseModelOutputWithPoolingAndProjection,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
34
+ from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
35
+ from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CHECKPOINT_FOR_DOC = "BAAI/AltCLIP"
41
+ _CONFIG_FOR_DOC = "AltCLIPConfig"
42
+
43
+
44
+ from ..deprecated._archive_maps import ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
45
+
46
+
47
+ ALTCLIP_START_DOCSTRING = r"""
48
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
49
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
50
+ etc.)
51
+
52
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
53
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
54
+ and behavior.
55
+
56
+ Parameters:
57
+ config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
58
+ Initializing with a config file does not load the weights associated with the model, only the
59
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
60
+ """
61
+
62
+ ALTCLIP_TEXT_INPUTS_DOCSTRING = r"""
63
+ Args:
64
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
65
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
66
+ it.
67
+
68
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
69
+ [`PreTrainedTokenizer.__call__`] for details.
70
+
71
+ [What are input IDs?](../glossary#input-ids)
72
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
73
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
74
+
75
+ - 1 for tokens that are **not masked**,
76
+ - 0 for tokens that are **masked**.
77
+
78
+ [What are attention masks?](../glossary#attention-mask)
79
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
80
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
81
+ config.max_position_embeddings - 1]`.
82
+
83
+ [What are position IDs?](../glossary#position-ids)
84
+ output_attentions (`bool`, *optional*):
85
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
86
+ tensors for more detail.
87
+ output_hidden_states (`bool`, *optional*):
88
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
89
+ more detail.
90
+ return_dict (`bool`, *optional*):
91
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
92
+ """
93
+
94
+ ALTCLIP_VISION_INPUTS_DOCSTRING = r"""
95
+ Args:
96
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
97
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
98
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
99
+ output_attentions (`bool`, *optional*):
100
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
101
+ tensors for more detail.
102
+ output_hidden_states (`bool`, *optional*):
103
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
104
+ more detail.
105
+ return_dict (`bool`, *optional*):
106
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
107
+ """
108
+
109
+ ALTCLIP_INPUTS_DOCSTRING = r"""
110
+ Args:
111
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
112
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
113
+ it.
114
+
115
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
116
+ [`PreTrainedTokenizer.__call__`] for details.
117
+
118
+ [What are input IDs?](../glossary#input-ids)
119
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
120
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
121
+
122
+ - 1 for tokens that are **not masked**,
123
+ - 0 for tokens that are **masked**.
124
+
125
+ [What are attention masks?](../glossary#attention-mask)
126
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
127
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
128
+ config.max_position_embeddings - 1]`.
129
+
130
+ [What are position IDs?](../glossary#position-ids)
131
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
132
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
133
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
134
+ return_loss (`bool`, *optional*):
135
+ Whether or not to return the contrastive loss.
136
+ output_attentions (`bool`, *optional*):
137
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
138
+ tensors for more detail.
139
+ output_hidden_states (`bool`, *optional*):
140
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
141
+ more detail.
142
+ return_dict (`bool`, *optional*):
143
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
144
+ """
145
+
146
+
147
+ # contrastive loss function, adapted from
148
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
149
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
150
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
151
+
152
+
153
+ def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
154
+ caption_loss = contrastive_loss(similarity)
155
+ image_loss = contrastive_loss(similarity.t())
156
+ return (caption_loss + image_loss) / 2.0
157
+
158
+
159
+ @dataclass
160
+ # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP
161
+ class AltCLIPOutput(ModelOutput):
162
+ """
163
+ Args:
164
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
165
+ Contrastive loss for image-text similarity.
166
+ logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
167
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
168
+ similarity scores.
169
+ logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
170
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
171
+ similarity scores.
172
+ text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`):
173
+ The text embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPTextModel`].
174
+ image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`):
175
+ The image embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPVisionModel`].
176
+ text_model_output(`BaseModelOutputWithPooling`):
177
+ The output of the [`AltCLIPTextModel`].
178
+ vision_model_output(`BaseModelOutputWithPooling`):
179
+ The output of the [`AltCLIPVisionModel`].
180
+ """
181
+
182
+ loss: Optional[torch.FloatTensor] = None
183
+ logits_per_image: torch.FloatTensor = None
184
+ logits_per_text: torch.FloatTensor = None
185
+ text_embeds: torch.FloatTensor = None
186
+ image_embeds: torch.FloatTensor = None
187
+ text_model_output: BaseModelOutputWithPooling = None
188
+ vision_model_output: BaseModelOutputWithPooling = None
189
+
190
+ def to_tuple(self) -> Tuple[Any]:
191
+ return tuple(
192
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
193
+ for k in self.keys()
194
+ )
195
+
196
+
197
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->AltRoberta
198
+ class AltRobertaEmbeddings(nn.Module):
199
+ """
200
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
201
+ """
202
+
203
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
204
+ def __init__(self, config):
205
+ super().__init__()
206
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
207
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
208
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
209
+
210
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
211
+ # any TensorFlow checkpoint file
212
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
213
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
214
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
215
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
216
+ self.register_buffer(
217
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
218
+ )
219
+ self.register_buffer(
220
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
221
+ )
222
+
223
+ # End copy
224
+ self.padding_idx = config.pad_token_id
225
+ self.position_embeddings = nn.Embedding(
226
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
227
+ )
228
+
229
+ def forward(
230
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
231
+ ):
232
+ if position_ids is None:
233
+ if input_ids is not None:
234
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
235
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
236
+ else:
237
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
238
+
239
+ if input_ids is not None:
240
+ input_shape = input_ids.size()
241
+ else:
242
+ input_shape = inputs_embeds.size()[:-1]
243
+
244
+ seq_length = input_shape[1]
245
+
246
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
247
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
248
+ # issue #5664
249
+ if token_type_ids is None:
250
+ if hasattr(self, "token_type_ids"):
251
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
252
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
253
+ token_type_ids = buffered_token_type_ids_expanded
254
+ else:
255
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
256
+
257
+ if inputs_embeds is None:
258
+ inputs_embeds = self.word_embeddings(input_ids)
259
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
260
+
261
+ embeddings = inputs_embeds + token_type_embeddings
262
+ if self.position_embedding_type == "absolute":
263
+ position_embeddings = self.position_embeddings(position_ids)
264
+ embeddings += position_embeddings
265
+ embeddings = self.LayerNorm(embeddings)
266
+ embeddings = self.dropout(embeddings)
267
+ return embeddings
268
+
269
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
270
+ """
271
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
272
+
273
+ Args:
274
+ inputs_embeds: torch.Tensor
275
+
276
+ Returns: torch.Tensor
277
+ """
278
+ input_shape = inputs_embeds.size()[:-1]
279
+ sequence_length = input_shape[1]
280
+
281
+ position_ids = torch.arange(
282
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
283
+ )
284
+ return position_ids.unsqueeze(0).expand(input_shape)
285
+
286
+
287
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->AltRoberta
288
+ class AltRobertaSelfAttention(nn.Module):
289
+ def __init__(self, config, position_embedding_type=None):
290
+ super().__init__()
291
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
292
+ raise ValueError(
293
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
294
+ f"heads ({config.num_attention_heads})"
295
+ )
296
+
297
+ self.num_attention_heads = config.num_attention_heads
298
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
299
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
300
+
301
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
302
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
303
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
304
+
305
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
306
+ self.position_embedding_type = position_embedding_type or getattr(
307
+ config, "position_embedding_type", "absolute"
308
+ )
309
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
310
+ self.max_position_embeddings = config.max_position_embeddings
311
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
312
+
313
+ self.is_decoder = config.is_decoder
314
+
315
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
316
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
317
+ x = x.view(new_x_shape)
318
+ return x.permute(0, 2, 1, 3)
319
+
320
+ def forward(
321
+ self,
322
+ hidden_states: torch.Tensor,
323
+ attention_mask: Optional[torch.FloatTensor] = None,
324
+ head_mask: Optional[torch.FloatTensor] = None,
325
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
326
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
327
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
328
+ output_attentions: Optional[bool] = False,
329
+ ) -> Tuple[torch.Tensor]:
330
+ mixed_query_layer = self.query(hidden_states)
331
+
332
+ # If this is instantiated as a cross-attention module, the keys
333
+ # and values come from an encoder; the attention mask needs to be
334
+ # such that the encoder's padding tokens are not attended to.
335
+ is_cross_attention = encoder_hidden_states is not None
336
+
337
+ if is_cross_attention and past_key_value is not None:
338
+ # reuse k,v, cross_attentions
339
+ key_layer = past_key_value[0]
340
+ value_layer = past_key_value[1]
341
+ attention_mask = encoder_attention_mask
342
+ elif is_cross_attention:
343
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
344
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
345
+ attention_mask = encoder_attention_mask
346
+ elif past_key_value is not None:
347
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
348
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
349
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
350
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
351
+ else:
352
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
353
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
354
+
355
+ query_layer = self.transpose_for_scores(mixed_query_layer)
356
+
357
+ use_cache = past_key_value is not None
358
+ if self.is_decoder:
359
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
360
+ # Further calls to cross_attention layer can then reuse all cross-attention
361
+ # key/value_states (first "if" case)
362
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
363
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
364
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
365
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
366
+ past_key_value = (key_layer, value_layer)
367
+
368
+ # Take the dot product between "query" and "key" to get the raw attention scores.
369
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
370
+
371
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
372
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
373
+ if use_cache:
374
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
375
+ -1, 1
376
+ )
377
+ else:
378
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
379
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
380
+ distance = position_ids_l - position_ids_r
381
+
382
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
383
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
384
+
385
+ if self.position_embedding_type == "relative_key":
386
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
387
+ attention_scores = attention_scores + relative_position_scores
388
+ elif self.position_embedding_type == "relative_key_query":
389
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
390
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
391
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
392
+
393
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
394
+ if attention_mask is not None:
395
+ # Apply the attention mask is (precomputed for all layers in AltRobertaModel forward() function)
396
+ attention_scores = attention_scores + attention_mask
397
+
398
+ # Normalize the attention scores to probabilities.
399
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
400
+
401
+ # This is actually dropping out entire tokens to attend to, which might
402
+ # seem a bit unusual, but is taken from the original Transformer paper.
403
+ attention_probs = self.dropout(attention_probs)
404
+
405
+ # Mask heads if we want to
406
+ if head_mask is not None:
407
+ attention_probs = attention_probs * head_mask
408
+
409
+ context_layer = torch.matmul(attention_probs, value_layer)
410
+
411
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
412
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
413
+ context_layer = context_layer.view(new_context_layer_shape)
414
+
415
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
416
+
417
+ if self.is_decoder:
418
+ outputs = outputs + (past_key_value,)
419
+ return outputs
420
+
421
+
422
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput
423
+ class AltRobertaSelfOutput(nn.Module):
424
+ def __init__(self, config):
425
+ super().__init__()
426
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
427
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
428
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
429
+
430
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
431
+ hidden_states = self.dense(hidden_states)
432
+ hidden_states = self.dropout(hidden_states)
433
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
434
+ return hidden_states
435
+
436
+
437
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaAttention with Roberta->AltRoberta
438
+ class AltRobertaAttention(nn.Module):
439
+ def __init__(self, config, position_embedding_type=None):
440
+ super().__init__()
441
+ self.self = AltRobertaSelfAttention(config, position_embedding_type=position_embedding_type)
442
+ self.output = AltRobertaSelfOutput(config)
443
+ self.pruned_heads = set()
444
+
445
+ def prune_heads(self, heads):
446
+ if len(heads) == 0:
447
+ return
448
+ heads, index = find_pruneable_heads_and_indices(
449
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
450
+ )
451
+
452
+ # Prune linear layers
453
+ self.self.query = prune_linear_layer(self.self.query, index)
454
+ self.self.key = prune_linear_layer(self.self.key, index)
455
+ self.self.value = prune_linear_layer(self.self.value, index)
456
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
457
+
458
+ # Update hyper params and store pruned heads
459
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
460
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
461
+ self.pruned_heads = self.pruned_heads.union(heads)
462
+
463
+ def forward(
464
+ self,
465
+ hidden_states: torch.Tensor,
466
+ attention_mask: Optional[torch.FloatTensor] = None,
467
+ head_mask: Optional[torch.FloatTensor] = None,
468
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
469
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
470
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
471
+ output_attentions: Optional[bool] = False,
472
+ ) -> Tuple[torch.Tensor]:
473
+ self_outputs = self.self(
474
+ hidden_states,
475
+ attention_mask,
476
+ head_mask,
477
+ encoder_hidden_states,
478
+ encoder_attention_mask,
479
+ past_key_value,
480
+ output_attentions,
481
+ )
482
+ attention_output = self.output(self_outputs[0], hidden_states)
483
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
484
+ return outputs
485
+
486
+
487
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate with Roberta->AltRoberta
488
+ class AltRobertaIntermediate(nn.Module):
489
+ def __init__(self, config):
490
+ super().__init__()
491
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
492
+ if isinstance(config.hidden_act, str):
493
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
494
+ else:
495
+ self.intermediate_act_fn = config.hidden_act
496
+
497
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
498
+ hidden_states = self.dense(hidden_states)
499
+ hidden_states = self.intermediate_act_fn(hidden_states)
500
+ return hidden_states
501
+
502
+
503
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaOutput
504
+ class AltRobertaOutput(nn.Module):
505
+ def __init__(self, config):
506
+ super().__init__()
507
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
508
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
509
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
510
+
511
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
512
+ hidden_states = self.dense(hidden_states)
513
+ hidden_states = self.dropout(hidden_states)
514
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
515
+ return hidden_states
516
+
517
+
518
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaLayer with Roberta->AltRoberta
519
+ class AltRobertaLayer(nn.Module):
520
+ def __init__(self, config):
521
+ super().__init__()
522
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
523
+ self.seq_len_dim = 1
524
+ self.attention = AltRobertaAttention(config)
525
+ self.is_decoder = config.is_decoder
526
+ self.add_cross_attention = config.add_cross_attention
527
+ if self.add_cross_attention:
528
+ if not self.is_decoder:
529
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
530
+ self.crossattention = AltRobertaAttention(config, position_embedding_type="absolute")
531
+ self.intermediate = AltRobertaIntermediate(config)
532
+ self.output = AltRobertaOutput(config)
533
+
534
+ def forward(
535
+ self,
536
+ hidden_states: torch.Tensor,
537
+ attention_mask: Optional[torch.FloatTensor] = None,
538
+ head_mask: Optional[torch.FloatTensor] = None,
539
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
540
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
541
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
542
+ output_attentions: Optional[bool] = False,
543
+ ) -> Tuple[torch.Tensor]:
544
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
545
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
546
+ self_attention_outputs = self.attention(
547
+ hidden_states,
548
+ attention_mask,
549
+ head_mask,
550
+ output_attentions=output_attentions,
551
+ past_key_value=self_attn_past_key_value,
552
+ )
553
+ attention_output = self_attention_outputs[0]
554
+
555
+ # if decoder, the last output is tuple of self-attn cache
556
+ if self.is_decoder:
557
+ outputs = self_attention_outputs[1:-1]
558
+ present_key_value = self_attention_outputs[-1]
559
+ else:
560
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
561
+
562
+ cross_attn_present_key_value = None
563
+ if self.is_decoder and encoder_hidden_states is not None:
564
+ if not hasattr(self, "crossattention"):
565
+ raise ValueError(
566
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
567
+ " by setting `config.add_cross_attention=True`"
568
+ )
569
+
570
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
571
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
572
+ cross_attention_outputs = self.crossattention(
573
+ attention_output,
574
+ attention_mask,
575
+ head_mask,
576
+ encoder_hidden_states,
577
+ encoder_attention_mask,
578
+ cross_attn_past_key_value,
579
+ output_attentions,
580
+ )
581
+ attention_output = cross_attention_outputs[0]
582
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
583
+
584
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
585
+ cross_attn_present_key_value = cross_attention_outputs[-1]
586
+ present_key_value = present_key_value + cross_attn_present_key_value
587
+
588
+ layer_output = apply_chunking_to_forward(
589
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
590
+ )
591
+ outputs = (layer_output,) + outputs
592
+
593
+ # if decoder, return the attn key/values as the last output
594
+ if self.is_decoder:
595
+ outputs = outputs + (present_key_value,)
596
+
597
+ return outputs
598
+
599
+ def feed_forward_chunk(self, attention_output):
600
+ intermediate_output = self.intermediate(attention_output)
601
+ layer_output = self.output(intermediate_output, attention_output)
602
+ return layer_output
603
+
604
+
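The `apply_chunking_to_forward` call above trades scheduling for peak memory by running the feed-forward block over slices of the sequence dimension. A minimal standalone sketch of the equivalent manual loop, with made-up sizes that are not the actual AltCLIP configuration:

```python
import torch

# Illustrative sizes only; in most configs chunk_size_feed_forward is 0, which disables chunking.
batch, seq_len, hidden = 2, 8, 16
chunk_size, seq_len_dim = 4, 1
attention_output = torch.randn(batch, seq_len, hidden)

def feed_forward_chunk(x):
    # stand-in for self.intermediate followed by self.output
    return x * 2.0

# Equivalent of: apply_chunking_to_forward(feed_forward_chunk, chunk_size, seq_len_dim, attention_output)
chunks = attention_output.chunk(seq_len // chunk_size, dim=seq_len_dim)
layer_output = torch.cat([feed_forward_chunk(chunk) for chunk in chunks], dim=seq_len_dim)
assert layer_output.shape == (batch, seq_len, hidden)
```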
605
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->AltRoberta
606
+ class AltRobertaEncoder(nn.Module):
607
+ def __init__(self, config):
608
+ super().__init__()
609
+ self.config = config
610
+ self.layer = nn.ModuleList([AltRobertaLayer(config) for _ in range(config.num_hidden_layers)])
611
+ self.gradient_checkpointing = False
612
+
613
+ def forward(
614
+ self,
615
+ hidden_states: torch.Tensor,
616
+ attention_mask: Optional[torch.FloatTensor] = None,
617
+ head_mask: Optional[torch.FloatTensor] = None,
618
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
619
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
620
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
621
+ use_cache: Optional[bool] = None,
622
+ output_attentions: Optional[bool] = False,
623
+ output_hidden_states: Optional[bool] = False,
624
+ return_dict: Optional[bool] = True,
625
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
626
+ all_hidden_states = () if output_hidden_states else None
627
+ all_self_attentions = () if output_attentions else None
628
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
629
+
630
+ if self.gradient_checkpointing and self.training:
631
+ if use_cache:
632
+ logger.warning_once(
633
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
634
+ )
635
+ use_cache = False
636
+
637
+ next_decoder_cache = () if use_cache else None
638
+ for i, layer_module in enumerate(self.layer):
639
+ if output_hidden_states:
640
+ all_hidden_states = all_hidden_states + (hidden_states,)
641
+
642
+ layer_head_mask = head_mask[i] if head_mask is not None else None
643
+ past_key_value = past_key_values[i] if past_key_values is not None else None
644
+
645
+ if self.gradient_checkpointing and self.training:
646
+ layer_outputs = self._gradient_checkpointing_func(
647
+ layer_module.__call__,
648
+ hidden_states,
649
+ attention_mask,
650
+ layer_head_mask,
651
+ encoder_hidden_states,
652
+ encoder_attention_mask,
653
+ past_key_value,
654
+ output_attentions,
655
+ )
656
+ else:
657
+ layer_outputs = layer_module(
658
+ hidden_states,
659
+ attention_mask,
660
+ layer_head_mask,
661
+ encoder_hidden_states,
662
+ encoder_attention_mask,
663
+ past_key_value,
664
+ output_attentions,
665
+ )
666
+
667
+ hidden_states = layer_outputs[0]
668
+ if use_cache:
669
+ next_decoder_cache += (layer_outputs[-1],)
670
+ if output_attentions:
671
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
672
+ if self.config.add_cross_attention:
673
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
674
+
675
+ if output_hidden_states:
676
+ all_hidden_states = all_hidden_states + (hidden_states,)
677
+
678
+ if not return_dict:
679
+ return tuple(
680
+ v
681
+ for v in [
682
+ hidden_states,
683
+ next_decoder_cache,
684
+ all_hidden_states,
685
+ all_self_attentions,
686
+ all_cross_attentions,
687
+ ]
688
+ if v is not None
689
+ )
690
+ return BaseModelOutputWithPastAndCrossAttentions(
691
+ last_hidden_state=hidden_states,
692
+ past_key_values=next_decoder_cache,
693
+ hidden_states=all_hidden_states,
694
+ attentions=all_self_attentions,
695
+ cross_attentions=all_cross_attentions,
696
+ )
697
+
698
+
699
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaPooler
700
+ class AltRobertaPooler(nn.Module):
701
+ def __init__(self, config):
702
+ super().__init__()
703
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
704
+ self.activation = nn.Tanh()
705
+
706
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
707
+ # We "pool" the model by simply taking the hidden state corresponding
708
+ # to the first token.
709
+ first_token_tensor = hidden_states[:, 0]
710
+ pooled_output = self.dense(first_token_tensor)
711
+ pooled_output = self.activation(pooled_output)
712
+ return pooled_output
713
+
714
+
715
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->AltCLIP
716
+ class AltCLIPAttention(nn.Module):
717
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
718
+
719
+ def __init__(self, config):
720
+ super().__init__()
721
+ self.config = config
722
+ self.embed_dim = config.hidden_size
723
+ self.num_heads = config.num_attention_heads
724
+ self.head_dim = self.embed_dim // self.num_heads
725
+ if self.head_dim * self.num_heads != self.embed_dim:
726
+ raise ValueError(
727
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
728
+ f" {self.num_heads})."
729
+ )
730
+ self.scale = self.head_dim**-0.5
731
+ self.dropout = config.attention_dropout
732
+
733
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
734
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
735
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
736
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
737
+
738
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
739
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
740
+
741
+ def forward(
742
+ self,
743
+ hidden_states: torch.Tensor,
744
+ attention_mask: Optional[torch.Tensor] = None,
745
+ causal_attention_mask: Optional[torch.Tensor] = None,
746
+ output_attentions: Optional[bool] = False,
747
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
748
+ """Input shape: Batch x Time x Channel"""
749
+
750
+ bsz, tgt_len, embed_dim = hidden_states.size()
751
+
752
+ # get query proj
753
+ query_states = self.q_proj(hidden_states) * self.scale
754
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
755
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
756
+
757
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
758
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
759
+ key_states = key_states.view(*proj_shape)
760
+ value_states = value_states.view(*proj_shape)
761
+
762
+ src_len = key_states.size(1)
763
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
764
+
765
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
766
+ raise ValueError(
767
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
768
+ f" {attn_weights.size()}"
769
+ )
770
+
771
+ # apply the causal_attention_mask first
772
+ if causal_attention_mask is not None:
773
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
774
+ raise ValueError(
775
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
776
+ f" {causal_attention_mask.size()}"
777
+ )
778
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
779
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
780
+
781
+ if attention_mask is not None:
782
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
783
+ raise ValueError(
784
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
785
+ )
786
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
787
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
788
+
789
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
790
+
791
+ if output_attentions:
792
+ # this operation is a bit awkward, but it's required to
793
+ # make sure that attn_weights keeps its gradient.
794
+ # In order to do so, attn_weights have to be reshaped
795
+ # twice and have to be reused in the following
796
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
797
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
798
+ else:
799
+ attn_weights_reshaped = None
800
+
801
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
802
+
803
+ attn_output = torch.bmm(attn_probs, value_states)
804
+
805
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
806
+ raise ValueError(
807
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
808
+ f" {attn_output.size()}"
809
+ )
810
+
811
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
812
+ attn_output = attn_output.transpose(1, 2)
813
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
814
+
815
+ attn_output = self.out_proj(attn_output)
816
+
817
+ return attn_output, attn_weights_reshaped
818
+
819
+
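The head split/merge in `AltCLIPAttention.forward` is easiest to verify with concrete shapes. A small sketch of the same bookkeeping on random tensors (illustrative sizes, not tied to any checkpoint):

```python
import torch

bsz, tgt_len, num_heads, head_dim = 2, 5, 4, 8
embed_dim = num_heads * head_dim

q = torch.randn(bsz, tgt_len, embed_dim)
k = torch.randn(bsz, tgt_len, embed_dim)
v = torch.randn(bsz, tgt_len, embed_dim)

def split_heads(t):
    # mirrors _shape(...) followed by .view(bsz * num_heads, -1, head_dim)
    return t.view(bsz, -1, num_heads, head_dim).transpose(1, 2).reshape(bsz * num_heads, -1, head_dim)

attn_weights = torch.bmm(split_heads(q * head_dim**-0.5), split_heads(k).transpose(1, 2))
assert attn_weights.shape == (bsz * num_heads, tgt_len, tgt_len)

attn_output = torch.bmm(attn_weights.softmax(dim=-1), split_heads(v))
attn_output = attn_output.view(bsz, num_heads, tgt_len, head_dim).transpose(1, 2).reshape(bsz, tgt_len, embed_dim)
assert attn_output.shape == (bsz, tgt_len, embed_dim)
```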
820
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->AltCLIP
821
+ class AltCLIPMLP(nn.Module):
822
+ def __init__(self, config):
823
+ super().__init__()
824
+ self.config = config
825
+ self.activation_fn = ACT2FN[config.hidden_act]
826
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
827
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
828
+
829
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
830
+ hidden_states = self.fc1(hidden_states)
831
+ hidden_states = self.activation_fn(hidden_states)
832
+ hidden_states = self.fc2(hidden_states)
833
+ return hidden_states
834
+
835
+
836
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->AltCLIP
837
+ class AltCLIPEncoderLayer(nn.Module):
838
+ def __init__(self, config: AltCLIPConfig):
839
+ super().__init__()
840
+ self.embed_dim = config.hidden_size
841
+ self.self_attn = AltCLIPAttention(config)
842
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
843
+ self.mlp = AltCLIPMLP(config)
844
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
845
+
846
+ def forward(
847
+ self,
848
+ hidden_states: torch.Tensor,
849
+ attention_mask: torch.Tensor,
850
+ causal_attention_mask: torch.Tensor,
851
+ output_attentions: Optional[bool] = False,
852
+ ) -> Tuple[torch.FloatTensor]:
853
+ """
854
+ Args:
855
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
856
+ attention_mask (`torch.FloatTensor`): attention mask of size
857
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
858
859
+ output_attentions (`bool`, *optional*):
860
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
861
+ returned tensors for more detail.
862
+ """
863
+ residual = hidden_states
864
+
865
+ hidden_states = self.layer_norm1(hidden_states)
866
+ hidden_states, attn_weights = self.self_attn(
867
+ hidden_states=hidden_states,
868
+ attention_mask=attention_mask,
869
+ causal_attention_mask=causal_attention_mask,
870
+ output_attentions=output_attentions,
871
+ )
872
+ hidden_states = residual + hidden_states
873
+
874
+ residual = hidden_states
875
+ hidden_states = self.layer_norm2(hidden_states)
876
+ hidden_states = self.mlp(hidden_states)
877
+ hidden_states = residual + hidden_states
878
+
879
+ outputs = (hidden_states,)
880
+
881
+ if output_attentions:
882
+ outputs += (attn_weights,)
883
+
884
+ return outputs
885
+
886
+
887
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->AltCLIP
888
+ class AltCLIPEncoder(nn.Module):
889
+ """
890
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
891
+ [`AltCLIPEncoderLayer`].
892
+
893
+ Args:
894
+ config: AltCLIPConfig
895
+ """
896
+
897
+ def __init__(self, config: AltCLIPConfig):
898
+ super().__init__()
899
+ self.config = config
900
+ self.layers = nn.ModuleList([AltCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
901
+ self.gradient_checkpointing = False
902
+
903
+ def forward(
904
+ self,
905
+ inputs_embeds,
906
+ attention_mask: Optional[torch.Tensor] = None,
907
+ causal_attention_mask: Optional[torch.Tensor] = None,
908
+ output_attentions: Optional[bool] = None,
909
+ output_hidden_states: Optional[bool] = None,
910
+ return_dict: Optional[bool] = None,
911
+ ) -> Union[Tuple, BaseModelOutput]:
912
+ r"""
913
+ Args:
914
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
915
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
916
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
917
+ than the model's internal embedding lookup matrix.
918
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
919
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
920
+
921
+ - 1 for tokens that are **not masked**,
922
+ - 0 for tokens that are **masked**.
923
+
924
+ [What are attention masks?](../glossary#attention-mask)
925
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
926
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
927
+
928
+ - 1 for tokens that are **not masked**,
929
+ - 0 for tokens that are **masked**.
930
+
931
+ [What are attention masks?](../glossary#attention-mask)
932
+ output_attentions (`bool`, *optional*):
933
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
934
+ returned tensors for more detail.
935
+ output_hidden_states (`bool`, *optional*):
936
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
937
+ for more detail.
938
+ return_dict (`bool`, *optional*):
939
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
940
+ """
941
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
942
+ output_hidden_states = (
943
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
944
+ )
945
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
946
+
947
+ encoder_states = () if output_hidden_states else None
948
+ all_attentions = () if output_attentions else None
949
+
950
+ hidden_states = inputs_embeds
951
+ for idx, encoder_layer in enumerate(self.layers):
952
+ if output_hidden_states:
953
+ encoder_states = encoder_states + (hidden_states,)
954
+ if self.gradient_checkpointing and self.training:
955
+ layer_outputs = self._gradient_checkpointing_func(
956
+ encoder_layer.__call__,
957
+ hidden_states,
958
+ attention_mask,
959
+ causal_attention_mask,
960
+ output_attentions,
961
+ )
962
+ else:
963
+ layer_outputs = encoder_layer(
964
+ hidden_states,
965
+ attention_mask,
966
+ causal_attention_mask,
967
+ output_attentions=output_attentions,
968
+ )
969
+
970
+ hidden_states = layer_outputs[0]
971
+
972
+ if output_attentions:
973
+ all_attentions = all_attentions + (layer_outputs[1],)
974
+
975
+ if output_hidden_states:
976
+ encoder_states = encoder_states + (hidden_states,)
977
+
978
+ if not return_dict:
979
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
980
+ return BaseModelOutput(
981
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
982
+ )
983
+
984
+
985
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->AltCLIP
986
+ class AltCLIPVisionEmbeddings(nn.Module):
987
+ def __init__(self, config: AltCLIPVisionConfig):
988
+ super().__init__()
989
+ self.config = config
990
+ self.embed_dim = config.hidden_size
991
+ self.image_size = config.image_size
992
+ self.patch_size = config.patch_size
993
+
994
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
995
+
996
+ self.patch_embedding = nn.Conv2d(
997
+ in_channels=config.num_channels,
998
+ out_channels=self.embed_dim,
999
+ kernel_size=self.patch_size,
1000
+ stride=self.patch_size,
1001
+ bias=False,
1002
+ )
1003
+
1004
+ self.num_patches = (self.image_size // self.patch_size) ** 2
1005
+ self.num_positions = self.num_patches + 1
1006
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
1007
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
1008
+
1009
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
1010
+ batch_size = pixel_values.shape[0]
1011
+ target_dtype = self.patch_embedding.weight.dtype
1012
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
1013
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
1014
+
1015
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
1016
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
1017
+ embeddings = embeddings + self.position_embedding(self.position_ids)
1018
+ return embeddings
1019
+
1020
+
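A quick check of the sequence length produced by `AltCLIPVisionEmbeddings`, assuming a CLIP-style configuration with `image_size=224` and `patch_size=14` (illustrative values, not necessarily those of a given checkpoint):

```python
image_size, patch_size = 224, 14                # assumed example values
num_patches = (image_size // patch_size) ** 2   # 16 * 16 = 256
num_positions = num_patches + 1                 # +1 for the class embedding -> 257
# In forward(): patch_embeds has shape (batch, 256, hidden_size); after the class
# embedding is prepended the sequence is (batch, 257, hidden_size), which matches
# the 257 learned position embeddings added at the end.
```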
1021
+ class AltCLIPPreTrainedModel(PreTrainedModel):
1022
+ """
1023
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1024
+ models.
1025
+ """
1026
+
1027
+ config_class = AltCLIPConfig
1028
+ base_model_prefix = "altclip"
1029
+ supports_gradient_checkpointing = True
1030
+
1031
+ def _init_weights(self, module):
1032
+ """Initialize the weights"""
1033
+ factor = self.config.initializer_factor
1034
+ if isinstance(module, AltCLIPVisionEmbeddings):
1035
+ factor = self.config.initializer_factor
1036
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
1037
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
1038
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
1039
+ elif isinstance(module, AltCLIPAttention):
1040
+ factor = self.config.initializer_factor
1041
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
1042
+ out_proj_std = (module.embed_dim**-0.5) * factor
1043
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
1044
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
1045
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
1046
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
1047
+ elif isinstance(module, AltCLIPMLP):
1048
+ factor = self.config.initializer_factor
1049
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
1050
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
1051
+ nn.init.normal_(module.fc1.weight, std=fc_std)
1052
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
1053
+ elif isinstance(module, AltCLIPModel):
1054
+ nn.init.normal_(
1055
+ module.text_projection.weight,
1056
+ std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
1057
+ )
1058
+ module.text_projection._is_hf_initialized = True
1059
+ nn.init.normal_(
1060
+ module.visual_projection.weight,
1061
+ std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
1062
+ )
1063
+ module.visual_projection._is_hf_initialized = True
1064
+ elif isinstance(module, nn.LayerNorm):
1065
+ module.bias.data.zero_()
1066
+ module.weight.data.fill_(1.0)
1067
+ elif isinstance(module, nn.Linear):
1068
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor)
1069
+ if module.bias is not None:
1070
+ module.bias.data.zero_()
1071
+ elif isinstance(module, nn.Embedding):
1072
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor)
1073
+ if module.padding_idx is not None:
1074
+ module.weight.data[module.padding_idx].zero_()
1075
+
1076
+
1077
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer with CLIPVisionTransformer->AltCLIPVisionTransformer,CLIPVisionConfig->AltCLIPVisionConfig,CLIPVisionEmbeddings->AltCLIPVisionEmbeddings,CLIPEncoder->AltCLIPEncoder,CLIP_VISION_INPUTS_DOCSTRING->ALTCLIP_VISION_INPUTS_DOCSTRING
1078
+ class AltCLIPVisionTransformer(nn.Module):
1079
+ def __init__(self, config: AltCLIPVisionConfig):
1080
+ super().__init__()
1081
+ self.config = config
1082
+ embed_dim = config.hidden_size
1083
+
1084
+ self.embeddings = AltCLIPVisionEmbeddings(config)
1085
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1086
+ self.encoder = AltCLIPEncoder(config)
1087
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1088
+
1089
+ @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING)
1090
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig)
1091
+ def forward(
1092
+ self,
1093
+ pixel_values: Optional[torch.FloatTensor] = None,
1094
+ output_attentions: Optional[bool] = None,
1095
+ output_hidden_states: Optional[bool] = None,
1096
+ return_dict: Optional[bool] = None,
1097
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1098
+ r"""
1099
+ Returns:
1100
+
1101
+ """
1102
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1103
+ output_hidden_states = (
1104
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1105
+ )
1106
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1107
+
1108
+ if pixel_values is None:
1109
+ raise ValueError("You have to specify pixel_values")
1110
+
1111
+ hidden_states = self.embeddings(pixel_values)
1112
+ hidden_states = self.pre_layrnorm(hidden_states)
1113
+
1114
+ encoder_outputs = self.encoder(
1115
+ inputs_embeds=hidden_states,
1116
+ output_attentions=output_attentions,
1117
+ output_hidden_states=output_hidden_states,
1118
+ return_dict=return_dict,
1119
+ )
1120
+
1121
+ last_hidden_state = encoder_outputs[0]
1122
+ pooled_output = last_hidden_state[:, 0, :]
1123
+ pooled_output = self.post_layernorm(pooled_output)
1124
+
1125
+ if not return_dict:
1126
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
1127
+
1128
+ return BaseModelOutputWithPooling(
1129
+ last_hidden_state=last_hidden_state,
1130
+ pooler_output=pooled_output,
1131
+ hidden_states=encoder_outputs.hidden_states,
1132
+ attentions=encoder_outputs.attentions,
1133
+ )
1134
+
1135
+
1136
+ class AltCLIPVisionModel(AltCLIPPreTrainedModel):
1137
+ config_class = AltCLIPVisionConfig
1138
+ main_input_name = "pixel_values"
1139
+
1140
+ def __init__(self, config: AltCLIPVisionConfig):
1141
+ super().__init__(config)
1142
+ self.vision_model = AltCLIPVisionTransformer(config)
1143
+ # Initialize weights and apply final processing
1144
+ self.post_init()
1145
+
1146
+ def get_input_embeddings(self) -> nn.Module:
1147
+ return self.vision_model.embeddings.patch_embedding
1148
+
1149
+ @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING)
1150
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig)
1151
+ def forward(
1152
+ self,
1153
+ pixel_values: Optional[torch.FloatTensor] = None,
1154
+ output_attentions: Optional[bool] = None,
1155
+ output_hidden_states: Optional[bool] = None,
1156
+ return_dict: Optional[bool] = None,
1157
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1158
+ r"""
1159
+ Returns:
1160
+
1161
+ Examples:
1162
+
1163
+ ```python
1164
+ >>> from PIL import Image
1165
+ >>> import requests
1166
+ >>> from transformers import AutoProcessor, AltCLIPVisionModel
1167
+
1168
+ >>> model = AltCLIPVisionModel.from_pretrained("BAAI/AltCLIP")
1169
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1170
+
1171
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1172
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1173
+
1174
+ >>> inputs = processor(images=image, return_tensors="pt")
1175
+
1176
+ >>> outputs = model(**inputs)
1177
+ >>> last_hidden_state = outputs.last_hidden_state
1178
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
1179
+ ```"""
1180
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1181
+
1182
+ return self.vision_model(
1183
+ pixel_values=pixel_values,
1184
+ output_attentions=output_attentions,
1185
+ output_hidden_states=output_hidden_states,
1186
+ return_dict=return_dict,
1187
+ )
1188
+
1189
+
1190
+ class AltRobertaModel(AltCLIPPreTrainedModel):
1191
+ """
1192
+
1193
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
1194
+ cross-attention is added between the self-attention layers, following the architecture described in *Attention is
1195
+ all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
1196
+ Kaiser and Illia Polosukhin.
1197
+
1198
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
1199
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
1200
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
1201
+
1202
+ .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
1203
+
1204
+ """
1205
+
1206
+ config_class = AltCLIPTextConfig
1207
+
1208
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->AltRoberta
1209
+ def __init__(self, config, add_pooling_layer=True):
1210
+ super().__init__(config)
1211
+ self.config = config
1212
+
1213
+ self.embeddings = AltRobertaEmbeddings(config)
1214
+ self.encoder = AltRobertaEncoder(config)
1215
+
1216
+ self.pooler = AltRobertaPooler(config) if add_pooling_layer else None
1217
+
1218
+ # Initialize weights and apply final processing
1219
+ self.post_init()
1220
+
1221
+ def get_input_embeddings(self):
1222
+ return self.embeddings.word_embeddings
1223
+
1224
+ def set_input_embeddings(self, value):
1225
+ self.embeddings.word_embeddings = value
1226
+
1227
+ def _prune_heads(self, heads_to_prune):
1228
+ """
1229
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1230
+ class PreTrainedModel
1231
+ """
1232
+ for layer, heads in heads_to_prune.items():
1233
+ self.encoder.layer[layer].attention.prune_heads(heads)
1234
+
1235
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
1236
+ def forward(
1237
+ self,
1238
+ input_ids: Optional[torch.Tensor] = None,
1239
+ attention_mask: Optional[torch.Tensor] = None,
1240
+ token_type_ids: Optional[torch.Tensor] = None,
1241
+ position_ids: Optional[torch.Tensor] = None,
1242
+ head_mask: Optional[torch.Tensor] = None,
1243
+ inputs_embeds: Optional[torch.Tensor] = None,
1244
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1245
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1246
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1247
+ use_cache: Optional[bool] = None,
1248
+ output_attentions: Optional[bool] = None,
1249
+ output_hidden_states: Optional[bool] = None,
1250
+ return_dict: Optional[bool] = None,
1251
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
1252
+ r"""
1253
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1254
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1255
+ the model is configured as a decoder.
1256
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1257
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1258
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1259
+
1260
+ - 1 for tokens that are **not masked**,
1261
+ - 0 for tokens that are **masked**.
1262
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1263
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1264
+
1265
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1266
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1267
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1268
+ use_cache (`bool`, *optional*):
1269
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1270
+ `past_key_values`).
1271
+ """
1272
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1273
+ output_hidden_states = (
1274
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1275
+ )
1276
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1277
+
1278
+ if self.config.is_decoder:
1279
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1280
+ else:
1281
+ use_cache = False
1282
+
1283
+ if input_ids is not None and inputs_embeds is not None:
1284
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1285
+ elif input_ids is not None:
1286
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1287
+ input_shape = input_ids.size()
1288
+ elif inputs_embeds is not None:
1289
+ input_shape = inputs_embeds.size()[:-1]
1290
+ else:
1291
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1292
+
1293
+ batch_size, seq_length = input_shape
1294
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1295
+
1296
+ # past_key_values_length
1297
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1298
+
1299
+ if attention_mask is None:
1300
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
1301
+
1302
+ if token_type_ids is None:
1303
+ if hasattr(self.embeddings, "token_type_ids"):
1304
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
1305
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
1306
+ token_type_ids = buffered_token_type_ids_expanded
1307
+ else:
1308
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
1309
+
1310
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1311
+ # ourselves in which case we just need to make it broadcastable to all heads.
1312
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
1313
+
1314
+ # If a 2D or 3D attention mask is provided for the cross-attention
1315
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1316
+ if self.config.is_decoder and encoder_hidden_states is not None:
1317
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1318
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1319
+ if encoder_attention_mask is None:
1320
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
1321
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1322
+ else:
1323
+ encoder_extended_attention_mask = None
1324
+
1325
+ # Prepare head mask if needed
1326
+ # 1.0 in head_mask indicate we keep the head
1327
+ # attention_probs has shape bsz x n_heads x N x N
1328
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1329
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1330
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1331
+
1332
+ embedding_output = self.embeddings(
1333
+ input_ids=input_ids,
1334
+ position_ids=position_ids,
1335
+ token_type_ids=token_type_ids,
1336
+ inputs_embeds=inputs_embeds,
1337
+ past_key_values_length=past_key_values_length,
1338
+ )
1339
+ encoder_outputs = self.encoder(
1340
+ embedding_output,
1341
+ attention_mask=extended_attention_mask,
1342
+ head_mask=head_mask,
1343
+ encoder_hidden_states=encoder_hidden_states,
1344
+ encoder_attention_mask=encoder_extended_attention_mask,
1345
+ past_key_values=past_key_values,
1346
+ use_cache=use_cache,
1347
+ output_attentions=output_attentions,
1348
+ output_hidden_states=output_hidden_states,
1349
+ return_dict=return_dict,
1350
+ )
1351
+ sequence_output = encoder_outputs[0]
1352
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1353
+
1354
+ if not return_dict:
1355
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1356
+
1357
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1358
+ last_hidden_state=sequence_output,
1359
+ pooler_output=pooled_output,
1360
+ past_key_values=encoder_outputs.past_key_values,
1361
+ hidden_states=encoder_outputs.hidden_states,
1362
+ attentions=encoder_outputs.attentions,
1363
+ cross_attentions=encoder_outputs.cross_attentions,
1364
+ )
1365
+
1366
+
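The decoder behaviour described in the `AltRobertaModel` docstring is controlled entirely by configuration flags. A hedged sketch, assuming `AltCLIPTextConfig` forwards `is_decoder` and `add_cross_attention` to `PretrainedConfig` as usual (default sizes, so this builds a full-size, randomly initialized model):

```python
from transformers import AltCLIPTextConfig
from transformers.models.altclip.modeling_altclip import AltRobertaModel

config = AltCLIPTextConfig(is_decoder=True, add_cross_attention=True)
model = AltRobertaModel(config)  # each AltRobertaLayer now also builds a cross-attention block
```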
1367
+ class AltCLIPTextModel(AltCLIPPreTrainedModel):
1368
+ config_class = AltCLIPTextConfig
1369
+
1370
+ def __init__(self, config):
1371
+ super().__init__(config)
1372
+ self.roberta = AltRobertaModel(config, add_pooling_layer=False)
1373
+ self.transformation = nn.Linear(config.hidden_size, config.project_dim)
1374
+ self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1375
+ self.post_init()
1376
+
1377
+ def get_input_embeddings(self) -> nn.Module:
1378
+ return self.roberta.embeddings.word_embeddings
1379
+
1380
+ def set_input_embeddings(self, value: nn.Embedding) -> None:
1381
+ self.roberta.embeddings.word_embeddings = value
1382
+
1383
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
1384
+ return super().resize_token_embeddings(new_num_tokens)
1385
+
1386
+ @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING)
1387
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndProjection, config_class=AltCLIPTextConfig)
1388
+ def forward(
1389
+ self,
1390
+ input_ids: Optional[torch.Tensor] = None,
1391
+ attention_mask: Optional[torch.Tensor] = None,
1392
+ token_type_ids: Optional[torch.Tensor] = None,
1393
+ position_ids: Optional[torch.Tensor] = None,
1394
+ head_mask: Optional[torch.Tensor] = None,
1395
+ inputs_embeds: Optional[torch.Tensor] = None,
1396
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1397
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1398
+ output_attentions: Optional[bool] = None,
1399
+ return_dict: Optional[bool] = None,
1400
+ output_hidden_states: Optional[bool] = None,
1401
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndProjection]:
1402
+ r"""
1403
+ Returns:
1404
+
1405
+ Examples:
1406
+
1407
+ ```python
1408
+ >>> from transformers import AutoProcessor, AltCLIPTextModel
1409
+
1410
+ >>> model = AltCLIPTextModel.from_pretrained("BAAI/AltCLIP")
1411
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1412
+
1413
+ >>> texts = ["it's a cat", "it's a dog"]
1414
+
1415
+ >>> inputs = processor(text=texts, padding=True, return_tensors="pt")
1416
+
1417
+ >>> outputs = model(**inputs)
1418
+ >>> last_hidden_state = outputs.last_hidden_state
1419
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
1420
+ ```"""
1421
+
1422
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1423
+
1424
+ outputs = self.roberta(
1425
+ input_ids=input_ids,
1426
+ attention_mask=attention_mask,
1427
+ token_type_ids=token_type_ids,
1428
+ position_ids=position_ids,
1429
+ head_mask=head_mask,
1430
+ inputs_embeds=inputs_embeds,
1431
+ encoder_hidden_states=encoder_hidden_states,
1432
+ encoder_attention_mask=encoder_attention_mask,
1433
+ output_attentions=output_attentions,
1434
+ output_hidden_states=output_hidden_states,
1435
+ return_dict=return_dict,
1436
+ )
1437
+
1438
+ # last module outputs
1439
+ sequence_output = outputs[0]
1440
+
1441
+ # apply the pre-projection LayerNorm
1442
+ sequence_output = self.pre_LN(sequence_output)
1443
+
1444
+ # pooler
1445
+ projection_state = self.transformation(sequence_output)
1446
+ pooler_output = projection_state[:, 0]
1447
+
1448
+ if not return_dict:
1449
+ return (projection_state, pooler_output) + outputs[2:4]
1450
+
1451
+ return BaseModelOutputWithPoolingAndProjection(
1452
+ last_hidden_state=projection_state,
1453
+ pooler_output=pooler_output,
1454
+ hidden_states=outputs.hidden_states,
1455
+ attentions=outputs.attentions,
1456
+ )
1457
+
1458
+
1459
+ class AltCLIPModel(AltCLIPPreTrainedModel):
1460
+ config_class = AltCLIPConfig
1461
+
1462
+ def __init__(self, config: AltCLIPConfig):
1463
+ super().__init__(config)
1464
+
1465
+ if not isinstance(config.vision_config, AltCLIPVisionConfig):
1466
+ raise ValueError(
1467
+ "config.vision_config is expected to be of type AltCLIPVisionConfig but is of type"
1468
+ f" {type(config.vision_config)}."
1469
+ )
1470
+ if not isinstance(config.text_config, AltCLIPTextConfig):
1471
+ raise ValueError(
1472
+ "config.text_config is expected to be of type AltCLIPTextConfig but is of type"
1473
+ f" {type(config.text_config)}."
1474
+ )
1475
+
1476
+ text_config = config.text_config
1477
+ vision_config = config.vision_config
1478
+
1479
+ self.projection_dim = config.projection_dim
1480
+ self.text_embed_dim = text_config.project_dim
1481
+ self.vision_embed_dim = vision_config.hidden_size
1482
+
1483
+ self.text_model = AltCLIPTextModel(text_config)
1484
+ self.vision_model = AltCLIPVisionTransformer(vision_config)
1485
+
1486
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
1487
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
1488
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
1489
+
1490
+ # Initialize weights and apply final processing
1491
+ self.post_init()
1492
+
1493
+ @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING)
1494
+ def get_text_features(
1495
+ self,
1496
+ input_ids: Optional[torch.Tensor] = None,
1497
+ attention_mask: Optional[torch.Tensor] = None,
1498
+ position_ids: Optional[torch.Tensor] = None,
1499
+ token_type_ids=None,
1500
+ output_attentions: Optional[bool] = None,
1501
+ output_hidden_states: Optional[bool] = None,
1502
+ return_dict: Optional[bool] = None,
1503
+ ) -> torch.FloatTensor:
1504
+ r"""
1505
+ Returns:
1506
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1507
+ applying the projection layer to the pooled output of [`AltCLIPTextModel`].
1508
+
1509
+ Examples:
1510
+
1511
+ ```python
1512
+ >>> from transformers import AutoProcessor, AltCLIPModel
1513
+
1514
+ >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
1515
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1516
+ >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1517
+ >>> text_features = model.get_text_features(**inputs)
1518
+ ```"""
1519
+ # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components.
1520
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1521
+ output_hidden_states = (
1522
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1523
+ )
1524
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1525
+
1526
+ text_outputs = self.text_model(
1527
+ input_ids=input_ids,
1528
+ attention_mask=attention_mask,
1529
+ position_ids=position_ids,
1530
+ token_type_ids=token_type_ids,
1531
+ output_attentions=output_attentions,
1532
+ output_hidden_states=output_hidden_states,
1533
+ return_dict=return_dict,
1534
+ )
1535
+ pooled_output = text_outputs[1]
1536
+ text_features = self.text_projection(pooled_output)
1537
+
1538
+ return text_features
1539
+
1540
+ @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING)
1541
+ def get_image_features(
1542
+ self,
1543
+ pixel_values: Optional[torch.FloatTensor] = None,
1544
+ output_attentions: Optional[bool] = None,
1545
+ output_hidden_states: Optional[bool] = None,
1546
+ return_dict: Optional[bool] = None,
1547
+ ) -> torch.FloatTensor:
1548
+ r"""
1549
+ Returns:
1550
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1551
+ applying the projection layer to the pooled output of [`AltCLIPVisionModel`].
1552
+
1553
+ Examples:
1554
+
1555
+ ```python
1556
+ >>> from PIL import Image
1557
+ >>> import requests
1558
+ >>> from transformers import AutoProcessor, AltCLIPModel
1559
+
1560
+ >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
1561
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1562
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1563
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1564
+ >>> inputs = processor(images=image, return_tensors="pt")
1565
+ >>> image_features = model.get_image_features(**inputs)
1566
+ ```"""
1567
+ # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components.
1568
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1569
+ output_hidden_states = (
1570
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1571
+ )
1572
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1573
+
1574
+ vision_outputs = self.vision_model(
1575
+ pixel_values=pixel_values,
1576
+ output_attentions=output_attentions,
1577
+ output_hidden_states=output_hidden_states,
1578
+ return_dict=return_dict,
1579
+ )
1580
+
1581
+ pooled_output = vision_outputs[1] # pooled_output
1582
+ image_features = self.visual_projection(pooled_output)
1583
+
1584
+ return image_features
1585
+
1586
+ @add_start_docstrings_to_model_forward(ALTCLIP_INPUTS_DOCSTRING)
1587
+ @replace_return_docstrings(output_type=AltCLIPOutput, config_class=AltCLIPConfig)
1588
+ def forward(
1589
+ self,
1590
+ input_ids: Optional[torch.LongTensor] = None,
1591
+ pixel_values: Optional[torch.FloatTensor] = None,
1592
+ attention_mask: Optional[torch.Tensor] = None,
1593
+ position_ids: Optional[torch.LongTensor] = None,
1594
+ token_type_ids: Optional[torch.Tensor] = None,
1595
+ return_loss: Optional[bool] = None,
1596
+ output_attentions: Optional[bool] = None,
1597
+ output_hidden_states: Optional[bool] = None,
1598
+ return_dict: Optional[bool] = None,
1599
+ ) -> Union[Tuple, AltCLIPOutput]:
1600
+ r"""
1601
+ Returns:
1602
+
1603
+ Examples:
1604
+
1605
+ ```python
1606
+ >>> from PIL import Image
1607
+ >>> import requests
1608
+ >>> from transformers import AutoProcessor, AltCLIPModel
1609
+
1610
+ >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
1611
+ >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
1612
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1613
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1614
+ >>> inputs = processor(
1615
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
1616
+ ... )
1617
+ >>> outputs = model(**inputs)
1618
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1619
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1620
+ ```"""
1621
+ # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components.
1622
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1623
+ output_hidden_states = (
1624
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1625
+ )
1626
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1627
+
1628
+ text_outputs = self.text_model(
1629
+ input_ids=input_ids,
1630
+ attention_mask=attention_mask,
1631
+ token_type_ids=token_type_ids,
1632
+ position_ids=position_ids,
1633
+ output_attentions=output_attentions,
1634
+ output_hidden_states=output_hidden_states,
1635
+ return_dict=return_dict,
1636
+ )
1637
+
1638
+ vision_outputs = self.vision_model(
1639
+ pixel_values=pixel_values,
1640
+ output_attentions=output_attentions,
1641
+ output_hidden_states=output_hidden_states,
1642
+ return_dict=return_dict,
1643
+ )
1644
+
1645
+ image_embeds = vision_outputs[1]
1646
+ image_embeds = self.visual_projection(image_embeds)
1647
+
1648
+ text_embeds = text_outputs[1]
1649
+ text_embeds = self.text_projection(text_embeds)
1650
+
1651
+ # normalized features
1652
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1653
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1654
+
1655
+ # cosine similarity as logits
1656
+ logit_scale = self.logit_scale.exp()
1657
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
1658
+ logits_per_image = logits_per_text.T
1659
+
1660
+ loss = None
1661
+ if return_loss:
1662
+ loss = clip_loss(logits_per_text)
1663
+
1664
+ if not return_dict:
1665
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1666
+ return ((loss,) + output) if loss is not None else output
1667
+
1668
+ return AltCLIPOutput(
1669
+ loss=loss,
1670
+ logits_per_image=logits_per_image,
1671
+ logits_per_text=logits_per_text,
1672
+ text_embeds=text_embeds,
1673
+ image_embeds=image_embeds,
1674
+ text_model_output=text_outputs,
1675
+ vision_model_output=vision_outputs,
1676
+ )
1677
+
1678
+
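For context, the `clip_loss` helper applied to `logits_per_text` above is the standard symmetric contrastive objective; a sketch of the usual CLIP formulation is shown below for illustration (the actual helper is defined earlier in this file):

```python
import torch
import torch.nn.functional as F

def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # cross-entropy against the diagonal: the i-th text is paired with the i-th image
    return F.cross_entropy(logits, torch.arange(len(logits), device=logits.device))

def clip_loss_sketch(logits_per_text: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(logits_per_text)      # text -> image direction
    image_loss = contrastive_loss(logits_per_text.t())    # image -> text direction
    return (caption_loss + image_loss) / 2.0
```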
1679
+ # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
1680
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1681
+ """
1682
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1683
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1684
+
1685
+ Args:
1686
+ input_ids (`torch.Tensor`): Indices of input sequence tokens.
+ padding_idx (`int`): Id of the padding token; padded positions keep this value as their position id.
+ past_key_values_length (`int`, *optional*, defaults to 0): Length of previously cached key/value states.
1687
+
1688
+ Returns: torch.Tensor
1689
+ """
1690
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1691
+ mask = input_ids.ne(padding_idx).int()
1692
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1693
+ return incremental_indices.long() + padding_idx
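A worked example of `create_position_ids_from_input_ids` with `padding_idx=1` (the value XLM-R style tokenizers use) and no cached past:

```python
import torch

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])   # the last two tokens are padding (pad id = 1)
mask = input_ids.ne(1).int()                            # [[1, 1, 1, 1, 0, 0]]
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
position_ids = incremental_indices.long() + 1
print(position_ids)                                     # tensor([[2, 3, 4, 5, 1, 1]])
```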
llmeval-env/lib/python3.10/site-packages/transformers/models/altclip/processing_altclip.py ADDED
@@ -0,0 +1,131 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 WenXiang ZhongzhiCheng LedellWu LiuGuang BoWenZhang The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for AltCLIP
17
+ """
18
+ import warnings
19
+
20
+ from ...processing_utils import ProcessorMixin
21
+ from ...tokenization_utils_base import BatchEncoding
22
+
23
+
24
+ class AltCLIPProcessor(ProcessorMixin):
25
+ r"""
26
+ Constructs an AltCLIP processor which wraps a CLIP image processor and an XLM-Roberta tokenizer into a single
27
+ processor.
28
+
29
+ [`AltCLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`XLMRobertaTokenizerFast`]. See
30
+ the [`~AltCLIPProcessor.__call__`] and [`~AltCLIPProcessor.decode`] for more information.
31
+
32
+ Args:
33
+ image_processor ([`CLIPImageProcessor`], *optional*):
34
+ The image processor is a required input.
35
+ tokenizer ([`XLMRobertaTokenizerFast`], *optional*):
36
+ The tokenizer is a required input.
37
+ """
38
+
39
+ attributes = ["image_processor", "tokenizer"]
40
+ image_processor_class = "CLIPImageProcessor"
41
+ tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
42
+
43
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
44
+ feature_extractor = None
45
+ if "feature_extractor" in kwargs:
46
+ warnings.warn(
47
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
48
+ " instead.",
49
+ FutureWarning,
50
+ )
51
+ feature_extractor = kwargs.pop("feature_extractor")
52
+
53
+ image_processor = image_processor if image_processor is not None else feature_extractor
54
+ if image_processor is None:
55
+ raise ValueError("You need to specify an `image_processor`.")
56
+ if tokenizer is None:
57
+ raise ValueError("You need to specify a `tokenizer`.")
58
+
59
+ super().__init__(image_processor, tokenizer)
60
+
61
+ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
62
+ """
63
+ Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
64
+ and `kwargs` arguments to XLMRobertaTokenizerFast's [`~XLMRobertaTokenizerFast.__call__`] if `text` is not
65
+ `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
66
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
67
+ of the above two methods for more information.
68
+
69
+ Args:
70
+ text (`str`, `List[str]`, `List[List[str]]`):
71
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
72
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
73
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
74
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
75
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
76
+ tensor. Both channels-first and channels-last formats are supported.
77
+
78
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
79
+ If set, will return tensors of a particular framework. Acceptable values are:
80
+
81
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
82
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
83
+ - `'np'`: Return NumPy `np.ndarray` objects.
84
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
85
+
86
+ Returns:
87
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
88
+
89
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
90
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
91
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
92
+ `None`).
93
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
94
+ """
95
+
96
+ if text is None and images is None:
97
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
98
+
99
+ if text is not None:
100
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
101
+
102
+ if images is not None:
103
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
104
+
105
+ if text is not None and images is not None:
106
+ encoding["pixel_values"] = image_features.pixel_values
107
+ return encoding
108
+ elif text is not None:
109
+ return encoding
110
+ else:
111
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
112
+
113
+ def batch_decode(self, *args, **kwargs):
114
+ """
115
+ This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`].
116
+ Please refer to the docstring of this method for more information.
117
+ """
118
+ return self.tokenizer.batch_decode(*args, **kwargs)
119
+
120
+ def decode(self, *args, **kwargs):
121
+ """
122
+ This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please
123
+ refer to the docstring of this method for more information.
124
+ """
125
+ return self.tokenizer.decode(*args, **kwargs)
126
+
127
+ @property
128
+ def model_input_names(self):
129
+ tokenizer_input_names = self.tokenizer.model_input_names
130
+ image_processor_input_names = self.image_processor.model_input_names
131
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
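A minimal usage sketch of the processor defined above. The checkpoint name "BAAI/AltCLIP" and the example image URL are assumptions for illustration; any AltCLIP checkpoint that ships this processor class should behave the same way.

import requests
from PIL import Image
from transformers import AltCLIPProcessor

# Checkpoint name is an assumption for illustration.
processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Text goes through the XLM-Roberta tokenizer, images through the CLIP image
# processor; the combined BatchEncoding carries both sets of fields.
inputs = processor(text=["a photo of two cats"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']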
llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py ADDED
@@ -0,0 +1,65 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_decision_transformer": [
21
+ "DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "DecisionTransformerConfig",
23
+ ],
24
+ }
25
+
26
+ try:
27
+ if not is_torch_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["modeling_decision_transformer"] = [
33
+ "DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
34
+ "DecisionTransformerGPT2Model",
35
+ "DecisionTransformerGPT2PreTrainedModel",
36
+ "DecisionTransformerModel",
37
+ "DecisionTransformerPreTrainedModel",
38
+ ]
39
+
40
+
41
+ if TYPE_CHECKING:
42
+ from .configuration_decision_transformer import (
43
+ DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
44
+ DecisionTransformerConfig,
45
+ )
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ from .modeling_decision_transformer import (
54
+ DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
55
+ DecisionTransformerGPT2Model,
56
+ DecisionTransformerGPT2PreTrainedModel,
57
+ DecisionTransformerModel,
58
+ DecisionTransformerPreTrainedModel,
59
+ )
60
+
61
+
62
+ else:
63
+ import sys
64
+
65
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
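Because of the `_LazyModule` registration above, the modeling submodule is only imported on first attribute access. A short sketch of how this surfaces to users (assumes PyTorch is installed, per the `is_torch_available()` guard):

from transformers import DecisionTransformerConfig, DecisionTransformerModel

# The classes listed in _import_structure resolve lazily; nothing under
# modeling_decision_transformer is imported until this point.
config = DecisionTransformerConfig(state_dim=11, act_dim=3)
model = DecisionTransformerModel(config)  # randomly initialized weights
print(type(model).__name__)  # DecisionTransformerModel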
llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.09 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/configuration_decision_transformer.cpython-310.pyc ADDED
Binary file (6.19 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc ADDED
Binary file (25.6 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/configuration_decision_transformer.py ADDED
@@ -0,0 +1,157 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Decision Transformer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class DecisionTransformerConfig(PretrainedConfig):
28
+ """
29
+ This is the configuration class to store the configuration of a [`DecisionTransformerModel`]. It is used to
30
+ instantiate a Decision Transformer model according to the specified arguments, defining the model architecture.
31
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the standard
32
+ DecisionTransformer architecture. Many of the config options are used to instatiate the GPT2 model that is used as
33
+ part of the architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ state_dim (`int`, *optional*, defaults to 17):
41
+ The state size for the RL environment
42
+ act_dim (`int`, *optional*, defaults to 4):
43
+ The size of the output action space
44
+ hidden_size (`int`, *optional*, defaults to 128):
45
+ The size of the hidden layers
46
+ max_ep_len (`int`, *optional*, defaults to 4096):
47
+ The maximum length of an episode in the environment
48
+ action_tanh (`bool`, *optional*, defaults to True):
49
+ Whether to use a tanh activation on action prediction
50
+ vocab_size (`int`, *optional*, defaults to 50257):
51
+ Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
52
+ `inputs_ids` passed when calling [`DecisionTransformerModel`].
53
+ n_positions (`int`, *optional*, defaults to 1024):
54
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
55
+ just in case (e.g., 512 or 1024 or 2048).
56
+ n_layer (`int`, *optional*, defaults to 3):
57
+ Number of hidden layers in the Transformer encoder.
58
+ n_head (`int`, *optional*, defaults to 1):
59
+ Number of attention heads for each attention layer in the Transformer encoder.
60
+ n_inner (`int`, *optional*):
61
+ Dimensionality of the inner feed-forward layers. If unset, will default to 4 times `n_embd`.
62
+ activation_function (`str`, *optional*, defaults to `"gelu"`):
63
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
64
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
65
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
66
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
67
+ The dropout ratio for the embeddings.
68
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
69
+ The dropout ratio for the attention.
70
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
71
+ The epsilon to use in the layer normalization layers.
72
+ initializer_range (`float`, *optional*, defaults to 0.02):
73
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
74
+ scale_attn_weights (`bool`, *optional*, defaults to `True`):
75
+ Scale attention weights by dividing by sqrt(hidden_size)..
76
+ use_cache (`bool`, *optional*, defaults to `True`):
77
+ Whether or not the model should return the last key/values attentions (not used by all models).
78
+ scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
79
+ Whether to additionally scale attention weights by `1 / layer_idx + 1`.
80
+ reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
81
+ Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
82
+ dot-product/softmax to float() when training with mixed precision.
83
+
84
+ Example:
85
+
86
+ ```python
87
+ >>> from transformers import DecisionTransformerConfig, DecisionTransformerModel
88
+
89
+ >>> # Initializing a DecisionTransformer configuration
90
+ >>> configuration = DecisionTransformerConfig()
91
+
92
+ >>> # Initializing a model (with random weights) from the configuration
93
+ >>> model = DecisionTransformerModel(configuration)
94
+
95
+ >>> # Accessing the model configuration
96
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "decision_transformer"
100
+ keys_to_ignore_at_inference = ["past_key_values"]
101
+ attribute_map = {
102
+ "max_position_embeddings": "n_positions",
103
+ "num_attention_heads": "n_head",
104
+ "num_hidden_layers": "n_layer",
105
+ }
106
+
107
+ def __init__(
108
+ self,
109
+ state_dim=17,
110
+ act_dim=4,
111
+ hidden_size=128,
112
+ max_ep_len=4096,
113
+ action_tanh=True,
114
+ vocab_size=1,
115
+ n_positions=1024,
116
+ n_layer=3,
117
+ n_head=1,
118
+ n_inner=None,
119
+ activation_function="relu",
120
+ resid_pdrop=0.1,
121
+ embd_pdrop=0.1,
122
+ attn_pdrop=0.1,
123
+ layer_norm_epsilon=1e-5,
124
+ initializer_range=0.02,
125
+ scale_attn_weights=True,
126
+ use_cache=True,
127
+ bos_token_id=50256,
128
+ eos_token_id=50256,
129
+ scale_attn_by_inverse_layer_idx=False,
130
+ reorder_and_upcast_attn=False,
131
+ **kwargs,
132
+ ):
133
+ self.state_dim = state_dim
134
+ self.act_dim = act_dim
135
+ self.hidden_size = hidden_size
136
+ self.max_ep_len = max_ep_len
137
+ self.action_tanh = action_tanh
138
+ self.vocab_size = vocab_size
139
+ self.n_positions = n_positions
140
+ self.n_layer = n_layer
141
+ self.n_head = n_head
142
+ self.n_inner = n_inner
143
+ self.activation_function = activation_function
144
+ self.resid_pdrop = resid_pdrop
145
+ self.embd_pdrop = embd_pdrop
146
+ self.attn_pdrop = attn_pdrop
147
+ self.layer_norm_epsilon = layer_norm_epsilon
148
+ self.initializer_range = initializer_range
149
+ self.scale_attn_weights = scale_attn_weights
150
+ self.use_cache = use_cache
151
+ self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
152
+ self.reorder_and_upcast_attn = reorder_and_upcast_attn
153
+
154
+ self.bos_token_id = bos_token_id
155
+ self.eos_token_id = eos_token_id
156
+
157
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
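Note how `attribute_map` above lets the generic `PretrainedConfig` names resolve to the GPT-2 style fields; a quick sketch using the defaults defined in `__init__`:

from transformers import DecisionTransformerConfig

config = DecisionTransformerConfig()

# Aliases defined by attribute_map
assert config.num_hidden_layers == config.n_layer == 3
assert config.num_attention_heads == config.n_head == 1
assert config.max_position_embeddings == config.n_positions == 1024

# n_inner defaults to None; the GPT-2 block in the modeling file below then
# falls back to 4 * hidden_size for the MLP width.
print(config.n_inner)  # None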
llmeval-env/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py ADDED
@@ -0,0 +1,937 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DecisionTransformer model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.cuda.amp import autocast
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
31
+ from ...utils import (
32
+ ModelOutput,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_decision_transformer import DecisionTransformerConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "edbeeching/decision-transformer-gym-hopper-medium"
44
+ _CONFIG_FOR_DOC = "DecisionTransformerConfig"
45
+
46
+
47
+ from ..deprecated._archive_maps import DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
48
+
49
+
50
+ # Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
51
+ def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
52
+ """Load tf checkpoints in a pytorch model"""
53
+ try:
54
+ import re
55
+
56
+ import tensorflow as tf
57
+ except ImportError:
58
+ logger.error(
59
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
60
+ "https://www.tensorflow.org/install/ for installation instructions."
61
+ )
62
+ raise
63
+ tf_path = os.path.abspath(gpt2_checkpoint_path)
64
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
65
+ # Load weights from TF model
66
+ init_vars = tf.train.list_variables(tf_path)
67
+ names = []
68
+ arrays = []
69
+ for name, shape in init_vars:
70
+ logger.info(f"Loading TF weight {name} with shape {shape}")
71
+ array = tf.train.load_variable(tf_path, name)
72
+ names.append(name)
73
+ arrays.append(array.squeeze())
74
+
75
+ for name, array in zip(names, arrays):
76
+ name = name[6:] # skip "model/"
77
+ name = name.split("/")
78
+ pointer = model
79
+ for m_name in name:
80
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
81
+ scope_names = re.split(r"(\d+)", m_name)
82
+ else:
83
+ scope_names = [m_name]
84
+ if scope_names[0] == "w" or scope_names[0] == "g":
85
+ pointer = getattr(pointer, "weight")
86
+ elif scope_names[0] == "b":
87
+ pointer = getattr(pointer, "bias")
88
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
89
+ pointer = getattr(pointer, scope_names[0])
90
+ pointer = getattr(pointer, "weight")
91
+ else:
92
+ pointer = getattr(pointer, scope_names[0])
93
+ if len(scope_names) >= 2:
94
+ num = int(scope_names[1])
95
+ pointer = pointer[num]
96
+ try:
97
+ if pointer.shape != array.shape:
98
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
99
+ except ValueError as e:
100
+ e.args += (pointer.shape, array.shape)
101
+ raise
102
+ logger.info(f"Initialize PyTorch weight {name}")
103
+ pointer.data = torch.from_numpy(array)
104
+ return model
105
+
106
+
107
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2
108
+ class DecisionTransformerGPT2Attention(nn.Module):
109
+ def __init__(self, config, is_cross_attention=False, layer_idx=None):
110
+ super().__init__()
111
+ self.config = config
112
+ max_positions = config.max_position_embeddings
113
+ self.register_buffer(
114
+ "bias",
115
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
116
+ 1, 1, max_positions, max_positions
117
+ ),
118
+ persistent=False,
119
+ )
120
+ self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
121
+
122
+ self.embed_dim = config.hidden_size
123
+ self.num_heads = config.num_attention_heads
124
+ self.head_dim = self.embed_dim // self.num_heads
125
+ self.split_size = self.embed_dim
126
+ if self.head_dim * self.num_heads != self.embed_dim:
127
+ raise ValueError(
128
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
129
+ f" {self.num_heads})."
130
+ )
131
+
132
+ self.scale_attn_weights = config.scale_attn_weights
133
+ self.is_cross_attention = is_cross_attention
134
+
135
+ # Layer-wise attention scaling, reordering, and upcasting
136
+ self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
137
+ self.layer_idx = layer_idx
138
+ self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
139
+
140
+ if self.is_cross_attention:
141
+ self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
142
+ self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
143
+ else:
144
+ self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
145
+ self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
146
+
147
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
148
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
149
+ self.is_causal = True
150
+
151
+ self.pruned_heads = set()
152
+
153
+ def prune_heads(self, heads):
154
+ if len(heads) == 0:
155
+ return
156
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
157
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
158
+
159
+ # Prune conv1d layers
160
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
161
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
162
+
163
+ # Update hyper params
164
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
165
+ self.num_heads = self.num_heads - len(heads)
166
+ self.pruned_heads = self.pruned_heads.union(heads)
167
+
168
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
169
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
170
+
171
+ if self.scale_attn_weights:
172
+ attn_weights = attn_weights / torch.full(
173
+ [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
174
+ )
175
+
176
+ # Layer-wise attention scaling
177
+ if self.scale_attn_by_inverse_layer_idx:
178
+ attn_weights = attn_weights / float(self.layer_idx + 1)
179
+
180
+ if not self.is_cross_attention:
181
+ # if only "normal" attention layer implements causal mask
182
+ query_length, key_length = query.size(-2), key.size(-2)
183
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
184
+ mask_value = torch.finfo(attn_weights.dtype).min
185
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
186
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
187
+ mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
188
+ attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value)
189
+
190
+ if attention_mask is not None:
191
+ # Apply the attention mask
192
+ attn_weights = attn_weights + attention_mask
193
+
194
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
195
+
196
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
197
+ attn_weights = attn_weights.type(value.dtype)
198
+ attn_weights = self.attn_dropout(attn_weights)
199
+
200
+ # Mask heads if we want to
201
+ if head_mask is not None:
202
+ attn_weights = attn_weights * head_mask
203
+
204
+ attn_output = torch.matmul(attn_weights, value)
205
+
206
+ return attn_output, attn_weights
207
+
208
+ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
209
+ # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
210
+ bsz, num_heads, q_seq_len, dk = query.size()
211
+ _, _, k_seq_len, _ = key.size()
212
+
213
+ # Preallocate attn_weights for `baddbmm`
214
+ attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
215
+
216
+ # Compute Scale Factor
217
+ scale_factor = 1.0
218
+ if self.scale_attn_weights:
219
+ scale_factor /= float(value.size(-1)) ** 0.5
220
+
221
+ if self.scale_attn_by_inverse_layer_idx:
222
+ scale_factor /= float(self.layer_idx + 1)
223
+
224
+ # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
225
+ with autocast(enabled=False):
226
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
227
+ attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
228
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
229
+
230
+ if not self.is_cross_attention:
231
+ # if only "normal" attention layer implements causal mask
232
+ query_length, key_length = query.size(-2), key.size(-2)
233
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
234
+ mask_value = torch.finfo(attn_weights.dtype).min
235
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
236
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
237
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
238
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
239
+
240
+ if attention_mask is not None:
241
+ # Apply the attention mask
242
+ attn_weights = attn_weights + attention_mask
243
+
244
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
245
+
246
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
247
+ if attn_weights.dtype != torch.float32:
248
+ raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
249
+ attn_weights = attn_weights.type(value.dtype)
250
+ attn_weights = self.attn_dropout(attn_weights)
251
+
252
+ # Mask heads if we want to
253
+ if head_mask is not None:
254
+ attn_weights = attn_weights * head_mask
255
+
256
+ attn_output = torch.matmul(attn_weights, value)
257
+
258
+ return attn_output, attn_weights
259
+
260
+ def _split_heads(self, tensor, num_heads, attn_head_size):
261
+ """
262
+ Splits hidden_size dim into attn_head_size and num_heads
263
+ """
264
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
265
+ tensor = tensor.view(new_shape)
266
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
267
+
268
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
269
+ """
270
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
271
+ """
272
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
273
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
274
+ return tensor.view(new_shape)
275
+
276
+ def forward(
277
+ self,
278
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
279
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
280
+ attention_mask: Optional[torch.FloatTensor] = None,
281
+ head_mask: Optional[torch.FloatTensor] = None,
282
+ encoder_hidden_states: Optional[torch.Tensor] = None,
283
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
284
+ use_cache: Optional[bool] = False,
285
+ output_attentions: Optional[bool] = False,
286
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
287
+ if encoder_hidden_states is not None:
288
+ if not hasattr(self, "q_attn"):
289
+ raise ValueError(
290
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
291
+ "Please make sure to instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`."
292
+ )
293
+
294
+ query = self.q_attn(hidden_states)
295
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
296
+ attention_mask = encoder_attention_mask
297
+ else:
298
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
299
+
300
+ query = self._split_heads(query, self.num_heads, self.head_dim)
301
+ key = self._split_heads(key, self.num_heads, self.head_dim)
302
+ value = self._split_heads(value, self.num_heads, self.head_dim)
303
+
304
+ if layer_past is not None:
305
+ past_key, past_value = layer_past
306
+ key = torch.cat((past_key, key), dim=-2)
307
+ value = torch.cat((past_value, value), dim=-2)
308
+
309
+ if use_cache is True:
310
+ present = (key, value)
311
+ else:
312
+ present = None
313
+
314
+ if self.reorder_and_upcast_attn:
315
+ attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
316
+ else:
317
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
318
+
319
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
320
+ attn_output = self.c_proj(attn_output)
321
+ attn_output = self.resid_dropout(attn_output)
322
+
323
+ outputs = (attn_output, present)
324
+ if output_attentions:
325
+ outputs += (attn_weights,)
326
+
327
+ return outputs # a, present, (attentions)
328
+
329
+
330
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2
331
+ class DecisionTransformerGPT2MLP(nn.Module):
332
+ def __init__(self, intermediate_size, config):
333
+ super().__init__()
334
+ embed_dim = config.hidden_size
335
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
336
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
337
+ self.act = ACT2FN[config.activation_function]
338
+ self.dropout = nn.Dropout(config.resid_pdrop)
339
+
340
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
341
+ hidden_states = self.c_fc(hidden_states)
342
+ hidden_states = self.act(hidden_states)
343
+ hidden_states = self.c_proj(hidden_states)
344
+ hidden_states = self.dropout(hidden_states)
345
+ return hidden_states
346
+
347
+
348
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block with GPT2->DecisionTransformerGPT2
349
+ class DecisionTransformerGPT2Block(nn.Module):
350
+ # Ignore copy
351
+ def __init__(self, config, layer_idx=None):
352
+ super().__init__()
353
+ hidden_size = config.hidden_size
354
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
355
+
356
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
357
+ self.attn = DecisionTransformerGPT2Attention(config, layer_idx=layer_idx)
358
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
359
+
360
+ if config.add_cross_attention:
361
+ self.crossattention = DecisionTransformerGPT2Attention(
362
+ config, is_cross_attention=True, layer_idx=layer_idx
363
+ )
364
+ self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
365
+
366
+ self.mlp = DecisionTransformerGPT2MLP(inner_dim, config)
367
+
368
+ def forward(
369
+ self,
370
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
371
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
372
+ attention_mask: Optional[torch.FloatTensor] = None,
373
+ head_mask: Optional[torch.FloatTensor] = None,
374
+ encoder_hidden_states: Optional[torch.Tensor] = None,
375
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
376
+ use_cache: Optional[bool] = False,
377
+ output_attentions: Optional[bool] = False,
378
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
379
+ residual = hidden_states
380
+ hidden_states = self.ln_1(hidden_states)
381
+ attn_outputs = self.attn(
382
+ hidden_states,
383
+ layer_past=layer_past,
384
+ attention_mask=attention_mask,
385
+ head_mask=head_mask,
386
+ use_cache=use_cache,
387
+ output_attentions=output_attentions,
388
+ )
389
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
390
+ outputs = attn_outputs[1:]
391
+ # residual connection
392
+ hidden_states = attn_output + residual
393
+
394
+ if encoder_hidden_states is not None:
395
+ # add one self-attention block for cross-attention
396
+ if not hasattr(self, "crossattention"):
397
+ raise ValueError(
398
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
399
+ "cross-attention layers by setting `config.add_cross_attention=True`"
400
+ )
401
+ residual = hidden_states
402
+ hidden_states = self.ln_cross_attn(hidden_states)
403
+ cross_attn_outputs = self.crossattention(
404
+ hidden_states,
405
+ attention_mask=attention_mask,
406
+ head_mask=head_mask,
407
+ encoder_hidden_states=encoder_hidden_states,
408
+ encoder_attention_mask=encoder_attention_mask,
409
+ output_attentions=output_attentions,
410
+ )
411
+ attn_output = cross_attn_outputs[0]
412
+ # residual connection
413
+ hidden_states = residual + attn_output
414
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
415
+
416
+ residual = hidden_states
417
+ hidden_states = self.ln_2(hidden_states)
418
+ feed_forward_hidden_states = self.mlp(hidden_states)
419
+ # residual connection
420
+ hidden_states = residual + feed_forward_hidden_states
421
+
422
+ if use_cache:
423
+ outputs = (hidden_states,) + outputs
424
+ else:
425
+ outputs = (hidden_states,) + outputs[1:]
426
+
427
+ return outputs # hidden_states, present, (attentions, cross_attentions)
428
+
429
+
430
+ class DecisionTransformerGPT2PreTrainedModel(PreTrainedModel):
431
+ """
432
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
433
+ models.
434
+ """
435
+
436
+ config_class = DecisionTransformerConfig
437
+ load_tf_weights = load_tf_weights_in_gpt2
438
+ base_model_prefix = "transformer"
439
+ is_parallelizable = True
440
+ supports_gradient_checkpointing = True
441
+
442
+ def __init__(self, *inputs, **kwargs):
443
+ super().__init__(*inputs, **kwargs)
444
+
445
+ def _init_weights(self, module):
446
+ """Initialize the weights."""
447
+ if isinstance(module, (nn.Linear, Conv1D)):
448
+ # Slightly different from the TF version which uses truncated_normal for initialization
449
+ # cf https://github.com/pytorch/pytorch/pull/5617
450
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
451
+ if module.bias is not None:
452
+ module.bias.data.zero_()
453
+ elif isinstance(module, nn.Embedding):
454
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
455
+ if module.padding_idx is not None:
456
+ module.weight.data[module.padding_idx].zero_()
457
+ elif isinstance(module, nn.LayerNorm):
458
+ module.bias.data.zero_()
459
+ module.weight.data.fill_(1.0)
460
+
461
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
462
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
463
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
464
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
465
+ #
466
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
467
+ for name, p in module.named_parameters():
468
+ if "c_proj" in name and "weight" in name:
469
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
470
+ p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
471
+
472
+
473
+ class DecisionTransformerGPT2Model(DecisionTransformerGPT2PreTrainedModel):
474
+ def __init__(self, config):
475
+ super().__init__(config)
476
+
477
+ self.embed_dim = config.hidden_size
478
+
479
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
480
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
481
+
482
+ self.drop = nn.Dropout(config.embd_pdrop)
483
+ self.h = nn.ModuleList(
484
+ [DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]
485
+ )
486
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
487
+
488
+ # Model parallel
489
+ self.model_parallel = False
490
+ self.device_map = None
491
+ self.gradient_checkpointing = False
492
+
493
+ # Initialize weights and apply final processing
494
+ self.post_init()
495
+
496
+ def get_input_embeddings(self):
497
+ return self.wte
498
+
499
+ def set_input_embeddings(self, new_embeddings):
500
+ self.wte = new_embeddings
501
+
502
+ def forward(
503
+ self,
504
+ input_ids: Optional[torch.LongTensor] = None,
505
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
506
+ attention_mask: Optional[torch.FloatTensor] = None,
507
+ token_type_ids: Optional[torch.LongTensor] = None,
508
+ position_ids: Optional[torch.LongTensor] = None,
509
+ head_mask: Optional[torch.FloatTensor] = None,
510
+ inputs_embeds: Optional[torch.FloatTensor] = None,
511
+ encoder_hidden_states: Optional[torch.Tensor] = None,
512
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
513
+ use_cache: Optional[bool] = None,
514
+ output_attentions: Optional[bool] = None,
515
+ output_hidden_states: Optional[bool] = None,
516
+ return_dict: Optional[bool] = None,
517
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
518
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
519
+ output_hidden_states = (
520
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
521
+ )
522
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
523
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
524
+
525
+ if input_ids is not None and inputs_embeds is not None:
526
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
527
+ elif input_ids is not None:
528
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
529
+ input_shape = input_ids.size()
530
+ input_ids = input_ids.view(-1, input_shape[-1])
531
+ batch_size = input_ids.shape[0]
532
+ elif inputs_embeds is not None:
533
+ input_shape = inputs_embeds.size()[:-1]
534
+ batch_size = inputs_embeds.shape[0]
535
+ else:
536
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
537
+
538
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
539
+
540
+ if token_type_ids is not None:
541
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
542
+
543
+ if past_key_values is None:
544
+ past_length = 0
545
+ past_key_values = tuple([None] * len(self.h))
546
+ else:
547
+ past_length = past_key_values[0][0].size(-2)
548
+ if position_ids is None:
549
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
550
+ position_ids = position_ids.unsqueeze(0)
551
+
552
+ # Attention mask.
553
+ if attention_mask is not None:
554
+ if batch_size <= 0:
555
+ raise ValueError("batch_size has to be defined and > 0")
556
+ attention_mask = attention_mask.view(batch_size, -1)
557
+ # We create a 3D attention mask from a 2D tensor mask.
558
+ # Sizes are [batch_size, 1, 1, to_seq_length]
559
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
560
+ # this attention mask is more simple than the triangular masking of causal attention
561
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
562
+ attention_mask = attention_mask[:, None, None, :]
563
+
564
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
565
+ # masked positions, this operation will create a tensor which is 0.0 for
566
+ # positions we want to attend and the dtype's smallest value for masked positions.
567
+ # Since we are adding it to the raw scores before the softmax, this is
568
+ # effectively the same as removing these entirely.
569
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
570
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
571
+
572
+ # If a 2D or 3D attention mask is provided for the cross-attention
573
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
574
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
575
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
576
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
577
+ if encoder_attention_mask is None:
578
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
579
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
580
+ else:
581
+ encoder_attention_mask = None
582
+
583
+ # Prepare head mask if needed
584
+ # 1.0 in head_mask indicate we keep the head
585
+ # attention_probs has shape bsz x n_heads x N x N
586
+ # head_mask has shape n_layer x batch x n_heads x N x N
587
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
588
+
589
+ if inputs_embeds is None:
590
+ inputs_embeds = self.wte(input_ids)
591
+ position_embeds = self.wpe(position_ids)
592
+ hidden_states = inputs_embeds + position_embeds
593
+
594
+ if token_type_ids is not None:
595
+ token_type_embeds = self.wte(token_type_ids)
596
+ hidden_states = hidden_states + token_type_embeds
597
+
598
+ hidden_states = self.drop(hidden_states)
599
+
600
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
601
+
602
+ if self.gradient_checkpointing and self.training:
603
+ if use_cache:
604
+ logger.warning_once(
605
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
606
+ )
607
+ use_cache = False
608
+
609
+ presents = () if use_cache else None
610
+ all_self_attentions = () if output_attentions else None
611
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
612
+ all_hidden_states = () if output_hidden_states else None
613
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
614
+ # Model parallel
615
+ if self.model_parallel:
616
+ torch.cuda.set_device(hidden_states.device)
617
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
618
+ if layer_past is not None:
619
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
620
+ # Ensure that attention_mask is always on the same device as hidden_states
621
+ if attention_mask is not None:
622
+ attention_mask = attention_mask.to(hidden_states.device)
623
+ if isinstance(head_mask, torch.Tensor):
624
+ head_mask = head_mask.to(hidden_states.device)
625
+ if output_hidden_states:
626
+ all_hidden_states = all_hidden_states + (hidden_states,)
627
+
628
+ if self.gradient_checkpointing and self.training:
629
+ outputs = self._gradient_checkpointing_func(
630
+ block.__call__,
631
+ hidden_states,
632
+ None,
633
+ attention_mask,
634
+ head_mask[i],
635
+ encoder_hidden_states,
636
+ encoder_attention_mask,
637
+ use_cache,
638
+ output_attentions,
639
+ )
640
+ else:
641
+ outputs = block(
642
+ hidden_states,
643
+ layer_past=layer_past,
644
+ attention_mask=attention_mask,
645
+ head_mask=head_mask[i],
646
+ encoder_hidden_states=encoder_hidden_states,
647
+ encoder_attention_mask=encoder_attention_mask,
648
+ use_cache=use_cache,
649
+ output_attentions=output_attentions,
650
+ )
651
+
652
+ hidden_states = outputs[0]
653
+ if use_cache is True:
654
+ presents = presents + (outputs[1],)
655
+
656
+ if output_attentions:
657
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
658
+ if self.config.add_cross_attention:
659
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
660
+
661
+ # Model Parallel: If it's the last layer for that device, put things on the next device
662
+ if self.model_parallel:
663
+ for k, v in self.device_map.items():
664
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
665
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
666
+
667
+ hidden_states = self.ln_f(hidden_states)
668
+
669
+ hidden_states = hidden_states.view(output_shape)
670
+ # Add last hidden state
671
+ if output_hidden_states:
672
+ all_hidden_states = all_hidden_states + (hidden_states,)
673
+
674
+ if not return_dict:
675
+ return tuple(
676
+ v
677
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
678
+ if v is not None
679
+ )
680
+
681
+ return BaseModelOutputWithPastAndCrossAttentions(
682
+ last_hidden_state=hidden_states,
683
+ past_key_values=presents,
684
+ hidden_states=all_hidden_states,
685
+ attentions=all_self_attentions,
686
+ cross_attentions=all_cross_attentions,
687
+ )
688
+
689
+
690
+ @dataclass
691
+ class DecisionTransformerOutput(ModelOutput):
692
+ """
693
+ Base class for model's outputs that also contains a pooling of the last hidden states.
694
+
695
+ Args:
696
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
697
+ Sequence of hidden-states at the output of the last layer of the model.
698
+ state_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, state_dim)`):
699
+ Environment state predictions
700
+ action_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, action_dim)`):
701
+ Model action predictions
702
+ return_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, 1)`):
703
+ Predicted returns for each state
704
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
705
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
706
+ shape `(batch_size, sequence_length, hidden_size)`.
707
+
708
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
709
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
710
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
711
+ sequence_length)`.
712
+
713
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
714
+ heads.
715
+ """
716
+
717
+ state_preds: torch.FloatTensor = None
718
+ action_preds: torch.FloatTensor = None
719
+ return_preds: torch.FloatTensor = None
720
+ hidden_states: torch.FloatTensor = None
721
+ attentions: torch.FloatTensor = None
722
+ last_hidden_state: torch.FloatTensor = None
723
+
724
+
725
+ class DecisionTransformerPreTrainedModel(PreTrainedModel):
726
+ """
727
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
728
+ models.
729
+ """
730
+
731
+ config_class = DecisionTransformerConfig
732
+ base_model_prefix = "decision_transformer"
733
+ main_input_name = "states"
734
+ supports_gradient_checkpointing = False
735
+
736
+ def _init_weights(self, module):
737
+ """Initialize the weights"""
738
+ if isinstance(module, nn.Linear):
739
+ # Slightly different from the TF version which uses truncated_normal for initialization
740
+ # cf https://github.com/pytorch/pytorch/pull/5617
741
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
742
+ if module.bias is not None:
743
+ module.bias.data.zero_()
744
+ elif isinstance(module, nn.Embedding):
745
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
746
+ if module.padding_idx is not None:
747
+ module.weight.data[module.padding_idx].zero_()
748
+ elif isinstance(module, nn.LayerNorm):
749
+ module.bias.data.zero_()
750
+ module.weight.data.fill_(1.0)
751
+
752
+
753
+ DECISION_TRANSFORMER_START_DOCSTRING = r"""
754
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
755
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
756
+ behavior.
757
+
758
+ Parameters:
759
+ config ([`~DecisionTransformerConfig`]): Model configuration class with all the parameters of the model.
760
+ Initializing with a config file does not load the weights associated with the model, only the
761
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
762
+ """
763
+
764
+ DECISION_TRANSFORMER_INPUTS_DOCSTRING = r"""
765
+ Args:
766
+ states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):
767
+ The states for each step in the trajectory
768
+ actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):
769
+ The actions taken by the "expert" policy for the current state, these are masked for auto regressive
770
+ prediction
771
+ rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
772
+ The rewards for each state, action
773
+ returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
774
+ The returns for each state in the trajectory
775
+ timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):
776
+ The timestep for each step in the trajectory
777
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, episode_length)`):
778
+ Masking, used to mask the actions when performing autoregressive prediction
779
+ """
780
+
781
+
782
+ @add_start_docstrings("The Decision Transformer Model", DECISION_TRANSFORMER_START_DOCSTRING)
783
+ class DecisionTransformerModel(DecisionTransformerPreTrainedModel):
784
+ """
785
+
786
+ The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL
787
+ setting. Refer to the paper for more details: https://arxiv.org/abs/2106.01345
788
+
789
+ """
790
+
791
+ def __init__(self, config):
792
+ super().__init__(config)
793
+ self.config = config
794
+ self.hidden_size = config.hidden_size
795
+ # note: the only difference between this GPT2Model and the default Huggingface version
796
+ # is that the positional embeddings are removed (since we'll add those ourselves)
797
+ self.encoder = DecisionTransformerGPT2Model(config)
798
+
799
+ self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
800
+ self.embed_return = torch.nn.Linear(1, config.hidden_size)
801
+ self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
802
+ self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)
803
+
804
+ self.embed_ln = nn.LayerNorm(config.hidden_size)
805
+
806
+ # note: we don't predict states or returns for the paper
807
+ self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
808
+ self.predict_action = nn.Sequential(
809
+ *([nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
810
+ )
811
+ self.predict_return = torch.nn.Linear(config.hidden_size, 1)
812
+
813
+ # Initialize weights and apply final processing
814
+ self.post_init()
815
+
816
+ @add_start_docstrings_to_model_forward(DECISION_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
817
+ @replace_return_docstrings(output_type=DecisionTransformerOutput, config_class=_CONFIG_FOR_DOC)
818
+ def forward(
819
+ self,
820
+ states: Optional[torch.FloatTensor] = None,
821
+ actions: Optional[torch.FloatTensor] = None,
822
+ rewards: Optional[torch.FloatTensor] = None,
823
+ returns_to_go: Optional[torch.FloatTensor] = None,
824
+ timesteps: Optional[torch.LongTensor] = None,
825
+ attention_mask: Optional[torch.FloatTensor] = None,
826
+ output_hidden_states: Optional[bool] = None,
827
+ output_attentions: Optional[bool] = None,
828
+ return_dict: Optional[bool] = None,
829
+ ) -> Union[Tuple[torch.FloatTensor], DecisionTransformerOutput]:
830
+ r"""
831
+ Returns:
832
+
833
+ Examples:
834
+
835
+ ```python
836
+ >>> from transformers import DecisionTransformerModel
837
+ >>> import torch
838
+
839
+ >>> model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
840
+ >>> # evaluation
841
+ >>> model = model.to(device)
842
+ >>> model.eval()
843
+
844
+ >>> env = gym.make("Hopper-v3")
845
+ >>> state_dim = env.observation_space.shape[0]
846
+ >>> act_dim = env.action_space.shape[0]
847
+
848
+ >>> state = env.reset()
849
+ >>> states = torch.from_numpy(state).reshape(1, 1, state_dim).to(device=device, dtype=torch.float32)
850
+ >>> actions = torch.zeros((1, 1, act_dim), device=device, dtype=torch.float32)
851
+ >>> rewards = torch.zeros(1, 1, device=device, dtype=torch.float32)
852
+ >>> target_return = torch.tensor(TARGET_RETURN, dtype=torch.float32).reshape(1, 1)
853
+ >>> timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
854
+ >>> attention_mask = torch.zeros(1, 1, device=device, dtype=torch.float32)
855
+
856
+ >>> # forward pass
857
+ >>> with torch.no_grad():
858
+ ... state_preds, action_preds, return_preds = model(
859
+ ... states=states,
860
+ ... actions=actions,
861
+ ... rewards=rewards,
862
+ ... returns_to_go=target_return,
863
+ ... timesteps=timesteps,
864
+ ... attention_mask=attention_mask,
865
+ ... return_dict=False,
866
+ ... )
867
+ ```"""
868
+
869
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
870
+ output_hidden_states = (
871
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
872
+ )
873
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
874
+
875
+ batch_size, seq_length = states.shape[0], states.shape[1]
876
+
877
+ if attention_mask is None:
878
+ # attention mask for GPT: 1 if can be attended to, 0 if not
879
+ attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
880
+
881
+ # embed each modality with a different head
882
+ state_embeddings = self.embed_state(states)
883
+ action_embeddings = self.embed_action(actions)
884
+ returns_embeddings = self.embed_return(returns_to_go)
885
+ time_embeddings = self.embed_timestep(timesteps)
886
+
887
+ # time embeddings are treated similar to positional embeddings
888
+ state_embeddings = state_embeddings + time_embeddings
889
+ action_embeddings = action_embeddings + time_embeddings
890
+ returns_embeddings = returns_embeddings + time_embeddings
891
+
892
+ # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
893
+ # which works nice in an autoregressive sense since states predict actions
894
+ stacked_inputs = (
895
+ torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
896
+ .permute(0, 2, 1, 3)
897
+ .reshape(batch_size, 3 * seq_length, self.hidden_size)
898
+ )
899
+ stacked_inputs = self.embed_ln(stacked_inputs)
900
+
901
+ # to make the attention mask fit the stacked inputs, have to stack it as well
902
+ stacked_attention_mask = (
903
+ torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
904
+ .permute(0, 2, 1)
905
+ .reshape(batch_size, 3 * seq_length)
906
+ )
907
+ device = stacked_inputs.device
908
+ # we feed in the input embeddings (not word indices as in NLP) to the model
909
+ encoder_outputs = self.encoder(
910
+ inputs_embeds=stacked_inputs,
911
+ attention_mask=stacked_attention_mask,
912
+ position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
913
+ output_attentions=output_attentions,
914
+ output_hidden_states=output_hidden_states,
915
+ return_dict=return_dict,
916
+ )
917
+ x = encoder_outputs[0]
918
+
919
+ # reshape x so that the second dimension corresponds to the original
920
+ # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
921
+ x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)
922
+
923
+ # get predictions
924
+ return_preds = self.predict_return(x[:, 2]) # predict next return given state and action
925
+ state_preds = self.predict_state(x[:, 2]) # predict next state given state and action
926
+ action_preds = self.predict_action(x[:, 1]) # predict next action given state
927
+ if not return_dict:
928
+ return (state_preds, action_preds, return_preds)
929
+
930
+ return DecisionTransformerOutput(
931
+ last_hidden_state=encoder_outputs.last_hidden_state,
932
+ state_preds=state_preds,
933
+ action_preds=action_preds,
934
+ return_preds=return_preds,
935
+ hidden_states=encoder_outputs.hidden_states,
936
+ attentions=encoder_outputs.attentions,
937
+ )
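To make the tensor layout described in `forward` concrete, here is a small end-to-end sketch with random trajectories. Shapes follow `DECISION_TRANSFORMER_INPUTS_DOCSTRING`; the random data is purely illustrative and not a meaningful policy evaluation.

import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=4)
model = DecisionTransformerModel(config).eval()

batch_size, episode_length = 2, 5
states = torch.randn(batch_size, episode_length, config.state_dim)
actions = torch.randn(batch_size, episode_length, config.act_dim)
rewards = torch.randn(batch_size, episode_length, 1)
returns_to_go = torch.randn(batch_size, episode_length, 1)
timesteps = torch.arange(episode_length).unsqueeze(0).repeat(batch_size, 1)
attention_mask = torch.ones(batch_size, episode_length)

with torch.no_grad():
    out = model(
        states=states,
        actions=actions,
        rewards=rewards,
        returns_to_go=returns_to_go,
        timesteps=timesteps,
        attention_mask=attention_mask,
    )

# Internally each timestep becomes a (return, state, action) triple, so the
# encoder sees a sequence of length 3 * episode_length.
print(out.state_preds.shape)   # torch.Size([2, 5, 17])
print(out.action_preds.shape)  # torch.Size([2, 5, 4])
print(out.return_preds.shape)  # torch.Size([2, 5, 1])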
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_deta": ["DETA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetaConfig"],
22
+ }
23
+
24
+ try:
25
+ if not is_vision_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["image_processing_deta"] = ["DetaImageProcessor"]
31
+
32
+ try:
33
+ if not is_torch_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["modeling_deta"] = [
39
+ "DETA_PRETRAINED_MODEL_ARCHIVE_LIST",
40
+ "DetaForObjectDetection",
41
+ "DetaModel",
42
+ "DetaPreTrainedModel",
43
+ ]
44
+
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_deta import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP, DetaConfig
48
+
49
+ try:
50
+ if not is_vision_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .image_processing_deta import DetaImageProcessor
56
+
57
+ try:
58
+ if not is_torch_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .modeling_deta import (
64
+ DETA_PRETRAINED_MODEL_ARCHIVE_LIST,
65
+ DetaForObjectDetection,
66
+ DetaModel,
67
+ DetaPreTrainedModel,
68
+ )
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
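A short, hedged usage note on the lazy `_import_structure` pattern above: importing the package only registers names, and the torch-backed `modeling_deta` module is imported the first time one of its symbols is accessed. The snippet below assumes a transformers build that still ships DETA and has torch installed.

```python
# Illustrative only: lazy resolution of DETA symbols via the _LazyModule set up above.
from transformers.models.deta import DetaConfig  # configuration only; no modeling import yet

config = DetaConfig(num_queries=300)  # lightweight configuration object
print(config.num_queries)

# Accessing a torch-backed symbol is what actually imports modeling_deta.
from transformers.models.deta import DetaForObjectDetection

print(DetaForObjectDetection.config_class is DetaConfig)  # True
```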
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/convert_deta_resnet_to_pytorch.py ADDED
@@ -0,0 +1,320 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DETA checkpoints from the original repository.
16
+
17
+ URL: https://github.com/jozhang97/DETA/tree/master"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from pathlib import Path
23
+
24
+ import requests
25
+ import torch
26
+ from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
27
+ from PIL import Image
28
+
29
+ from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor
30
+ from transformers.utils import logging
31
+
32
+
33
+ logging.set_verbosity_info()
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ def get_deta_config():
38
+ config = DetaConfig(
39
+ num_queries=900,
40
+ encoder_ffn_dim=2048,
41
+ decoder_ffn_dim=2048,
42
+ num_feature_levels=5,
43
+ assign_first_stage=True,
44
+ with_box_refine=True,
45
+ two_stage=True,
46
+ )
47
+
48
+ # set labels
49
+ config.num_labels = 91
50
+ repo_id = "huggingface/label-files"
51
+ filename = "coco-detection-id2label.json"
52
+ id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
53
+ id2label = {int(k): v for k, v in id2label.items()}
54
+ config.id2label = id2label
55
+ config.label2id = {v: k for k, v in id2label.items()}
56
+
57
+ return config
58
+
59
+
60
+ # here we list all keys to be renamed (original name on the left, our name on the right)
61
+ def create_rename_keys(config):
62
+ rename_keys = []
63
+
64
+ # stem
65
+ # fmt: off
66
+ rename_keys.append(("backbone.0.body.conv1.weight", "model.backbone.model.embedder.embedder.convolution.weight"))
67
+ rename_keys.append(("backbone.0.body.bn1.weight", "model.backbone.model.embedder.embedder.normalization.weight"))
68
+ rename_keys.append(("backbone.0.body.bn1.bias", "model.backbone.model.embedder.embedder.normalization.bias"))
69
+ rename_keys.append(("backbone.0.body.bn1.running_mean", "model.backbone.model.embedder.embedder.normalization.running_mean"))
70
+ rename_keys.append(("backbone.0.body.bn1.running_var", "model.backbone.model.embedder.embedder.normalization.running_var"))
71
+ # stages
72
+ for stage_idx in range(len(config.backbone_config.depths)):
73
+ for layer_idx in range(config.backbone_config.depths[stage_idx]):
74
+ # shortcut
75
+ if layer_idx == 0:
76
+ rename_keys.append(
77
+ (
78
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
79
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
80
+ )
81
+ )
82
+ rename_keys.append(
83
+ (
84
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
85
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
86
+ )
87
+ )
88
+ rename_keys.append(
89
+ (
90
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
91
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
92
+ )
93
+ )
94
+ rename_keys.append(
95
+ (
96
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
97
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
98
+ )
99
+ )
100
+ rename_keys.append(
101
+ (
102
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
103
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
104
+ )
105
+ )
106
+ # 3 convs
107
+ for i in range(3):
108
+ rename_keys.append(
109
+ (
110
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
111
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
112
+ )
113
+ )
114
+ rename_keys.append(
115
+ (
116
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
117
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
118
+ )
119
+ )
120
+ rename_keys.append(
121
+ (
122
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
123
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
124
+ )
125
+ )
126
+ rename_keys.append(
127
+ (
128
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
129
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
130
+ )
131
+ )
132
+ rename_keys.append(
133
+ (
134
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
135
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
136
+ )
137
+ )
138
+ # transformer encoder
139
+ for i in range(config.encoder_layers):
140
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
141
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
142
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
143
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
144
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
145
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
146
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
147
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
148
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
149
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
150
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
151
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
152
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
153
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
154
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
155
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
156
+
157
+ # transformer decoder
158
+ for i in range(config.decoder_layers):
159
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
160
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
161
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
162
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
163
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
164
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
165
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
166
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
167
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
168
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
169
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
170
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
171
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
172
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
173
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
174
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
175
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
176
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
177
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
178
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
179
+
180
+ # fmt: on
181
+
182
+ return rename_keys
183
+
184
+
185
+ def rename_key(dct, old, new):
186
+ val = dct.pop(old)
187
+ dct[new] = val
188
+
189
+
190
+ def read_in_decoder_q_k_v(state_dict, config):
191
+ # transformer decoder self-attention layers
192
+ hidden_size = config.d_model
193
+ for i in range(config.decoder_layers):
194
+ # read in weights + bias of input projection layer of self-attention
195
+ in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
196
+ in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
197
+ # next, add query, keys and values (in that order) to the state dict
198
+ state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
199
+ state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
200
+ state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
201
+ hidden_size : hidden_size * 2, :
202
+ ]
203
+ state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
204
+ state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
205
+ state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
206
+
207
+
208
+ # We will verify our results on an image of cute cats
209
+ def prepare_img():
210
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
211
+ im = Image.open(requests.get(url, stream=True).raw)
212
+
213
+ return im
214
+
215
+
216
+ @torch.no_grad()
217
+ def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
218
+ """
219
+ Copy/paste/tweak model's weights to our DETA structure.
220
+ """
221
+
222
+ # load config
223
+ config = get_deta_config()
224
+
225
+ # load original state dict
226
+ if model_name == "deta-resnet-50":
227
+ filename = "adet_checkpoint0011.pth"
228
+ elif model_name == "deta-resnet-50-24-epochs":
229
+ filename = "adet_2x_checkpoint0023.pth"
230
+ else:
231
+ raise ValueError(f"Model name {model_name} not supported")
232
+ checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename=filename)
233
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
234
+
235
+ # rename keys
236
+ rename_keys = create_rename_keys(config)
237
+ for src, dest in rename_keys:
238
+ rename_key(state_dict, src, dest)
239
+ read_in_decoder_q_k_v(state_dict, config)
240
+
241
+ # fix some prefixes
242
+ for key in state_dict.copy().keys():
243
+ if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
244
+ val = state_dict.pop(key)
245
+ state_dict[key.replace("transformer.decoder", "model.decoder")] = val
246
+ if "input_proj" in key:
247
+ val = state_dict.pop(key)
248
+ state_dict["model." + key] = val
249
+ if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
250
+ val = state_dict.pop(key)
251
+ state_dict[key.replace("transformer", "model")] = val
252
+
253
+ # finally, create HuggingFace model and load state dict
254
+ model = DetaForObjectDetection(config)
255
+ model.load_state_dict(state_dict)
256
+ model.eval()
257
+
258
+ device = "cuda" if torch.cuda.is_available() else "cpu"
259
+ model.to(device)
260
+
261
+ # load image processor
262
+ processor = DetaImageProcessor(format="coco_detection")
263
+
264
+ # verify our conversion on image
265
+ img = prepare_img()
266
+ encoding = processor(images=img, return_tensors="pt")
267
+ pixel_values = encoding["pixel_values"]
268
+ outputs = model(pixel_values.to(device))
269
+
270
+ # verify logits
271
+ if model_name == "deta-resnet-50":
272
+ expected_logits = torch.tensor(
273
+ [[-7.3978, -2.5406, -4.1668], [-8.2684, -3.9933, -3.8096], [-7.0515, -3.7973, -5.8516]]
274
+ )
275
+ expected_boxes = torch.tensor([[0.5043, 0.4973, 0.9998], [0.2542, 0.5489, 0.4748], [0.5490, 0.2765, 0.0570]])
276
+ elif model_name == "deta-resnet-50-24-epochs":
277
+ expected_logits = torch.tensor(
278
+ [[-7.1688, -2.4857, -4.8669], [-7.8630, -3.8154, -4.2674], [-7.2730, -4.1865, -5.5323]]
279
+ )
280
+ expected_boxes = torch.tensor([[0.5021, 0.4971, 0.9994], [0.2546, 0.5486, 0.4731], [0.1686, 0.1986, 0.2142]])
281
+
282
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
283
+ assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
284
+ print("Everything ok!")
285
+
286
+ if pytorch_dump_folder_path:
287
+ # Save model and processor
288
+ logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
289
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
290
+ model.save_pretrained(pytorch_dump_folder_path)
291
+ processor.save_pretrained(pytorch_dump_folder_path)
292
+
293
+ # Push to hub
294
+ if push_to_hub:
295
+ print("Pushing model and processor to hub...")
296
+ model.push_to_hub(f"jozhang97/{model_name}")
297
+ processor.push_to_hub(f"jozhang97/{model_name}")
298
+
299
+
300
+ if __name__ == "__main__":
301
+ parser = argparse.ArgumentParser()
302
+
303
+ parser.add_argument(
304
+ "--model_name",
305
+ type=str,
306
+ default="deta-resnet-50",
307
+ choices=["deta-resnet-50", "deta-resnet-50-24-epochs"],
308
+ help="Name of the model you'd like to convert.",
309
+ )
310
+ parser.add_argument(
311
+ "--pytorch_dump_folder_path",
312
+ default=None,
313
+ type=str,
314
+ help="Path to the folder to output PyTorch model.",
315
+ )
316
+ parser.add_argument(
317
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
318
+ )
319
+ args = parser.parse_args()
320
+ convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
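Once a checkpoint has been converted (and optionally pushed) by the script above, it can be used through the standard object-detection API. A sketch, assuming the hub id `jozhang97/deta-resnet-50` that the script pushes to is available; a local `pytorch_dump_folder_path` works the same way. Requires torch, torchvision and Pillow.

```python
# Hedged inference sketch for a converted DETA ResNet-50 checkpoint.
import requests
import torch
from PIL import Image
from transformers import DetaForObjectDetection, DetaImageProcessor

processor = DetaImageProcessor.from_pretrained("jozhang97/deta-resnet-50")
model = DetaForObjectDetection.from_pretrained("jozhang97/deta-resnet-50")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Rescale the normalized boxes back to the original image size and keep confident hits.
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```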
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/convert_deta_swin_to_pytorch.py ADDED
@@ -0,0 +1,327 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DETA checkpoints from the original repository.
16
+
17
+ URL: https://github.com/jozhang97/DETA/tree/master"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from pathlib import Path
23
+
24
+ import requests
25
+ import torch
26
+ from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
27
+ from PIL import Image
28
+
29
+ from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
30
+ from transformers.utils import logging
31
+
32
+
33
+ logging.set_verbosity_info()
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ def get_deta_config(model_name):
38
+ backbone_config = SwinConfig(
39
+ embed_dim=192,
40
+ depths=(2, 2, 18, 2),
41
+ num_heads=(6, 12, 24, 48),
42
+ window_size=12,
43
+ out_features=["stage2", "stage3", "stage4"],
44
+ )
45
+
46
+ config = DetaConfig(
47
+ backbone_config=backbone_config,
48
+ num_queries=900,
49
+ encoder_ffn_dim=2048,
50
+ decoder_ffn_dim=2048,
51
+ num_feature_levels=5,
52
+ assign_first_stage=True,
53
+ with_box_refine=True,
54
+ two_stage=True,
55
+ )
56
+
57
+ # set labels
58
+ repo_id = "huggingface/label-files"
59
+ if "o365" in model_name:
60
+ num_labels = 366
61
+ filename = "object365-id2label.json"
62
+ else:
63
+ num_labels = 91
64
+ filename = "coco-detection-id2label.json"
65
+
66
+ config.num_labels = num_labels
67
+ id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
68
+ id2label = {int(k): v for k, v in id2label.items()}
69
+ config.id2label = id2label
70
+ config.label2id = {v: k for k, v in id2label.items()}
71
+
72
+ return config
73
+
74
+
75
+ # here we list all keys to be renamed (original name on the left, our name on the right)
76
+ def create_rename_keys(config):
77
+ rename_keys = []
78
+
79
+ # stem
80
+ # fmt: off
81
+ rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
82
+ rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
83
+ rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
84
+ rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
85
+ # stages
86
+ for i in range(len(config.backbone_config.depths)):
87
+ for j in range(config.backbone_config.depths[i]):
88
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
89
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
90
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
91
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
92
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
93
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
94
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
95
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
96
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
97
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
98
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
99
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
100
+
101
+ if i < 3:
102
+ rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
103
+ rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
104
+ rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))
105
+
106
+ rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
107
+ rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
108
+ rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
109
+ rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
110
+ rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
111
+ rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))
112
+
113
+ # transformer encoder
114
+ for i in range(config.encoder_layers):
115
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
116
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
117
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
118
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
119
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
120
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
121
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
122
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
123
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
124
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
125
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
126
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
127
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
128
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
129
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
130
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
131
+
132
+ # transformer decoder
133
+ for i in range(config.decoder_layers):
134
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
135
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
136
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
137
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
138
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
139
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
140
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
141
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
142
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
143
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
144
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
145
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
146
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
147
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
148
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
149
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
150
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
151
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
152
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
153
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
154
+
155
+ # fmt: on
156
+
157
+ return rename_keys
158
+
159
+
160
+ def rename_key(dct, old, new):
161
+ val = dct.pop(old)
162
+ dct[new] = val
163
+
164
+
165
+ # we split up the matrix of each encoder layer into queries, keys and values
166
+ def read_in_swin_q_k_v(state_dict, backbone_config):
167
+ num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
168
+ for i in range(len(backbone_config.depths)):
169
+ dim = num_features[i]
170
+ for j in range(backbone_config.depths[i]):
171
+ # fmt: off
172
+ # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
173
+ in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
174
+ in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
175
+ # next, add query, keys and values (in that order) to the state dict
176
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
177
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
178
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
179
+ dim : dim * 2, :
180
+ ]
181
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
182
+ dim : dim * 2
183
+ ]
184
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
185
+ -dim :, :
186
+ ]
187
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
188
+ # fmt: on
189
+
190
+
191
+ def read_in_decoder_q_k_v(state_dict, config):
192
+ # transformer decoder self-attention layers
193
+ hidden_size = config.d_model
194
+ for i in range(config.decoder_layers):
195
+ # read in weights + bias of input projection layer of self-attention
196
+ in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
197
+ in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
198
+ # next, add query, keys and values (in that order) to the state dict
199
+ state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
200
+ state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
201
+ state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
202
+ hidden_size : hidden_size * 2, :
203
+ ]
204
+ state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
205
+ state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
206
+ state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
207
+
208
+
209
+ # We will verify our results on an image of cute cats
210
+ def prepare_img():
211
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
212
+ im = Image.open(requests.get(url, stream=True).raw)
213
+
214
+ return im
215
+
216
+
217
+ @torch.no_grad()
218
+ def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
219
+ """
220
+ Copy/paste/tweak model's weights to our DETA structure.
221
+ """
222
+
223
+ # load config
224
+ config = get_deta_config(model_name)
225
+
226
+ # load original state dict
227
+ if model_name == "deta-swin-large":
228
+ checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
229
+ elif model_name == "deta-swin-large-o365":
230
+ checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
231
+ else:
232
+ raise ValueError(f"Model name {model_name} not supported")
233
+
234
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
235
+
236
+ # original state dict
237
+ for name, param in state_dict.items():
238
+ print(name, param.shape)
239
+
240
+ # rename keys
241
+ rename_keys = create_rename_keys(config)
242
+ for src, dest in rename_keys:
243
+ rename_key(state_dict, src, dest)
244
+ read_in_swin_q_k_v(state_dict, config.backbone_config)
245
+ read_in_decoder_q_k_v(state_dict, config)
246
+
247
+ # fix some prefixes
248
+ for key in state_dict.copy().keys():
249
+ if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
250
+ val = state_dict.pop(key)
251
+ state_dict[key.replace("transformer.decoder", "model.decoder")] = val
252
+ if "input_proj" in key:
253
+ val = state_dict.pop(key)
254
+ state_dict["model." + key] = val
255
+ if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
256
+ val = state_dict.pop(key)
257
+ state_dict[key.replace("transformer", "model")] = val
258
+
259
+ # finally, create HuggingFace model and load state dict
260
+ model = DetaForObjectDetection(config)
261
+ model.load_state_dict(state_dict)
262
+ model.eval()
263
+
264
+ device = "cuda" if torch.cuda.is_available() else "cpu"
265
+ model.to(device)
266
+
267
+ # load image processor
268
+ processor = DetaImageProcessor(format="coco_detection")
269
+
270
+ # verify our conversion on image
271
+ img = prepare_img()
272
+ encoding = processor(images=img, return_tensors="pt")
273
+ pixel_values = encoding["pixel_values"]
274
+ outputs = model(pixel_values.to(device))
275
+
276
+ # verify logits
277
+ print("Logits:", outputs.logits[0, :3, :3])
278
+ print("Boxes:", outputs.pred_boxes[0, :3, :3])
279
+ if model_name == "deta-swin-large":
280
+ expected_logits = torch.tensor(
281
+ [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
282
+ )
283
+ expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
284
+ elif model_name == "deta-swin-large-o365":
285
+ expected_logits = torch.tensor(
286
+ [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
287
+ )
288
+ expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
289
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
290
+ assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
291
+ print("Everything ok!")
292
+
293
+ if pytorch_dump_folder_path:
294
+ # Save model and processor
295
+ logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
296
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
297
+ model.save_pretrained(pytorch_dump_folder_path)
298
+ processor.save_pretrained(pytorch_dump_folder_path)
299
+
300
+ # Push to hub
301
+ if push_to_hub:
302
+ print("Pushing model and processor to hub...")
303
+ model.push_to_hub(f"jozhang97/{model_name}")
304
+ processor.push_to_hub(f"jozhang97/{model_name}")
305
+
306
+
307
+ if __name__ == "__main__":
308
+ parser = argparse.ArgumentParser()
309
+
310
+ parser.add_argument(
311
+ "--model_name",
312
+ type=str,
313
+ default="deta-swin-large",
314
+ choices=["deta-swin-large", "deta-swin-large-o365"],
315
+ help="Name of the model you'd like to convert.",
316
+ )
317
+ parser.add_argument(
318
+ "--pytorch_dump_folder_path",
319
+ default=None,
320
+ type=str,
321
+ help="Path to the folder to output PyTorch model.",
322
+ )
323
+ parser.add_argument(
324
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
325
+ )
326
+ args = parser.parse_args()
327
+ convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
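The Swin variant splits fused attention projections the same way as the ResNet script, via `read_in_swin_q_k_v` and `read_in_decoder_q_k_v`: PyTorch stores the query, key and value projections stacked along the first dimension of `in_proj_weight`, so slicing by the hidden size recovers the separate matrices. A small self-contained check of that convention, using toy values rather than real checkpoint weights:

```python
# Verify that slicing a fused in_proj weight/bias reproduces the fused projection.
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

q_w = in_proj_weight[:hidden_size]
k_w = in_proj_weight[hidden_size : 2 * hidden_size]
v_w = in_proj_weight[-hidden_size:]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : 2 * hidden_size]
v_b = in_proj_bias[-hidden_size:]

x = torch.randn(4, hidden_size)
fused = torch.nn.functional.linear(x, in_proj_weight, in_proj_bias)
split = torch.cat(
    [
        torch.nn.functional.linear(x, q_w, q_b),
        torch.nn.functional.linear(x, k_w, k_b),
        torch.nn.functional.linear(x, v_w, v_b),
    ],
    dim=-1,
)
assert torch.allclose(fused, split)  # the q/k/v slices are equivalent to the fused layer
```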
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/image_processing_deta.py ADDED
@@ -0,0 +1,1174 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Deformable DETR."""
16
+
17
+ import pathlib
18
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...feature_extraction_utils import BatchFeature
23
+ from ...image_processing_utils import BaseImageProcessor, get_size_dict
24
+ from ...image_transforms import (
25
+ PaddingMode,
26
+ center_to_corners_format,
27
+ corners_to_center_format,
28
+ pad,
29
+ rescale,
30
+ resize,
31
+ rgb_to_id,
32
+ to_channel_dimension_format,
33
+ )
34
+ from ...image_utils import (
35
+ IMAGENET_DEFAULT_MEAN,
36
+ IMAGENET_DEFAULT_STD,
37
+ AnnotationFormat,
38
+ AnnotationType,
39
+ ChannelDimension,
40
+ ImageInput,
41
+ PILImageResampling,
42
+ get_image_size,
43
+ infer_channel_dimension_format,
44
+ is_batched,
45
+ is_scaled_image,
46
+ to_numpy_array,
47
+ valid_images,
48
+ validate_annotations,
49
+ validate_preprocess_arguments,
50
+ )
51
+ from ...utils import (
52
+ is_flax_available,
53
+ is_jax_tensor,
54
+ is_tf_available,
55
+ is_tf_tensor,
56
+ is_torch_available,
57
+ is_torch_tensor,
58
+ is_torchvision_available,
59
+ is_vision_available,
60
+ logging,
61
+ )
62
+ from ...utils.generic import TensorType
63
+
64
+
65
+ if is_torch_available():
66
+ import torch
67
+
68
+
69
+ if is_torchvision_available():
70
+ from torchvision.ops.boxes import batched_nms
71
+
72
+ if is_vision_available():
73
+ import PIL
74
+
75
+
76
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
77
+
78
+ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
79
+
80
+
81
+ # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
82
+ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
83
+ """
84
+ Computes the output image size given the input image size and the desired output size.
85
+
86
+ Args:
87
+ image_size (`Tuple[int, int]`):
88
+ The input image size.
89
+ size (`int`):
90
+ The desired output size.
91
+ max_size (`int`, *optional*):
92
+ The maximum allowed output size.
93
+ """
94
+ height, width = image_size
95
+ if max_size is not None:
96
+ min_original_size = float(min((height, width)))
97
+ max_original_size = float(max((height, width)))
98
+ if max_original_size / min_original_size * size > max_size:
99
+ size = int(round(max_size * min_original_size / max_original_size))
100
+
101
+ if (height <= width and height == size) or (width <= height and width == size):
102
+ return height, width
103
+
104
+ if width < height:
105
+ ow = size
106
+ oh = int(size * height / width)
107
+ else:
108
+ oh = size
109
+ ow = int(size * width / height)
110
+ return (oh, ow)
111
+
112
+
113
+ # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
114
+ def get_resize_output_image_size(
115
+ input_image: np.ndarray,
116
+ size: Union[int, Tuple[int, int], List[int]],
117
+ max_size: Optional[int] = None,
118
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
119
+ ) -> Tuple[int, int]:
120
+ """
121
+ Computes the output image size given the input image size and the desired output size. If the desired output size
122
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
123
+ image size is computed by keeping the aspect ratio of the input image size.
124
+
125
+ Args:
126
+ input_image (`np.ndarray`):
127
+ The image to resize.
128
+ size (`int` or `Tuple[int, int]` or `List[int]`):
129
+ The desired output size.
130
+ max_size (`int`, *optional*):
131
+ The maximum allowed output size.
132
+ input_data_format (`ChannelDimension` or `str`, *optional*):
133
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
134
+ """
135
+ image_size = get_image_size(input_image, input_data_format)
136
+ if isinstance(size, (list, tuple)):
137
+ return size
138
+
139
+ return get_size_with_aspect_ratio(image_size, size, max_size)
140
+
141
+
142
+ # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
143
+ def get_numpy_to_framework_fn(arr) -> Callable:
144
+ """
145
+ Returns a function that converts a numpy array to the framework of the input array.
146
+
147
+ Args:
148
+ arr (`np.ndarray`): The array to convert.
149
+ """
150
+ if isinstance(arr, np.ndarray):
151
+ return np.array
152
+ if is_tf_available() and is_tf_tensor(arr):
153
+ import tensorflow as tf
154
+
155
+ return tf.convert_to_tensor
156
+ if is_torch_available() and is_torch_tensor(arr):
157
+ import torch
158
+
159
+ return torch.tensor
160
+ if is_flax_available() and is_jax_tensor(arr):
161
+ import jax.numpy as jnp
162
+
163
+ return jnp.array
164
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
165
+
166
+
167
+ # Copied from transformers.models.detr.image_processing_detr.safe_squeeze
168
+ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
169
+ """
170
+ Squeezes an array, but only if the axis specified has dim 1.
171
+ """
172
+ if axis is None:
173
+ return arr.squeeze()
174
+
175
+ try:
176
+ return arr.squeeze(axis=axis)
177
+ except ValueError:
178
+ return arr
179
+
180
+
181
+ # Copied from transformers.models.detr.image_processing_detr.normalize_annotation
182
+ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
183
+ image_height, image_width = image_size
184
+ norm_annotation = {}
185
+ for key, value in annotation.items():
186
+ if key == "boxes":
187
+ boxes = value
188
+ boxes = corners_to_center_format(boxes)
189
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
190
+ norm_annotation[key] = boxes
191
+ else:
192
+ norm_annotation[key] = value
193
+ return norm_annotation
194
+
195
+
196
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
197
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
198
+ """
199
+ Return the maximum value across all indices of an iterable of values.
200
+ """
201
+ return [max(values_i) for values_i in zip(*values)]
202
+
203
+
204
+ # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
205
+ def get_max_height_width(
206
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
207
+ ) -> List[int]:
208
+ """
209
+ Get the maximum height and width across all images in a batch.
210
+ """
211
+ if input_data_format is None:
212
+ input_data_format = infer_channel_dimension_format(images[0])
213
+
214
+ if input_data_format == ChannelDimension.FIRST:
215
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
216
+ elif input_data_format == ChannelDimension.LAST:
217
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
218
+ else:
219
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
220
+ return (max_height, max_width)
221
+
222
+
223
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
224
+ def make_pixel_mask(
225
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
226
+ ) -> np.ndarray:
227
+ """
228
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
229
+
230
+ Args:
231
+ image (`np.ndarray`):
232
+ Image to make the pixel mask for.
233
+ output_size (`Tuple[int, int]`):
234
+ Output size of the mask.
235
+ """
236
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
237
+ mask = np.zeros(output_size, dtype=np.int64)
238
+ mask[:input_height, :input_width] = 1
239
+ return mask
240
+
241
+
242
+ # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
243
+ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
244
+ """
245
+ Convert a COCO polygon annotation to a mask.
246
+
247
+ Args:
248
+ segmentations (`List[List[float]]`):
249
+ List of polygons, each polygon represented by a list of x-y coordinates.
250
+ height (`int`):
251
+ Height of the mask.
252
+ width (`int`):
253
+ Width of the mask.
254
+ """
255
+ try:
256
+ from pycocotools import mask as coco_mask
257
+ except ImportError:
258
+ raise ImportError("Pycocotools is not installed in your environment.")
259
+
260
+ masks = []
261
+ for polygons in segmentations:
262
+ rles = coco_mask.frPyObjects(polygons, height, width)
263
+ mask = coco_mask.decode(rles)
264
+ if len(mask.shape) < 3:
265
+ mask = mask[..., None]
266
+ mask = np.asarray(mask, dtype=np.uint8)
267
+ mask = np.any(mask, axis=2)
268
+ masks.append(mask)
269
+ if masks:
270
+ masks = np.stack(masks, axis=0)
271
+ else:
272
+ masks = np.zeros((0, height, width), dtype=np.uint8)
273
+
274
+ return masks
275
+
276
+
277
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DETA
278
+ def prepare_coco_detection_annotation(
279
+ image,
280
+ target,
281
+ return_segmentation_masks: bool = False,
282
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
283
+ ):
284
+ """
285
+ Convert the target in COCO format into the format expected by DETA.
286
+ """
287
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
288
+
289
+ image_id = target["image_id"]
290
+ image_id = np.asarray([image_id], dtype=np.int64)
291
+
292
+ # Get all COCO annotations for the given image.
293
+ annotations = target["annotations"]
294
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
295
+
296
+ classes = [obj["category_id"] for obj in annotations]
297
+ classes = np.asarray(classes, dtype=np.int64)
298
+
299
+ # for conversion to coco api
300
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
301
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
302
+
303
+ boxes = [obj["bbox"] for obj in annotations]
304
+ # guard against no boxes via resizing
305
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
306
+ boxes[:, 2:] += boxes[:, :2]
307
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
308
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
309
+
310
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
311
+
312
+ new_target = {}
313
+ new_target["image_id"] = image_id
314
+ new_target["class_labels"] = classes[keep]
315
+ new_target["boxes"] = boxes[keep]
316
+ new_target["area"] = area[keep]
317
+ new_target["iscrowd"] = iscrowd[keep]
318
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
319
+
320
+ if annotations and "keypoints" in annotations[0]:
321
+ keypoints = [obj["keypoints"] for obj in annotations]
322
+ # Converting the filtered keypoints list to a numpy array
323
+ keypoints = np.asarray(keypoints, dtype=np.float32)
324
+ # Apply the keep mask here to filter the relevant annotations
325
+ keypoints = keypoints[keep]
326
+ num_keypoints = keypoints.shape[0]
327
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
328
+ new_target["keypoints"] = keypoints
329
+
330
+ if return_segmentation_masks:
331
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
332
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
333
+ new_target["masks"] = masks[keep]
334
+
335
+ return new_target
336
+
337
+
338
+ # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
339
+ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
340
+ """
341
+ Compute the bounding boxes around the provided panoptic segmentation masks.
342
+
343
+ Args:
344
+ masks: masks in format `[number_masks, height, width]`, where `number_masks` is the number of masks
345
+
346
+ Returns:
347
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
348
+ """
349
+ if masks.size == 0:
350
+ return np.zeros((0, 4))
351
+
352
+ h, w = masks.shape[-2:]
353
+ y = np.arange(0, h, dtype=np.float32)
354
+ x = np.arange(0, w, dtype=np.float32)
355
+ # see https://github.com/pytorch/pytorch/issues/50276
356
+ y, x = np.meshgrid(y, x, indexing="ij")
357
+
358
+ x_mask = masks * np.expand_dims(x, axis=0)
359
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
360
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
361
+ x_min = x.filled(fill_value=1e8)
362
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
363
+
364
+ y_mask = masks * np.expand_dims(y, axis=0)
365
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
366
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
367
+ y_min = y.filled(fill_value=1e8)
368
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
369
+
370
+ return np.stack([x_min, y_min, x_max, y_max], 1)
371
+
372
+
373
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DETA
374
+ def prepare_coco_panoptic_annotation(
375
+ image: np.ndarray,
376
+ target: Dict,
377
+ masks_path: Union[str, pathlib.Path],
378
+ return_masks: bool = True,
379
+ input_data_format: Union[ChannelDimension, str] = None,
380
+ ) -> Dict:
381
+ """
382
+ Prepare a coco panoptic annotation for DETA.
383
+ """
384
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
385
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
386
+
387
+ new_target = {}
388
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
389
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
390
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
391
+
392
+ if "segments_info" in target:
393
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
394
+ masks = rgb_to_id(masks)
395
+
396
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
397
+ masks = masks == ids[:, None, None]
398
+ masks = masks.astype(np.uint8)
399
+ if return_masks:
400
+ new_target["masks"] = masks
401
+ new_target["boxes"] = masks_to_boxes(masks)
402
+ new_target["class_labels"] = np.array(
403
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
404
+ )
405
+ new_target["iscrowd"] = np.asarray(
406
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
407
+ )
408
+ new_target["area"] = np.asarray(
409
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
410
+ )
411
+
412
+ return new_target
413
+
414
+
415
+ # Copied from transformers.models.detr.image_processing_detr.resize_annotation
416
+ def resize_annotation(
417
+ annotation: Dict[str, Any],
418
+ orig_size: Tuple[int, int],
419
+ target_size: Tuple[int, int],
420
+ threshold: float = 0.5,
421
+ resample: PILImageResampling = PILImageResampling.NEAREST,
422
+ ):
423
+ """
424
+ Resizes an annotation to a target size.
425
+
426
+ Args:
427
+ annotation (`Dict[str, Any]`):
428
+ The annotation dictionary.
429
+ orig_size (`Tuple[int, int]`):
430
+ The original size of the input image.
431
+ target_size (`Tuple[int, int]`):
432
+ The target size of the image, as returned by the preprocessing `resize` step.
433
+ threshold (`float`, *optional*, defaults to 0.5):
434
+ The threshold used to binarize the segmentation masks.
435
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
436
+ The resampling filter to use when resizing the masks.
437
+ """
438
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
439
+ ratio_height, ratio_width = ratios
440
+
441
+ new_annotation = {}
442
+ new_annotation["size"] = target_size
443
+
444
+ for key, value in annotation.items():
445
+ if key == "boxes":
446
+ boxes = value
447
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
448
+ new_annotation["boxes"] = scaled_boxes
449
+ elif key == "area":
450
+ area = value
451
+ scaled_area = area * (ratio_width * ratio_height)
452
+ new_annotation["area"] = scaled_area
453
+ elif key == "masks":
454
+ masks = value[:, None]
455
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
456
+ masks = masks.astype(np.float32)
457
+ masks = masks[:, 0] > threshold
458
+ new_annotation["masks"] = masks
459
+ elif key == "size":
460
+ new_annotation["size"] = target_size
461
+ else:
462
+ new_annotation[key] = value
463
+
464
+ return new_annotation
465
+
466
+
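A small worked example of the scaling above, assuming the function is importable from this module: doubling both image dimensions doubles the box coordinates and quadruples the areas.

```python
import numpy as np

from transformers.models.deta.image_processing_deta import resize_annotation

annotation = {
    "size": np.asarray([100, 200]),                   # (height, width) of the original image
    "boxes": np.asarray([[10.0, 10.0, 50.0, 40.0]]),  # corner format, absolute pixels
    "area": np.asarray([1200.0]),
}

resized = resize_annotation(annotation, orig_size=(100, 200), target_size=(200, 400))
print(resized["boxes"])  # [[ 20.  20. 100.  80.]]
print(resized["area"])   # [4800.]
print(resized["size"])   # (200, 400)
```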
467
+ class DetaImageProcessor(BaseImageProcessor):
468
+ r"""
469
+ Constructs a DETA image processor.
470
+
471
+ Args:
472
+ format (`str`, *optional*, defaults to `"coco_detection"`):
473
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
474
+ do_resize (`bool`, *optional*, defaults to `True`):
475
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
476
+ overridden by the `do_resize` parameter in the `preprocess` method.
477
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
478
+ Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
479
+ the `preprocess` method.
480
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
481
+ Resampling filter to use if resizing the image.
482
+ do_rescale (`bool`, *optional*, defaults to `True`):
483
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
484
+ `do_rescale` parameter in the `preprocess` method.
485
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
486
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
487
+ `preprocess` method.
488
+ do_normalize (`bool`, *optional*, defaults to `True`):
489
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
490
+ `preprocess` method.
491
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
492
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
493
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
494
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
495
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
496
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
497
+ do_convert_annotations (`bool`, *optional*, defaults to `True`):
498
+ Controls whether to convert the annotations to the format expected by the DETA model. Converts the
499
+ bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
500
+ Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
501
+ do_pad (`bool`, *optional*, defaults to `True`):
502
+ Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
503
+ method. If `True` will pad the images in the batch to the largest height and width in the batch.
504
+ Padding will be applied to the bottom and right of the image with zeros.
505
+ """
506
+
507
+ model_input_names = ["pixel_values", "pixel_mask"]
508
+
509
+ def __init__(
510
+ self,
511
+ format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
512
+ do_resize: bool = True,
513
+ size: Dict[str, int] = None,
514
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
515
+ do_rescale: bool = True,
516
+ rescale_factor: Union[int, float] = 1 / 255,
517
+ do_normalize: bool = True,
518
+ image_mean: Union[float, List[float]] = None,
519
+ image_std: Union[float, List[float]] = None,
520
+ do_convert_annotations: bool = True,
521
+ do_pad: bool = True,
522
+ **kwargs,
523
+ ) -> None:
524
+ if "pad_and_return_pixel_mask" in kwargs:
525
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
526
+
527
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
528
+ size = get_size_dict(size, default_to_square=False)
529
+
530
+ if do_convert_annotations is None:
531
+ do_convert_annotations = do_normalize
532
+
533
+ super().__init__(**kwargs)
534
+ self.format = format
535
+ self.do_resize = do_resize
536
+ self.size = size
537
+ self.resample = resample
538
+ self.do_rescale = do_rescale
539
+ self.rescale_factor = rescale_factor
540
+ self.do_normalize = do_normalize
541
+ self.do_convert_annotations = do_convert_annotations
542
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
543
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
544
+ self.do_pad = do_pad
545
+
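A minimal instantiation sketch (these keyword arguments simply spell out the documented defaults, so a plain `DetaImageProcessor()` is equivalent; a vision-enabled install of `transformers` is assumed):

```python
from transformers import DetaImageProcessor

image_processor = DetaImageProcessor(
    format="coco_detection",
    do_resize=True,
    size={"shortest_edge": 800, "longest_edge": 1333},
    do_rescale=True,
    rescale_factor=1 / 255,
    do_normalize=True,
    do_pad=True,
)
print(image_processor.size)  # {'shortest_edge': 800, 'longest_edge': 1333}
```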
546
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DETA
547
+ def prepare_annotation(
548
+ self,
549
+ image: np.ndarray,
550
+ target: Dict,
551
+ format: Optional[AnnotationFormat] = None,
552
+ return_segmentation_masks: bool = None,
553
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
554
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
555
+ ) -> Dict:
556
+ """
557
+ Prepare an annotation for feeding into a DETA model.
558
+ """
559
+ format = format if format is not None else self.format
560
+
561
+ if format == AnnotationFormat.COCO_DETECTION:
562
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
563
+ target = prepare_coco_detection_annotation(
564
+ image, target, return_segmentation_masks, input_data_format=input_data_format
565
+ )
566
+ elif format == AnnotationFormat.COCO_PANOPTIC:
567
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
568
+ target = prepare_coco_panoptic_annotation(
569
+ image,
570
+ target,
571
+ masks_path=masks_path,
572
+ return_masks=return_segmentation_masks,
573
+ input_data_format=input_data_format,
574
+ )
575
+ else:
576
+ raise ValueError(f"Format {format} is not supported.")
577
+ return target
578
+
579
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
580
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
581
+ logger.warning_once(
582
+ "The `prepare` method is deprecated and will be removed in a v4.33. "
583
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
584
+ "does not return the image anymore.",
585
+ )
586
+ target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
587
+ return image, target
588
+
589
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
590
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
591
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
592
+ return convert_coco_poly_to_mask(*args, **kwargs)
593
+
594
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection
595
+ def prepare_coco_detection(self, *args, **kwargs):
596
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
597
+ return prepare_coco_detection_annotation(*args, **kwargs)
598
+
599
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
600
+ def prepare_coco_panoptic(self, *args, **kwargs):
601
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
602
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
603
+
604
+ def resize(
605
+ self,
606
+ image: np.ndarray,
607
+ size: Dict[str, int],
608
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
609
+ data_format: Optional[ChannelDimension] = None,
610
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
611
+ **kwargs,
612
+ ) -> np.ndarray:
613
+ """
614
+ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
615
+ int, smaller edge of the image will be matched to this number.
616
+
617
+ Args:
618
+ image (`np.ndarray`):
619
+ Image to resize.
620
+ size (`Dict[str, int]`):
621
+ The desired output size. Can contain keys `shortest_edge` and `longest_edge` or `height` and `width`.
622
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
623
+ Resampling filter to use if resizing the image.
624
+ data_format (`ChannelDimension`, *optional*):
625
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
626
+ image is used.
627
+ input_data_format (`ChannelDimension` or `str`, *optional*):
628
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
629
+ image.
630
+ """
631
+ size = get_size_dict(size, default_to_square=False)
632
+ if "shortest_edge" in size and "longest_edge" in size:
633
+ size = get_resize_output_image_size(
634
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
635
+ )
636
+ elif "height" in size and "width" in size:
637
+ size = (size["height"], size["width"])
638
+ else:
639
+ raise ValueError(
640
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
641
+ f" {size.keys()}."
642
+ )
643
+ image = resize(
644
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format
645
+ )
646
+ return image
647
+
648
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
649
+ def resize_annotation(
650
+ self,
651
+ annotation,
652
+ orig_size,
653
+ size,
654
+ resample: PILImageResampling = PILImageResampling.NEAREST,
655
+ ) -> Dict:
656
+ """
657
+ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
658
+ to this number.
659
+ """
660
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
661
+
662
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
663
+ def rescale(
664
+ self,
665
+ image: np.ndarray,
666
+ rescale_factor: float,
667
+ data_format: Optional[Union[str, ChannelDimension]] = None,
668
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
669
+ ) -> np.ndarray:
670
+ """
671
+ Rescale the image by the given factor. image = image * rescale_factor.
672
+
673
+ Args:
674
+ image (`np.ndarray`):
675
+ Image to rescale.
676
+ rescale_factor (`float`):
677
+ The value to use for rescaling.
678
+ data_format (`str` or `ChannelDimension`, *optional*):
679
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
680
+ image is used. Can be one of:
681
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
682
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
683
+ input_data_format (`str` or `ChannelDimension`, *optional*):
684
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
685
+ one of:
686
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
687
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
688
+ """
689
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
690
+
691
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
692
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
693
+ """
694
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
695
+ `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
696
+ """
697
+ return normalize_annotation(annotation, image_size=image_size)
698
+
699
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
700
+ def _update_annotation_for_padded_image(
701
+ self,
702
+ annotation: Dict,
703
+ input_image_size: Tuple[int, int],
704
+ output_image_size: Tuple[int, int],
705
+ padding,
706
+ update_bboxes,
707
+ ) -> Dict:
708
+ """
709
+ Update the annotation for a padded image.
710
+ """
711
+ new_annotation = {}
712
+ new_annotation["size"] = output_image_size
713
+
714
+ for key, value in annotation.items():
715
+ if key == "masks":
716
+ masks = value
717
+ masks = pad(
718
+ masks,
719
+ padding,
720
+ mode=PaddingMode.CONSTANT,
721
+ constant_values=0,
722
+ input_data_format=ChannelDimension.FIRST,
723
+ )
724
+ masks = safe_squeeze(masks, 1)
725
+ new_annotation["masks"] = masks
726
+ elif key == "boxes" and update_bboxes:
727
+ boxes = value
728
+ boxes *= np.asarray(
729
+ [
730
+ input_image_size[1] / output_image_size[1],
731
+ input_image_size[0] / output_image_size[0],
732
+ input_image_size[1] / output_image_size[1],
733
+ input_image_size[0] / output_image_size[0],
734
+ ]
735
+ )
736
+ new_annotation["boxes"] = boxes
737
+ elif key == "size":
738
+ new_annotation["size"] = output_image_size
739
+ else:
740
+ new_annotation[key] = value
741
+ return new_annotation
742
+
743
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
744
+ def _pad_image(
745
+ self,
746
+ image: np.ndarray,
747
+ output_size: Tuple[int, int],
748
+ annotation: Optional[Dict[str, Any]] = None,
749
+ constant_values: Union[float, Iterable[float]] = 0,
750
+ data_format: Optional[ChannelDimension] = None,
751
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
752
+ update_bboxes: bool = True,
753
+ ) -> np.ndarray:
754
+ """
755
+ Pad an image with zeros to the given size.
756
+ """
757
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
758
+ output_height, output_width = output_size
759
+
760
+ pad_bottom = output_height - input_height
761
+ pad_right = output_width - input_width
762
+ padding = ((0, pad_bottom), (0, pad_right))
763
+ padded_image = pad(
764
+ image,
765
+ padding,
766
+ mode=PaddingMode.CONSTANT,
767
+ constant_values=constant_values,
768
+ data_format=data_format,
769
+ input_data_format=input_data_format,
770
+ )
771
+ if annotation is not None:
772
+ annotation = self._update_annotation_for_padded_image(
773
+ annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
774
+ )
775
+ return padded_image, annotation
776
+
777
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
778
+ def pad(
779
+ self,
780
+ images: List[np.ndarray],
781
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
782
+ constant_values: Union[float, Iterable[float]] = 0,
783
+ return_pixel_mask: bool = True,
784
+ return_tensors: Optional[Union[str, TensorType]] = None,
785
+ data_format: Optional[ChannelDimension] = None,
786
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
787
+ update_bboxes: bool = True,
788
+ ) -> BatchFeature:
789
+ """
790
+ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
791
+ in the batch and optionally returns their corresponding pixel mask.
792
+
793
+ Args:
794
+ images (List[`np.ndarray`]):
795
+ Images to pad.
796
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
797
+ Annotations to transform according to the padding that is applied to the images.
798
+ constant_values (`float` or `Iterable[float]`, *optional*):
799
+ The value to use for the padding if `mode` is `"constant"`.
800
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
801
+ Whether to return a pixel mask.
802
+ return_tensors (`str` or `TensorType`, *optional*):
803
+ The type of tensors to return. Can be one of:
804
+ - Unset: Return a list of `np.ndarray`.
805
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
806
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
807
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
808
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
809
+ data_format (`str` or `ChannelDimension`, *optional*):
810
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
811
+ input_data_format (`ChannelDimension` or `str`, *optional*):
812
+ The channel dimension format of the input image. If not provided, it will be inferred.
813
+ update_bboxes (`bool`, *optional*, defaults to `True`):
814
+ Whether to update the bounding boxes in the annotations to match the padded images. If the
815
+ bounding boxes have not been converted to relative coordinates and `(center_x, center_y, width, height)`
816
+ format, the bounding boxes will not be updated.
817
+ """
818
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
819
+
820
+ annotation_list = annotations if annotations is not None else [None] * len(images)
821
+ padded_images = []
822
+ padded_annotations = []
823
+ for image, annotation in zip(images, annotation_list):
824
+ padded_image, padded_annotation = self._pad_image(
825
+ image,
826
+ pad_size,
827
+ annotation,
828
+ constant_values=constant_values,
829
+ data_format=data_format,
830
+ input_data_format=input_data_format,
831
+ update_bboxes=update_bboxes,
832
+ )
833
+ padded_images.append(padded_image)
834
+ padded_annotations.append(padded_annotation)
835
+
836
+ data = {"pixel_values": padded_images}
837
+
838
+ if return_pixel_mask:
839
+ masks = [
840
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
841
+ for image in images
842
+ ]
843
+ data["pixel_mask"] = masks
844
+
845
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
846
+
847
+ if annotations is not None:
848
+ encoded_inputs["labels"] = [
849
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
850
+ ]
851
+
852
+ return encoded_inputs
853
+
854
+ def preprocess(
855
+ self,
856
+ images: ImageInput,
857
+ annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None,
858
+ return_segmentation_masks: bool = None,
859
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
860
+ do_resize: Optional[bool] = None,
861
+ size: Optional[Dict[str, int]] = None,
862
+ resample=None, # PILImageResampling
863
+ do_rescale: Optional[bool] = None,
864
+ rescale_factor: Optional[Union[int, float]] = None,
865
+ do_normalize: Optional[bool] = None,
866
+ image_mean: Optional[Union[float, List[float]]] = None,
867
+ image_std: Optional[Union[float, List[float]]] = None,
868
+ do_convert_annotations: Optional[bool] = None,
869
+ do_pad: Optional[bool] = None,
870
+ format: Optional[Union[str, AnnotationFormat]] = None,
871
+ return_tensors: Optional[Union[TensorType, str]] = None,
872
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
873
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
874
+ **kwargs,
875
+ ) -> BatchFeature:
876
+ """
877
+ Preprocess an image or a batch of images so that they can be used by the model.
878
+
879
+ Args:
880
+ images (`ImageInput`):
881
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
882
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
883
+ annotations (`List[Dict]` or `List[List[Dict]]`, *optional*):
884
+ List of annotations associated with the image or batch of images. If annotation is for object
885
+ detection, the annotations should be a dictionary with the following keys:
886
+ - "image_id" (`int`): The image id.
887
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
888
+ dictionary. An image can have no annotations, in which case the list should be empty.
889
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys:
890
+ - "image_id" (`int`): The image id.
891
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
892
+ An image can have no segments, in which case the list should be empty.
893
+ - "file_name" (`str`): The file name of the image.
894
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
895
+ Whether to return segmentation masks.
896
+ masks_path (`str` or `pathlib.Path`, *optional*):
897
+ Path to the directory containing the segmentation masks.
898
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
899
+ Whether to resize the image.
900
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
901
+ Size of the image after resizing.
902
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
903
+ Resampling filter to use when resizing the image.
904
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
905
+ Whether to rescale the image.
906
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
907
+ Rescale factor to use when rescaling the image.
908
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
909
+ Whether to normalize the image.
910
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
911
+ Mean to use when normalizing the image.
912
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
913
+ Standard deviation to use when normalizing the image.
914
+ do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
915
+ Whether to convert the annotations to the format expected by the model. Converts the bounding
916
+ boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
917
+ and in relative coordinates.
918
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
919
+ Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch
920
+ and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
921
+ format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
922
+ Format of the annotations.
923
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
924
+ Type of tensors to return. If `None`, will return the list of images.
925
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
926
+ The channel dimension format for the output image. Can be one of:
927
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
928
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
929
+ - Unset: Use the channel dimension format of the input image.
930
+ input_data_format (`ChannelDimension` or `str`, *optional*):
931
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
932
+ from the input image. Can be one of:
933
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
934
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
935
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
936
+ """
937
+ if "pad_and_return_pixel_mask" in kwargs:
938
+ logger.warning_once(
939
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
940
+ "use `do_pad` instead.",
941
+ )
942
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
943
+
944
+ do_resize = self.do_resize if do_resize is None else do_resize
945
+ size = self.size if size is None else size
946
+ size = get_size_dict(size=size, default_to_square=False)
947
+ resample = self.resample if resample is None else resample
948
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
949
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
950
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
951
+ image_mean = self.image_mean if image_mean is None else image_mean
952
+ image_std = self.image_std if image_std is None else image_std
953
+ do_convert_annotations = (
954
+ self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
955
+ )
956
+ do_pad = self.do_pad if do_pad is None else do_pad
957
+ format = self.format if format is None else format
958
+
959
+ # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
960
+
961
+ validate_preprocess_arguments(
962
+ do_rescale=do_rescale,
963
+ rescale_factor=rescale_factor,
964
+ do_normalize=do_normalize,
965
+ image_mean=image_mean,
966
+ image_std=image_std,
967
+ do_resize=do_resize,
968
+ size=size,
969
+ resample=resample,
970
+ )
971
+
972
+ if not is_batched(images):
973
+ images = [images]
974
+ annotations = [annotations] if annotations is not None else None
975
+
976
+ if not valid_images(images):
977
+ raise ValueError(
978
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
979
+ "torch.Tensor, tf.Tensor or jax.ndarray."
980
+ )
981
+ if annotations is not None and len(images) != len(annotations):
982
+ raise ValueError(
983
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
984
+ )
985
+
986
+ format = AnnotationFormat(format)
987
+ if annotations is not None:
988
+ validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
989
+
990
+ if (
991
+ masks_path is not None
992
+ and format == AnnotationFormat.COCO_PANOPTIC
993
+ and not isinstance(masks_path, (pathlib.Path, str))
994
+ ):
995
+ raise ValueError(
996
+ "The path to the directory containing the mask PNG files should be provided as a"
997
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
998
+ )
999
+
1000
+ # All transformations expect numpy arrays
1001
+ images = [to_numpy_array(image) for image in images]
1002
+
1003
+ if is_scaled_image(images[0]) and do_rescale:
1004
+ logger.warning_once(
1005
+ "It looks like you are trying to rescale already rescaled images. If the input"
1006
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
1007
+ )
1008
+
1009
+ if input_data_format is None:
1010
+ # We assume that all images have the same channel dimension format.
1011
+ input_data_format = infer_channel_dimension_format(images[0])
1012
+
1013
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
1014
+ if annotations is not None:
1015
+ prepared_images = []
1016
+ prepared_annotations = []
1017
+ for image, target in zip(images, annotations):
1018
+ target = self.prepare_annotation(
1019
+ image,
1020
+ target,
1021
+ format,
1022
+ return_segmentation_masks=return_segmentation_masks,
1023
+ masks_path=masks_path,
1024
+ input_data_format=input_data_format,
1025
+ )
1026
+ prepared_images.append(image)
1027
+ prepared_annotations.append(target)
1028
+ images = prepared_images
1029
+ annotations = prepared_annotations
1030
+ del prepared_images, prepared_annotations
1031
+
1032
+ # transformations
1033
+ if do_resize:
1034
+ if annotations is not None:
1035
+ resized_images, resized_annotations = [], []
1036
+ for image, target in zip(images, annotations):
1037
+ orig_size = get_image_size(image, input_data_format)
1038
+ resized_image = self.resize(
1039
+ image, size=size, resample=resample, input_data_format=input_data_format
1040
+ )
1041
+ resized_annotation = self.resize_annotation(
1042
+ target, orig_size, get_image_size(resized_image, input_data_format)
1043
+ )
1044
+ resized_images.append(resized_image)
1045
+ resized_annotations.append(resized_annotation)
1046
+ images = resized_images
1047
+ annotations = resized_annotations
1048
+ del resized_images, resized_annotations
1049
+ else:
1050
+ images = [
1051
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
1052
+ for image in images
1053
+ ]
1054
+
1055
+ if do_rescale:
1056
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
1057
+
1058
+ if do_normalize:
1059
+ images = [
1060
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
1061
+ ]
1062
+
1063
+ if do_convert_annotations and annotations is not None:
1064
+ annotations = [
1065
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
1066
+ for annotation, image in zip(annotations, images)
1067
+ ]
1068
+
1069
+ if do_pad:
1070
+ # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
1071
+ encoded_inputs = self.pad(
1072
+ images,
1073
+ annotations=annotations,
1074
+ return_pixel_mask=True,
1075
+ data_format=data_format,
1076
+ input_data_format=input_data_format,
1077
+ return_tensors=return_tensors,
1078
+ update_bboxes=do_convert_annotations,
1079
+ )
1080
+ else:
1081
+ images = [
1082
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
1083
+ for image in images
1084
+ ]
1085
+ encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
1086
+ if annotations is not None:
1087
+ encoded_inputs["labels"] = [
1088
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
1089
+ ]
1090
+
1091
+ return encoded_inputs
1092
+
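Putting the arguments above together, a hedged end-to-end sketch for the COCO detection path; the image and annotation values are synthetic, and PyTorch is assumed for `return_tensors="pt"`:

```python
import numpy as np

from transformers import DetaImageProcessor

image_processor = DetaImageProcessor()

# Dummy RGB image in (height, width, channels) layout with values in [0, 255].
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

# One COCO-detection style annotation; bbox is (top_left_x, top_left_y, width, height).
annotation = {
    "image_id": 0,
    "annotations": [
        {"bbox": [100.0, 120.0, 200.0, 150.0], "category_id": 3, "area": 30000.0, "iscrowd": 0}
    ],
}

encoding = image_processor(images=image, annotations=annotation, return_tensors="pt")
print(encoding["pixel_values"].shape)       # (1, 3, H, W) after resizing and padding
print(list(encoding["labels"][0].keys()))   # includes class_labels, boxes, area, ...
```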
1093
+ def post_process_object_detection(
1094
+ self,
1095
+ outputs,
1096
+ threshold: float = 0.5,
1097
+ target_sizes: Union[TensorType, List[Tuple]] = None,
1098
+ nms_threshold: float = 0.7,
1099
+ ):
1100
+ """
1101
+ Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
1102
+ bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1103
+
1104
+ Args:
1105
+ outputs ([`DetaObjectDetectionOutput`]):
1106
+ Raw outputs of the model.
1107
+ threshold (`float`, *optional*, defaults to 0.5):
1108
+ Score threshold to keep object detection predictions.
1109
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
1110
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
1111
+ (height, width) of each image in the batch. If left to None, predictions will not be resized.
1112
+ nms_threshold (`float`, *optional*, defaults to 0.7):
1113
+ NMS threshold.
1114
+
1115
+ Returns:
1116
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1117
+ in the batch as predicted by the model.
1118
+ """
1119
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1120
+ batch_size, num_queries, num_labels = out_logits.shape
1121
+
1122
+ if target_sizes is not None:
1123
+ if len(out_logits) != len(target_sizes):
1124
+ raise ValueError(
1125
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1126
+ )
1127
+
1128
+ prob = out_logits.sigmoid()
1129
+
1130
+ all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device)
1131
+ all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device)
1132
+ all_boxes = torch.div(all_indexes, out_logits.shape[2], rounding_mode="floor")
1133
+ all_labels = all_indexes % out_logits.shape[2]
1134
+
1135
+ boxes = center_to_corners_format(out_bbox)
1136
+ boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4))
1137
+
1138
+ # and from relative [0, 1] to absolute [0, height] coordinates
1139
+ if target_sizes is not None:
1140
+ if isinstance(target_sizes, List):
1141
+ img_h = torch.Tensor([i[0] for i in target_sizes])
1142
+ img_w = torch.Tensor([i[1] for i in target_sizes])
1143
+ else:
1144
+ img_h, img_w = target_sizes.unbind(1)
1145
+
1146
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
1147
+ boxes = boxes * scale_fct[:, None, :]
1148
+
1149
+ results = []
1150
+ for b in range(batch_size):
1151
+ box = boxes[b]
1152
+ score = all_scores[b]
1153
+ lbls = all_labels[b]
1154
+
1155
+ pre_topk = score.topk(min(10000, num_queries * num_labels)).indices
1156
+ box = box[pre_topk]
1157
+ score = score[pre_topk]
1158
+ lbls = lbls[pre_topk]
1159
+
1160
+ # apply NMS
1161
+ keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100]
1162
+ score = score[keep_inds]
1163
+ lbls = lbls[keep_inds]
1164
+ box = box[keep_inds]
1165
+
1166
+ results.append(
1167
+ {
1168
+ "scores": score[score > threshold],
1169
+ "labels": lbls[score > threshold],
1170
+ "boxes": box[score > threshold],
1171
+ }
1172
+ )
1173
+
1174
+ return results
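A hedged inference sketch around this post-processing step, assuming a DETA checkpoint such as `jozhang97/deta-swin-large` is available and a local image file exists (the path is hypothetical):

```python
import torch
from PIL import Image

from transformers import AutoImageProcessor, AutoModelForObjectDetection

image = Image.open("cats.jpg")  # hypothetical local image

image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large")
model = AutoModelForObjectDetection.from_pretrained("jozhang97/deta-swin-large")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# target_sizes maps the relative boxes back to the original (height, width).
target_sizes = torch.tensor([image.size[::-1]])
results = image_processor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=target_sizes, nms_threshold=0.7
)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```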
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/modeling_deta.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__init__.py ADDED
@@ -0,0 +1,83 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
17
+
18
+
19
+ _import_structure = {"configuration_hubert": ["HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "HubertConfig"]}
20
+
21
+ try:
22
+ if not is_torch_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["modeling_hubert"] = [
28
+ "HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
29
+ "HubertForCTC",
30
+ "HubertForSequenceClassification",
31
+ "HubertModel",
32
+ "HubertPreTrainedModel",
33
+ ]
34
+
35
+
36
+ try:
37
+ if not is_tf_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["modeling_tf_hubert"] = [
43
+ "TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
44
+ "TFHubertForCTC",
45
+ "TFHubertModel",
46
+ "TFHubertPreTrainedModel",
47
+ ]
48
+
49
+ if TYPE_CHECKING:
50
+ from .configuration_hubert import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, HubertConfig
51
+
52
+ try:
53
+ if not is_torch_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ from .modeling_hubert import (
59
+ HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
60
+ HubertForCTC,
61
+ HubertForSequenceClassification,
62
+ HubertModel,
63
+ HubertPreTrainedModel,
64
+ )
65
+
66
+ try:
67
+ if not is_tf_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ pass
71
+ else:
72
+ from .modeling_tf_hubert import (
73
+ TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
74
+ TFHubertForCTC,
75
+ TFHubertModel,
76
+ TFHubertPreTrainedModel,
77
+ )
78
+
79
+
80
+ else:
81
+ import sys
82
+
83
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
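The `_LazyModule` indirection above keeps importing the package cheap: the torch and TensorFlow implementations are only imported the first time one of their symbols is accessed. A small sketch of what that looks like from user code (PyTorch assumed to be installed):

```python
# Registering the package only records the import structure; nothing heavy is loaded yet.
from transformers.models import hubert

# Accessing an attribute triggers the lazy import of modeling_hubert (and hence torch).
print(hubert.HubertConfig().hidden_size)  # 768
print(hubert.HubertModel.__module__)      # transformers.models.hubert.modeling_hubert
```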
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.26 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/configuration_hubert.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (5.92 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.19 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (2.08 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_hubert.cpython-310.pyc ADDED
Binary file (38.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/modeling_tf_hubert.cpython-310.pyc ADDED
Binary file (50.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/configuration_hubert.py ADDED
@@ -0,0 +1,261 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Hubert model configuration"""
16
+
17
+ import functools
18
+ import operator
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class HubertConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`HubertModel`]. It is used to instantiate a
33
+ Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration
34
+ with the defaults will yield a similar configuration to that of the Hubert
35
+ [facebook/hubert-base-ls960](https://huggingface.co/facebook/hubert-base-ls960) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 32):
43
+ Vocabulary size of the Hubert model. Defines the number of different tokens that can be represented by the
45
+ `inputs_ids` passed when calling [`HubertModel`].
46
+ hidden_size (`int`, *optional*, defaults to 768):
47
+ Dimensionality of the encoder layers and the pooler layer.
48
+ num_hidden_layers (`int`, *optional*, defaults to 12):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 12):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 3072):
53
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
54
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
57
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ activation_dropout (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for activations inside the fully connected layer.
61
+ attention_dropout (`float`, *optional*, defaults to 0.1):
62
+ The dropout ratio for the attention probabilities.
63
+ final_dropout (`float`, *optional*, defaults to 0.1):
64
+ The dropout probability for the final projection layer of [`HubertForCTC`].
65
+ layerdrop (`float`, *optional*, defaults to 0.1):
66
+ The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
67
+ details.
68
+ initializer_range (`float`, *optional*, defaults to 0.02):
69
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
70
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
71
+ The epsilon used by the layer normalization layers.
72
+ feat_extract_norm (`str`, *optional*, defaults to `"group"`):
73
+ The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
74
+ normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
75
+ convolutional layers.
76
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
77
+ The dropout probability for output of the feature encoder.
78
+ feat_proj_layer_norm (`bool`, *optional*, defaults to `True`):
79
+ Whether to apply LayerNorm to the output of the feature encoder.
80
+ feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
81
+ The non-linear activation function (function or string) in the 1D convolutional layers of the feature
82
+ extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
83
+ conv_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
84
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
85
+ feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
86
+ conv_stride (`Tuple[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
87
+ A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
88
+ of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
89
+ conv_kernel (`Tuple[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
90
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
91
+ length of *conv_kernel* defines the number of convolutional layers and has to match the length of
92
+ *conv_dim*.
93
+ conv_bias (`bool`, *optional*, defaults to `False`):
94
+ Whether the 1D convolutional layers have a bias.
95
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
96
+ Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
97
+ embeddings layer.
98
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
99
+ Number of groups of 1D convolutional positional embeddings layer.
100
+ do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
101
+ Whether to apply the *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
102
+ True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
103
+ False` corresponds to applying layer norm after the attention layer.
104
+ apply_spec_augment (`bool`, *optional*, defaults to `True`):
105
+ Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
106
+ [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
107
+ Recognition](https://arxiv.org/abs/1904.08779).
108
+ mask_time_prob (`float`, *optional*, defaults to 0.05):
109
+ Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
110
+ procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
111
+ reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
112
+ masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
113
+ actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
114
+ mask_time_length (`int`, *optional*, defaults to 10):
115
+ Length of vector span along the time axis.
116
+ mask_time_min_masks (`int`, *optional*, defaults to 2):
117
+ The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
118
+ irrespective of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
119
+ mask_time_min_masks`.
120
+ mask_feature_prob (`float`, *optional*, defaults to 0.0):
121
+ Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
122
+ masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over
124
+ the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
124
+ span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
125
+ may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
126
+ True`.
127
+ mask_feature_length (`int`, *optional*, defaults to 10):
128
+ Length of vector span along the feature axis.
129
+ mask_feature_min_masks (`int`, *optional*, defaults to 0):
130
+ The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
131
+ step, irrespective of `mask_feature_prob`. Only relevant if
132
+ `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
133
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
134
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
135
+ instance of [`HubertForCTC`].
136
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
137
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
138
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
139
+ of [`HubertForCTC`].
140
+ use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
141
+ Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
142
+ instance of [`HubertForSequenceClassification`].
143
+ classifier_proj_size (`int`, *optional*, defaults to 256):
144
+ Dimensionality of the projection before token mean-pooling for classification.
145
+
146
+ Example:
147
+
148
+ ```python
149
+ >>> from transformers import HubertModel, HubertConfig
150
+
151
+ >>> # Initializing a Hubert facebook/hubert-base-ls960 style configuration
152
+ >>> configuration = HubertConfig()
153
+
154
+ >>> # Initializing a model from the facebook/hubert-base-ls960 style configuration
155
+ >>> model = HubertModel(configuration)
156
+
157
+ >>> # Accessing the model configuration
158
+ >>> configuration = model.config
159
+ ```"""
160
+
161
+ model_type = "hubert"
162
+
163
+ def __init__(
164
+ self,
165
+ vocab_size=32,
166
+ hidden_size=768,
167
+ num_hidden_layers=12,
168
+ num_attention_heads=12,
169
+ intermediate_size=3072,
170
+ hidden_act="gelu",
171
+ hidden_dropout=0.1,
172
+ activation_dropout=0.1,
173
+ attention_dropout=0.1,
174
+ feat_proj_layer_norm=True,
175
+ feat_proj_dropout=0.0,
176
+ final_dropout=0.1,
177
+ layerdrop=0.1,
178
+ initializer_range=0.02,
179
+ layer_norm_eps=1e-5,
180
+ feat_extract_norm="group",
181
+ feat_extract_activation="gelu",
182
+ conv_dim=(512, 512, 512, 512, 512, 512, 512),
183
+ conv_stride=(5, 2, 2, 2, 2, 2, 2),
184
+ conv_kernel=(10, 3, 3, 3, 3, 2, 2),
185
+ conv_bias=False,
186
+ num_conv_pos_embeddings=128,
187
+ num_conv_pos_embedding_groups=16,
188
+ do_stable_layer_norm=False,
189
+ apply_spec_augment=True,
190
+ mask_time_prob=0.05,
191
+ mask_time_length=10,
192
+ mask_time_min_masks=2,
193
+ mask_feature_prob=0.0,
194
+ mask_feature_length=10,
195
+ mask_feature_min_masks=0,
196
+ ctc_loss_reduction="sum",
197
+ ctc_zero_infinity=False,
198
+ use_weighted_layer_sum=False,
199
+ classifier_proj_size=256,
200
+ pad_token_id=0,
201
+ bos_token_id=1,
202
+ eos_token_id=2,
203
+ **kwargs,
204
+ ):
205
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
206
+ self.hidden_size = hidden_size
207
+ self.feat_extract_norm = feat_extract_norm
208
+ self.feat_extract_activation = feat_extract_activation
209
+ self.conv_dim = list(conv_dim)
210
+ self.conv_stride = list(conv_stride)
211
+ self.conv_kernel = list(conv_kernel)
212
+ self.conv_bias = conv_bias
213
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
214
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
215
+ self.num_feat_extract_layers = len(self.conv_dim)
216
+ self.num_hidden_layers = num_hidden_layers
217
+ self.intermediate_size = intermediate_size
218
+ self.hidden_act = hidden_act
219
+ self.num_attention_heads = num_attention_heads
220
+ self.hidden_dropout = hidden_dropout
221
+ self.attention_dropout = attention_dropout
222
+ self.activation_dropout = activation_dropout
223
+ self.feat_proj_layer_norm = feat_proj_layer_norm
224
+ self.feat_proj_dropout = feat_proj_dropout
225
+ self.final_dropout = final_dropout
226
+ self.layerdrop = layerdrop
227
+ self.layer_norm_eps = layer_norm_eps
228
+ self.initializer_range = initializer_range
229
+ self.vocab_size = vocab_size
230
+ self.do_stable_layer_norm = do_stable_layer_norm
231
+ self.use_weighted_layer_sum = use_weighted_layer_sum
232
+ self.classifier_proj_size = classifier_proj_size
233
+
234
+ if (
235
+ (len(self.conv_stride) != self.num_feat_extract_layers)
236
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
237
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
238
+ ):
239
+ raise ValueError(
240
+ "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
241
+ " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
242
+ f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
243
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
244
+ )
245
+
246
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
247
+ self.apply_spec_augment = apply_spec_augment
248
+ self.mask_time_prob = mask_time_prob
249
+ self.mask_time_length = mask_time_length
250
+ self.mask_time_min_masks = mask_time_min_masks
251
+ self.mask_feature_prob = mask_feature_prob
252
+ self.mask_feature_length = mask_feature_length
253
+ self.mask_feature_min_masks = mask_feature_min_masks
254
+
255
+ # ctc loss
256
+ self.ctc_loss_reduction = ctc_loss_reduction
257
+ self.ctc_zero_infinity = ctc_zero_infinity
258
+
259
+ @property
260
+ def inputs_to_logits_ratio(self):
261
+ return functools.reduce(operator.mul, self.conv_stride, 1)
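The property above just multiplies the convolutional strides, so with the default `conv_stride = (5, 2, 2, 2, 2, 2, 2)` every encoder frame covers 320 input samples. A quick check:

```python
from transformers import HubertConfig

config = HubertConfig()
print(config.inputs_to_logits_ratio)           # 5 * 2**6 = 320

# One second of 16 kHz audio therefore yields roughly 16000 // 320 = 50 encoder frames.
print(16000 // config.inputs_to_logits_ratio)  # 50
```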
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,223 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Hubert checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+ from s3prl.hub import distilhubert
22
+
23
+ from transformers import HubertConfig, HubertModel, Wav2Vec2FeatureExtractor, logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+ logger = logging.get_logger(__name__)
28
+
29
+ MAPPING = {
30
+ "post_extract_proj": "feature_projection.projection",
31
+ "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
32
+ "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
33
+ "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
34
+ "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
35
+ "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
36
+ "self_attn_layer_norm": "encoder.layers.*.layer_norm",
37
+ "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
38
+ "fc2": "encoder.layers.*.feed_forward.output_dense",
39
+ "final_layer_norm": "encoder.layers.*.final_layer_norm",
40
+ "encoder.layer_norm": "encoder.layer_norm",
41
+ "mask_emb": "masked_spec_embed",
42
+ }
43
+
44
+
45
+ def set_recursively(hf_pointer, key, value, full_name, weight_type):
46
+ for attribute in key.split("."):
47
+ hf_pointer = getattr(hf_pointer, attribute)
48
+
49
+ if weight_type is not None:
50
+ hf_shape = getattr(hf_pointer, weight_type).shape
51
+ else:
52
+ hf_shape = hf_pointer.shape
53
+
54
+ assert hf_shape == value.shape, (
55
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
56
+ f" {value.shape} for {full_name}"
57
+ )
58
+
59
+ if weight_type == "weight":
60
+ hf_pointer.weight.data = value
61
+ elif weight_type == "weight_g":
62
+ hf_pointer.weight_g.data = value
63
+ elif weight_type == "weight_v":
64
+ hf_pointer.weight_v.data = value
65
+ elif weight_type == "bias":
66
+ hf_pointer.bias.data = value
67
+ else:
68
+ hf_pointer.data = value
69
+
70
+ logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
71
+
72
+
73
+ def recursively_load_weights(fairseq_model, hf_model):
74
+ unused_weights = []
75
+ fairseq_dict = fairseq_model.state_dict()
76
+
77
+ feature_extractor = hf_model.feature_extractor
78
+
79
+ for name, value in fairseq_dict.items():
80
+ is_used = False
81
+ if "conv_layers" in name:
82
+ load_conv_layer(
83
+ name,
84
+ value,
85
+ feature_extractor,
86
+ unused_weights,
87
+ hf_model.config.feat_extract_norm == "group",
88
+ )
89
+ is_used = True
90
+ else:
91
+ for key, mapped_key in MAPPING.items():
92
+ mapped_key = mapped_key
93
+
94
+ if key in name:
95
+ is_used = True
96
+ if "*" in mapped_key:
97
+ layer_index = name.split(key)[0].split(".")[-2]
98
+ mapped_key = mapped_key.replace("*", layer_index)
99
+ if "weight_g" in name:
100
+ weight_type = "weight_g"
101
+ elif "weight_v" in name:
102
+ weight_type = "weight_v"
103
+ elif "weight" in name:
104
+ weight_type = "weight"
105
+ elif "bias" in name:
106
+ weight_type = "bias"
107
+ else:
108
+ weight_type = None
109
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
110
+ continue
111
+ if not is_used:
112
+ unused_weights.append(name)
113
+
114
+ logger.warning(f"Unused weights: {unused_weights}")
115
+
116
+
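
For clarity, a small self-contained sketch of how the `*` placeholder in `MAPPING` is resolved inside `recursively_load_weights`; the fairseq parameter name below is a hypothetical example, not one taken from a real checkpoint.

```python
# Sketch of the "*" substitution performed in recursively_load_weights above.
# The fairseq parameter name is a hypothetical example.
key, mapped_key = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
name = "encoder.layers.3.self_attn.k_proj.weight"

if key in name:
    # "encoder.layers.3." -> ["encoder", "layers", "3", ""] -> "3"
    layer_index = name.split(key)[0].split(".")[-2]
    mapped_key = mapped_key.replace("*", layer_index)

print(mapped_key)  # encoder.layers.3.attention.k_proj
```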
117
+ def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
118
+ name = full_name.split("conv_layers.")[-1]
119
+ items = name.split(".")
120
+ layer_id = int(items[0])
121
+ type_id = int(items[1])
122
+
123
+ if type_id == 0:
124
+ if "bias" in name:
125
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
126
+ f"{full_name} has size {value.shape}, but"
127
+ f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
128
+ )
129
+ feature_extractor.conv_layers[layer_id].conv.bias.data = value
130
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
131
+ elif "weight" in name:
132
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
133
+ f"{full_name} has size {value.shape}, but"
134
+ f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
135
+ )
136
+ feature_extractor.conv_layers[layer_id].conv.weight.data = value
137
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
138
+ elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
139
+ if "bias" in name:
140
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
141
+ f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
142
+ " found."
143
+ )
144
+ feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
145
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
146
+ elif "weight" in name:
147
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
148
+ f"{full_name} has size {value.shape}, but"
149
+ f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
150
+ )
151
+ feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
152
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
153
+ else:
154
+ unused_weights.append(full_name)
155
+
156
+
157
+ def convert_config(model):
158
+ config = HubertConfig()
159
+ fs_config = model.config
160
+
161
+ config.activation_dropout = fs_config.activation_dropout
162
+ config.apply_spec_augment = False
163
+ config.attention_dropout = fs_config.attention_dropout
164
+ config.conv_bias = False
165
+ conv_layers = eval(fs_config.extractor_conv_feature_layers)
166
+ config.conv_dim = [x[0] for x in conv_layers]
167
+ config.conv_kernel = [x[1] for x in conv_layers]
168
+ config.conv_stride = [x[2] for x in conv_layers]
169
+ config.feat_extract_activation = "gelu"
170
+ config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
171
+ config.feat_proj_layer_norm = False
172
+ config.feat_proj_dropout = 0.0
173
+ config.final_dropout = 0.0
174
+ config.hidden_act = fs_config.activation_fn
175
+ config.hidden_dropout = fs_config.dropout
176
+ config.hidden_size = fs_config.encoder_embed_dim
177
+ config.initializer_range = 0.02
178
+ config.intermediate_size = fs_config.encoder_ffn_embed_dim
179
+ config.layer_norm_eps = 1e-5
180
+ config.layerdrop = 0.0
181
+ config.num_attention_heads = fs_config.encoder_attention_heads
182
+ config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
183
+ config.num_conv_pos_embeddings = fs_config.conv_pos
184
+ config.num_feat_extract_layers = len(conv_layers)
185
+ config.num_hidden_layers = fs_config.encoder_layers
186
+
187
+ return config
188
+
189
+
190
+ @torch.no_grad()
191
+ def convert_hubert_checkpoint(pytorch_dump_folder_path, config_path=None):
192
+ """
193
+ Copy/paste/tweak model's weights to transformers design.
194
+ """
195
+ model = distilhubert().model.model
196
+
197
+ if config_path is not None:
198
+ config = HubertConfig.from_pretrained(config_path)
199
+ else:
200
+ config = convert_config(model)
201
+ model = model.eval()
202
+
203
+ feature_extractor = Wav2Vec2FeatureExtractor(
204
+ feature_size=1,
205
+ sampling_rate=16000,
206
+ padding_value=0,
207
+ do_normalize=False,
208
+ return_attention_mask=False,
209
+ )
210
+ hf_model = HubertModel(config)
211
+
212
+ recursively_load_weights(model, hf_model)
213
+
214
+ feature_extractor.save_pretrained(pytorch_dump_folder_path)
215
+ hf_model.save_pretrained(pytorch_dump_folder_path)
216
+
217
+
218
+ if __name__ == "__main__":
219
+ parser = argparse.ArgumentParser()
220
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
221
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
222
+ args = parser.parse_args()
223
+ convert_hubert_checkpoint(args.pytorch_dump_folder_path, args.config_path)
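
For reference, a minimal sketch of driving this converter from Python rather than the CLI. The output folder name is a placeholder, and because `from s3prl.hub import distilhubert` sits at module level, `s3prl` must be installed before the module can even be imported.

```python
# Minimal sketch, assuming s3prl is installed and the import path below matches
# this file's location inside the installed transformers package.
from transformers.models.hubert.convert_distilhubert_original_s3prl_checkpoint_to_pytorch import (
    convert_hubert_checkpoint,
)

# Downloads the DistilHuBERT weights through s3prl, converts them, and writes the
# model plus the Wav2Vec2 feature-extractor configuration to the given folder.
convert_hubert_checkpoint("./distilhubert-converted", config_path=None)
```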
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,69 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Hubert checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import HubertConfig, HubertForSequenceClassification, Wav2Vec2FeatureExtractor, logging
23
+
24
+
25
+ logging.set_verbosity_info()
26
+ logger = logging.get_logger(__name__)
27
+
28
+ SUPPORTED_MODELS = ["UtteranceLevel"]
29
+
30
+
31
+ @torch.no_grad()
32
+ def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
33
+ """
34
+ Copy/paste/tweak model's weights to transformers design.
35
+ """
36
+ checkpoint = torch.load(checkpoint_path, map_location="cpu")
37
+ if checkpoint["Config"]["downstream_expert"]["modelrc"]["select"] not in SUPPORTED_MODELS:
38
+ raise NotImplementedError(f"The supported s3prl models are {SUPPORTED_MODELS}")
39
+
40
+ downstream_dict = checkpoint["Downstream"]
41
+
42
+ hf_config = HubertConfig.from_pretrained(config_path)
43
+ hf_model = HubertForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
44
+ hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
45
+ base_model_name, return_attention_mask=True, do_normalize=False
46
+ )
47
+
48
+ if hf_config.use_weighted_layer_sum:
49
+ hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
50
+
51
+ hf_model.projector.weight.data = downstream_dict["projector.weight"]
52
+ hf_model.projector.bias.data = downstream_dict["projector.bias"]
53
+ hf_model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
54
+ hf_model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
55
+
56
+ hf_feature_extractor.save_pretrained(model_dump_path)
57
+ hf_model.save_pretrained(model_dump_path)
58
+
59
+
60
+ if __name__ == "__main__":
61
+ parser = argparse.ArgumentParser()
62
+ parser.add_argument(
63
+ "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
64
+ )
65
+ parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
66
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
67
+ parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
68
+ args = parser.parse_args()
69
+ convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
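
As with the previous script, a hedged sketch of a direct call; every path and the base-model name below are placeholder values that depend on the s3prl setup being converted.

```python
# Hedged sketch of calling the converter above directly; all arguments are
# placeholders, not paths from a real setup.
from transformers.models.hubert.convert_hubert_original_s3prl_checkpoint_to_pytorch import (
    convert_s3prl_checkpoint,
)

convert_s3prl_checkpoint(
    base_model_name="facebook/hubert-base-ls960",            # HF backbone the s3prl head was trained on
    config_path="./classifier/config.json",                  # HubertConfig including the classifier sizes
    checkpoint_path="./s3prl-downstream/best-states.ckpt",   # s3prl "UtteranceLevel" checkpoint
    model_dump_path="./hubert-seq-classification",           # output folder for the converted model
)
```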
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/modeling_hubert.py ADDED
@@ -0,0 +1,1386 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Hubert model."""
16
+
17
+ import warnings
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import CrossEntropyLoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
28
+ from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import (
31
+ add_code_sample_docstrings,
32
+ add_start_docstrings,
33
+ add_start_docstrings_to_model_forward,
34
+ logging,
35
+ replace_return_docstrings,
36
+ )
37
+ from .configuration_hubert import HubertConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _HIDDEN_STATES_START_POSITION = 1
43
+
44
+ # General docstring
45
+ _CONFIG_FOR_DOC = "HubertConfig"
46
+
47
+ # Base docstring
48
+ _CHECKPOINT_FOR_DOC = "facebook/hubert-large-ls960-ft"
49
+ _EXPECTED_OUTPUT_SHAPE = [1, 292, 768]
50
+
51
+ # CTC docstring
52
+ _CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
53
+ _CTC_EXPECTED_LOSS = 22.68
54
+
55
+ # Audio class docstring
56
+ _SEQ_CLASS_CHECKPOINT = "superb/hubert-base-superb-ks"
57
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'"
58
+ _SEQ_CLASS_EXPECTED_LOSS = 8.53
59
+
60
+
61
+ from ..deprecated._archive_maps import HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
62
+
63
+
64
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
65
+ def _compute_mask_indices(
66
+ shape: Tuple[int, int],
67
+ mask_prob: float,
68
+ mask_length: int,
69
+ attention_mask: Optional[torch.LongTensor] = None,
70
+ min_masks: int = 0,
71
+ ) -> np.ndarray:
72
+ """
73
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
74
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
75
+ CPU as part of the preprocessing during training.
76
+
77
+ Args:
78
+ shape: The shape for which to compute masks. This should be of a tuple of size 2 where
79
+ the first element is the batch size and the second element is the length of the axis to span.
80
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
81
+ independently generated mask spans of length `mask_length` is computed by
82
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
83
+ actual percentage will be smaller.
84
+ mask_length: size of the mask
85
+ min_masks: minimum number of masked spans
86
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
87
+ each batch dimension.
88
+ """
89
+ batch_size, sequence_length = shape
90
+
91
+ if mask_length < 1:
92
+ raise ValueError("`mask_length` has to be bigger than 0.")
93
+
94
+ if mask_length > sequence_length:
95
+ raise ValueError(
96
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
97
+ f" and `sequence_length`: {sequence_length}`"
98
+ )
99
+
100
+ # epsilon is used for probabilistic rounding
101
+ epsilon = np.random.rand(1).item()
102
+
103
+ def compute_num_masked_span(input_length):
104
+ """Given input length, compute how many spans should be masked"""
105
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
106
+ num_masked_span = max(num_masked_span, min_masks)
107
+
108
+ # make sure num masked span <= sequence_length
109
+ if num_masked_span * mask_length > sequence_length:
110
+ num_masked_span = sequence_length // mask_length
111
+
112
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
113
+ if input_length - (mask_length - 1) < num_masked_span:
114
+ num_masked_span = max(input_length - (mask_length - 1), 0)
115
+
116
+ return num_masked_span
117
+
118
+ # compute number of masked spans in batch
119
+ input_lengths = (
120
+ attention_mask.sum(-1).detach().tolist()
121
+ if attention_mask is not None
122
+ else [sequence_length for _ in range(batch_size)]
123
+ )
124
+
125
+ # SpecAugment mask to fill
126
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
127
+ spec_aug_mask_idxs = []
128
+
129
+ max_num_masked_span = compute_num_masked_span(sequence_length)
130
+
131
+ if max_num_masked_span == 0:
132
+ return spec_aug_mask
133
+
134
+ for input_length in input_lengths:
135
+ # compute num of masked spans for this input
136
+ num_masked_span = compute_num_masked_span(input_length)
137
+
138
+ # get random indices to mask
139
+ spec_aug_mask_idx = np.random.choice(
140
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
141
+ )
142
+
143
+ # pick first sampled index that will serve as a dummy index to pad vector
144
+ # to ensure same dimension for all batches due to probabilistic rounding
145
+ # Picking first sample just pads those vectors twice.
146
+ if len(spec_aug_mask_idx) == 0:
147
+ # this case can only happen if `input_length` is strictly smaller than
148
+ # `sequence_length` in which case the last token has to be a padding
149
+ # token which we can use as a dummy mask id
150
+ dummy_mask_idx = sequence_length - 1
151
+ else:
152
+ dummy_mask_idx = spec_aug_mask_idx[0]
153
+
154
+ spec_aug_mask_idx = np.concatenate(
155
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
156
+ )
157
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
158
+
159
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
160
+
161
+ # expand masked indices to masked spans
162
+ spec_aug_mask_idxs = np.broadcast_to(
163
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
164
+ )
165
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
166
+
167
+ # add offset to the starting indexes so that indexes now create a span
168
+ offsets = np.arange(mask_length)[None, None, :]
169
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
170
+ batch_size, max_num_masked_span * mask_length
171
+ )
172
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
173
+
174
+ # ensure that we cannot have indices larger than sequence_length
175
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
176
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
177
+
178
+ # scatter indices to mask
179
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
180
+
181
+ return spec_aug_mask
182
+
183
+
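
A small usage sketch of the helper defined above; the shape, probability, and span length are arbitrary example values.

```python
# Example call to the SpecAugment helper above. The result is a boolean
# (batch, seq_len) numpy array whose True entries form spans of `mask_length`
# steps covering roughly `mask_prob` of each row (less when spans overlap).
from transformers.models.hubert.modeling_hubert import _compute_mask_indices

mask = _compute_mask_indices(shape=(2, 50), mask_prob=0.2, mask_length=5, min_masks=1)
print(mask.shape, mask.dtype)  # (2, 50) bool
print(mask.sum(axis=-1))       # number of masked positions per example (random)
```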
184
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
185
+ class HubertNoLayerNormConvLayer(nn.Module):
186
+ def __init__(self, config, layer_id=0):
187
+ super().__init__()
188
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
189
+ self.out_conv_dim = config.conv_dim[layer_id]
190
+
191
+ self.conv = nn.Conv1d(
192
+ self.in_conv_dim,
193
+ self.out_conv_dim,
194
+ kernel_size=config.conv_kernel[layer_id],
195
+ stride=config.conv_stride[layer_id],
196
+ bias=config.conv_bias,
197
+ )
198
+ self.activation = ACT2FN[config.feat_extract_activation]
199
+
200
+ def forward(self, hidden_states):
201
+ hidden_states = self.conv(hidden_states)
202
+ hidden_states = self.activation(hidden_states)
203
+ return hidden_states
204
+
205
+
206
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
207
+ class HubertLayerNormConvLayer(nn.Module):
208
+ def __init__(self, config, layer_id=0):
209
+ super().__init__()
210
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
211
+ self.out_conv_dim = config.conv_dim[layer_id]
212
+
213
+ self.conv = nn.Conv1d(
214
+ self.in_conv_dim,
215
+ self.out_conv_dim,
216
+ kernel_size=config.conv_kernel[layer_id],
217
+ stride=config.conv_stride[layer_id],
218
+ bias=config.conv_bias,
219
+ )
220
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
221
+ self.activation = ACT2FN[config.feat_extract_activation]
222
+
223
+ def forward(self, hidden_states):
224
+ hidden_states = self.conv(hidden_states)
225
+
226
+ hidden_states = hidden_states.transpose(-2, -1)
227
+ hidden_states = self.layer_norm(hidden_states)
228
+ hidden_states = hidden_states.transpose(-2, -1)
229
+
230
+ hidden_states = self.activation(hidden_states)
231
+ return hidden_states
232
+
233
+
234
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
235
+ class HubertGroupNormConvLayer(nn.Module):
236
+ def __init__(self, config, layer_id=0):
237
+ super().__init__()
238
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
239
+ self.out_conv_dim = config.conv_dim[layer_id]
240
+
241
+ self.conv = nn.Conv1d(
242
+ self.in_conv_dim,
243
+ self.out_conv_dim,
244
+ kernel_size=config.conv_kernel[layer_id],
245
+ stride=config.conv_stride[layer_id],
246
+ bias=config.conv_bias,
247
+ )
248
+ self.activation = ACT2FN[config.feat_extract_activation]
249
+
250
+ self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
251
+
252
+ def forward(self, hidden_states):
253
+ hidden_states = self.conv(hidden_states)
254
+ hidden_states = self.layer_norm(hidden_states)
255
+ hidden_states = self.activation(hidden_states)
256
+ return hidden_states
257
+
258
+
259
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
260
+ class HubertPositionalConvEmbedding(nn.Module):
261
+ def __init__(self, config):
262
+ super().__init__()
263
+ self.conv = nn.Conv1d(
264
+ config.hidden_size,
265
+ config.hidden_size,
266
+ kernel_size=config.num_conv_pos_embeddings,
267
+ padding=config.num_conv_pos_embeddings // 2,
268
+ groups=config.num_conv_pos_embedding_groups,
269
+ )
270
+
271
+ weight_norm = nn.utils.weight_norm
272
+ if hasattr(nn.utils.parametrizations, "weight_norm"):
273
+ weight_norm = nn.utils.parametrizations.weight_norm
274
+
275
+ if is_deepspeed_zero3_enabled():
276
+ import deepspeed
277
+
278
+ with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
279
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
280
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
281
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
282
+ else:
283
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
284
+
285
+ self.padding = HubertSamePadLayer(config.num_conv_pos_embeddings)
286
+ self.activation = ACT2FN[config.feat_extract_activation]
287
+
288
+ def forward(self, hidden_states):
289
+ hidden_states = hidden_states.transpose(1, 2)
290
+
291
+ hidden_states = self.conv(hidden_states)
292
+ hidden_states = self.padding(hidden_states)
293
+ hidden_states = self.activation(hidden_states)
294
+
295
+ hidden_states = hidden_states.transpose(1, 2)
296
+ return hidden_states
297
+
298
+
299
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->Hubert
300
+ class HubertSamePadLayer(nn.Module):
301
+ def __init__(self, num_conv_pos_embeddings):
302
+ super().__init__()
303
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
304
+
305
+ def forward(self, hidden_states):
306
+ if self.num_pad_remove > 0:
307
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
308
+ return hidden_states
309
+
310
+
311
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->Hubert
312
+ class HubertFeatureEncoder(nn.Module):
313
+ """Construct the features from raw audio waveform"""
314
+
315
+ def __init__(self, config):
316
+ super().__init__()
317
+
318
+ if config.feat_extract_norm == "group":
319
+ conv_layers = [HubertGroupNormConvLayer(config, layer_id=0)] + [
320
+ HubertNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
321
+ ]
322
+ elif config.feat_extract_norm == "layer":
323
+ conv_layers = [HubertLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
324
+ else:
325
+ raise ValueError(
326
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
327
+ )
328
+ self.conv_layers = nn.ModuleList(conv_layers)
329
+ self.gradient_checkpointing = False
330
+ self._requires_grad = True
331
+
332
+ def _freeze_parameters(self):
333
+ for param in self.parameters():
334
+ param.requires_grad = False
335
+ self._requires_grad = False
336
+
337
+ def forward(self, input_values):
338
+ hidden_states = input_values[:, None]
339
+
340
+ # make sure hidden_states require grad for gradient_checkpointing
341
+ if self._requires_grad and self.training:
342
+ hidden_states.requires_grad = True
343
+
344
+ for conv_layer in self.conv_layers:
345
+ if self._requires_grad and self.gradient_checkpointing and self.training:
346
+ hidden_states = self._gradient_checkpointing_func(
347
+ conv_layer.__call__,
348
+ hidden_states,
349
+ )
350
+ else:
351
+ hidden_states = conv_layer(hidden_states)
352
+
353
+ return hidden_states
354
+
355
+
356
+ class HubertFeatureExtractor(HubertFeatureEncoder):
357
+ def __init__(self, config):
358
+ super().__init__(config)
359
+ warnings.warn(
360
+ f"The class `{self.__class__.__name__}` has been depreciated "
361
+ "and will be removed in Transformers v5. "
362
+ f"Use `{self.__class__.__bases__[0].__name__}` instead.",
363
+ FutureWarning,
364
+ )
365
+
366
+
367
+ class HubertFeatureProjection(nn.Module):
368
+ def __init__(self, config):
369
+ super().__init__()
370
+ self.feat_proj_layer_norm = config.feat_proj_layer_norm
371
+ if self.feat_proj_layer_norm:
372
+ self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
373
+ self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
374
+ self.dropout = nn.Dropout(config.feat_proj_dropout)
375
+
376
+ def forward(self, hidden_states):
377
+ # non-projected hidden states are needed for quantization
378
+ if self.feat_proj_layer_norm:
379
+ hidden_states = self.layer_norm(hidden_states)
380
+ hidden_states = self.projection(hidden_states)
381
+ hidden_states = self.dropout(hidden_states)
382
+ return hidden_states
383
+
384
+
385
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Hubert
386
+ class HubertAttention(nn.Module):
387
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
388
+
389
+ def __init__(
390
+ self,
391
+ embed_dim: int,
392
+ num_heads: int,
393
+ dropout: float = 0.0,
394
+ is_decoder: bool = False,
395
+ bias: bool = True,
396
+ is_causal: bool = False,
397
+ config: Optional[HubertConfig] = None,
398
+ ):
399
+ super().__init__()
400
+ self.embed_dim = embed_dim
401
+ self.num_heads = num_heads
402
+ self.dropout = dropout
403
+ self.head_dim = embed_dim // num_heads
404
+ self.config = config
405
+
406
+ if (self.head_dim * num_heads) != self.embed_dim:
407
+ raise ValueError(
408
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
409
+ f" and `num_heads`: {num_heads})."
410
+ )
411
+ self.scaling = self.head_dim**-0.5
412
+ self.is_decoder = is_decoder
413
+ self.is_causal = is_causal
414
+
415
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
416
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
417
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
418
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
419
+
420
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
421
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
422
+
423
+ def forward(
424
+ self,
425
+ hidden_states: torch.Tensor,
426
+ key_value_states: Optional[torch.Tensor] = None,
427
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
428
+ attention_mask: Optional[torch.Tensor] = None,
429
+ layer_head_mask: Optional[torch.Tensor] = None,
430
+ output_attentions: bool = False,
431
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
432
+ """Input shape: Batch x Time x Channel"""
433
+
434
+ # if key_value_states are provided this layer is used as a cross-attention layer
435
+ # for the decoder
436
+ is_cross_attention = key_value_states is not None
437
+
438
+ bsz, tgt_len, _ = hidden_states.size()
439
+
440
+ # get query proj
441
+ query_states = self.q_proj(hidden_states) * self.scaling
442
+ # get key, value proj
443
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
444
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
445
+ # the provided `key_value_states` to support prefix tuning
446
+ if (
447
+ is_cross_attention
448
+ and past_key_value is not None
449
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
450
+ ):
451
+ # reuse k,v, cross_attentions
452
+ key_states = past_key_value[0]
453
+ value_states = past_key_value[1]
454
+ elif is_cross_attention:
455
+ # cross_attentions
456
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
457
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
458
+ elif past_key_value is not None:
459
+ # reuse k, v, self_attention
460
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
461
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
462
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
463
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
464
+ else:
465
+ # self_attention
466
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
467
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
468
+
469
+ if self.is_decoder:
470
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
471
+ # Further calls to cross_attention layer can then reuse all cross-attention
472
+ # key/value_states (first "if" case)
473
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
474
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
475
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
476
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
477
+ past_key_value = (key_states, value_states)
478
+
479
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
480
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
481
+ key_states = key_states.reshape(*proj_shape)
482
+ value_states = value_states.reshape(*proj_shape)
483
+
484
+ src_len = key_states.size(1)
485
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
486
+
487
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
488
+ raise ValueError(
489
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
490
+ f" {attn_weights.size()}"
491
+ )
492
+
493
+ if attention_mask is not None:
494
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
495
+ raise ValueError(
496
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
497
+ )
498
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
499
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
500
+
501
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
502
+
503
+ if layer_head_mask is not None:
504
+ if layer_head_mask.size() != (self.num_heads,):
505
+ raise ValueError(
506
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
507
+ f" {layer_head_mask.size()}"
508
+ )
509
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
510
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
511
+
512
+ if output_attentions:
513
+ # this operation is a bit awkward, but it's required to
514
+ # make sure that attn_weights keeps its gradient.
515
+ # In order to do so, attn_weights have to be reshaped
516
+ # twice and have to be reused in the following
517
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
518
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
519
+ else:
520
+ attn_weights_reshaped = None
521
+
522
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
523
+
524
+ attn_output = torch.bmm(attn_probs, value_states)
525
+
526
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
527
+ raise ValueError(
528
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
529
+ f" {attn_output.size()}"
530
+ )
531
+
532
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
533
+ attn_output = attn_output.transpose(1, 2)
534
+
535
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
536
+ # partitioned across GPUs when using tensor-parallelism.
537
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
538
+
539
+ attn_output = self.out_proj(attn_output)
540
+
541
+ return attn_output, attn_weights_reshaped, past_key_value
542
+
543
+
544
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->Hubert
545
+ class HubertFeedForward(nn.Module):
546
+ def __init__(self, config):
547
+ super().__init__()
548
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
549
+
550
+ self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
551
+ if isinstance(config.hidden_act, str):
552
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
553
+ else:
554
+ self.intermediate_act_fn = config.hidden_act
555
+
556
+ self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
557
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
558
+
559
+ def forward(self, hidden_states):
560
+ hidden_states = self.intermediate_dense(hidden_states)
561
+ hidden_states = self.intermediate_act_fn(hidden_states)
562
+ hidden_states = self.intermediate_dropout(hidden_states)
563
+
564
+ hidden_states = self.output_dense(hidden_states)
565
+ hidden_states = self.output_dropout(hidden_states)
566
+ return hidden_states
567
+
568
+
569
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->Hubert
570
+ class HubertEncoderLayer(nn.Module):
571
+ def __init__(self, config):
572
+ super().__init__()
573
+ self.attention = HubertAttention(
574
+ embed_dim=config.hidden_size,
575
+ num_heads=config.num_attention_heads,
576
+ dropout=config.attention_dropout,
577
+ is_decoder=False,
578
+ )
579
+ self.dropout = nn.Dropout(config.hidden_dropout)
580
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
581
+ self.feed_forward = HubertFeedForward(config)
582
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
583
+
584
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
585
+ attn_residual = hidden_states
586
+ hidden_states, attn_weights, _ = self.attention(
587
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
588
+ )
589
+ hidden_states = self.dropout(hidden_states)
590
+ hidden_states = attn_residual + hidden_states
591
+
592
+ hidden_states = self.layer_norm(hidden_states)
593
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
594
+ hidden_states = self.final_layer_norm(hidden_states)
595
+
596
+ outputs = (hidden_states,)
597
+
598
+ if output_attentions:
599
+ outputs += (attn_weights,)
600
+
601
+ return outputs
602
+
603
+
604
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AttnAdapterLayer with Wav2Vec2->Hubert
605
+ class HubertAttnAdapterLayer(nn.Module):
606
+ def __init__(self, config):
607
+ """
608
+ Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
609
+ up training throughput.
610
+ """
611
+ super().__init__()
612
+ self.input_dim = config.adapter_attn_dim
613
+ self.hidden_dim = config.hidden_size
614
+
615
+ self.norm = nn.LayerNorm(self.hidden_dim)
616
+ self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
617
+ self.act_fn = nn.ReLU()
618
+ self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)
619
+
620
+ def forward(self, hidden_states: torch.FloatTensor):
621
+ hidden_states = self.norm(hidden_states)
622
+
623
+ hidden_states = self.linear_1(hidden_states)
624
+ hidden_states = self.act_fn(hidden_states)
625
+ hidden_states = self.linear_2(hidden_states)
626
+
627
+ return hidden_states
628
+
629
+
630
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert
631
+ class HubertEncoderLayerStableLayerNorm(nn.Module):
632
+ def __init__(self, config):
633
+ super().__init__()
634
+ self.attention = HubertAttention(
635
+ embed_dim=config.hidden_size,
636
+ num_heads=config.num_attention_heads,
637
+ dropout=config.attention_dropout,
638
+ is_decoder=False,
639
+ )
640
+ self.dropout = nn.Dropout(config.hidden_dropout)
641
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
642
+ self.feed_forward = HubertFeedForward(config)
643
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
644
+
645
+ if getattr(config, "adapter_attn_dim", None) is not None:
646
+ self.adapter_layer = HubertAttnAdapterLayer(config)
647
+ else:
648
+ self.adapter_layer = None
649
+
650
+ def forward(
651
+ self,
652
+ hidden_states: torch.Tensor,
653
+ attention_mask: Optional[torch.Tensor] = None,
654
+ output_attentions: bool = False,
655
+ ):
656
+ attn_residual = hidden_states
657
+ hidden_states = self.layer_norm(hidden_states)
658
+ hidden_states, attn_weights, _ = self.attention(
659
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
660
+ )
661
+ hidden_states = self.dropout(hidden_states)
662
+ hidden_states = attn_residual + hidden_states
663
+ hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
664
+
665
+ if self.adapter_layer is not None:
666
+ hidden_states = hidden_states + self.adapter_layer(hidden_states)
667
+
668
+ outputs = (hidden_states,)
669
+
670
+ if output_attentions:
671
+ outputs += (attn_weights,)
672
+
673
+ return outputs
674
+
675
+
676
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->Hubert
677
+ class HubertEncoder(nn.Module):
678
+ def __init__(self, config):
679
+ super().__init__()
680
+ self.config = config
681
+ self.pos_conv_embed = HubertPositionalConvEmbedding(config)
682
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
683
+ self.dropout = nn.Dropout(config.hidden_dropout)
684
+ self.layers = nn.ModuleList([HubertEncoderLayer(config) for _ in range(config.num_hidden_layers)])
685
+ self.gradient_checkpointing = False
686
+
687
+ def forward(
688
+ self,
689
+ hidden_states: torch.tensor,
690
+ attention_mask: Optional[torch.Tensor] = None,
691
+ output_attentions: bool = False,
692
+ output_hidden_states: bool = False,
693
+ return_dict: bool = True,
694
+ ):
695
+ all_hidden_states = () if output_hidden_states else None
696
+ all_self_attentions = () if output_attentions else None
697
+
698
+ if attention_mask is not None:
699
+ # make sure padded tokens output 0
700
+ expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
701
+ hidden_states[~expand_attention_mask] = 0
702
+
703
+ # extend attention_mask
704
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
705
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
706
+ attention_mask = attention_mask.expand(
707
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
708
+ )
709
+
710
+ position_embeddings = self.pos_conv_embed(hidden_states)
711
+ hidden_states = hidden_states + position_embeddings
712
+ hidden_states = self.layer_norm(hidden_states)
713
+ hidden_states = self.dropout(hidden_states)
714
+
715
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
716
+
717
+ for layer in self.layers:
718
+ if output_hidden_states:
719
+ all_hidden_states = all_hidden_states + (hidden_states,)
720
+
721
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
722
+ dropout_probability = torch.rand([])
723
+
724
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
725
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
726
+ # under deepspeed zero3 all gpus must run in sync
727
+ if self.gradient_checkpointing and self.training:
728
+ layer_outputs = self._gradient_checkpointing_func(
729
+ layer.__call__,
730
+ hidden_states,
731
+ attention_mask,
732
+ output_attentions,
733
+ )
734
+ else:
735
+ layer_outputs = layer(
736
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
737
+ )
738
+ hidden_states = layer_outputs[0]
739
+
740
+ if skip_the_layer:
741
+ layer_outputs = (None, None)
742
+
743
+ if output_attentions:
744
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
745
+
746
+ if output_hidden_states:
747
+ all_hidden_states = all_hidden_states + (hidden_states,)
748
+
749
+ if not return_dict:
750
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
751
+ return BaseModelOutput(
752
+ last_hidden_state=hidden_states,
753
+ hidden_states=all_hidden_states,
754
+ attentions=all_self_attentions,
755
+ )
756
+
757
+
758
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
759
+ class HubertEncoderStableLayerNorm(nn.Module):
760
+ def __init__(self, config):
761
+ super().__init__()
762
+ self.config = config
763
+ self.pos_conv_embed = HubertPositionalConvEmbedding(config)
764
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
765
+ self.dropout = nn.Dropout(config.hidden_dropout)
766
+ self.layers = nn.ModuleList(
767
+ [HubertEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
768
+ )
769
+ self.gradient_checkpointing = False
770
+
771
+ def forward(
772
+ self,
773
+ hidden_states,
774
+ attention_mask=None,
775
+ output_attentions=False,
776
+ output_hidden_states=False,
777
+ return_dict=True,
778
+ ):
779
+ all_hidden_states = () if output_hidden_states else None
780
+ all_self_attentions = () if output_attentions else None
781
+
782
+ if attention_mask is not None:
783
+ # make sure padded tokens are not attended to
784
+ expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
785
+ hidden_states[~expand_attention_mask] = 0
786
+
787
+ # extend attention_mask
788
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
789
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
790
+ attention_mask = attention_mask.expand(
791
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
792
+ )
793
+
794
+ position_embeddings = self.pos_conv_embed(hidden_states)
795
+ hidden_states = hidden_states + position_embeddings
796
+ hidden_states = self.dropout(hidden_states)
797
+
798
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
799
+
800
+ for layer in self.layers:
801
+ if output_hidden_states:
802
+ all_hidden_states = all_hidden_states + (hidden_states,)
803
+
804
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
805
+ dropout_probability = torch.rand([])
806
+
807
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
808
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
809
+ # under deepspeed zero3 all gpus must run in sync
810
+ # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
811
+ if self.gradient_checkpointing and self.training:
812
+ layer_outputs = self._gradient_checkpointing_func(
813
+ layer.__call__,
814
+ hidden_states,
815
+ attention_mask,
816
+ output_attentions,
817
+ )
818
+ else:
819
+ layer_outputs = layer(
820
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
821
+ )
822
+ hidden_states = layer_outputs[0]
823
+
824
+ if skip_the_layer:
825
+ layer_outputs = (None, None)
826
+
827
+ if output_attentions:
828
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
829
+
830
+ hidden_states = self.layer_norm(hidden_states)
831
+
832
+ if output_hidden_states:
833
+ all_hidden_states = all_hidden_states + (hidden_states,)
834
+
835
+ if not return_dict:
836
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
837
+ return BaseModelOutput(
838
+ last_hidden_state=hidden_states,
839
+ hidden_states=all_hidden_states,
840
+ attentions=all_self_attentions,
841
+ )
842
+
843
+
844
+ class HubertPreTrainedModel(PreTrainedModel):
845
+ """
846
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
847
+ models.
848
+ """
849
+
850
+ config_class = HubertConfig
851
+ base_model_prefix = "hubert"
852
+ main_input_name = "input_values"
853
+ supports_gradient_checkpointing = True
854
+
855
+ def _init_weights(self, module):
856
+ """Initialize the weights"""
857
+ if isinstance(module, nn.Linear):
858
+ # Slightly different from the TF version which uses truncated_normal for initialization
859
+ # cf https://github.com/pytorch/pytorch/pull/5617
860
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
861
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
862
+ module.bias.data.zero_()
863
+ module.weight.data.fill_(1.0)
864
+ elif isinstance(module, nn.Conv1d):
865
+ if is_deepspeed_zero3_enabled():
866
+ import deepspeed
867
+
868
+ if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
869
+ with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
870
+ nn.init.kaiming_normal_(module.weight.data)
871
+ else:
872
+ with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
873
+ nn.init.kaiming_normal_(module.weight.data)
874
+ else:
875
+ nn.init.kaiming_normal_(module.weight.data)
876
+
877
+ if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
878
+ module.bias.data.zero_()
879
+
880
+ def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
881
+ """
882
+ Computes the output length of the convolutional layers
883
+ """
884
+
885
+ def _conv_out_length(input_length, kernel_size, stride):
886
+ # 1D convolutional layer output length formula taken
887
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
888
+ return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
889
+
890
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
891
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
892
+
893
+ return input_lengths
894
+
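
A worked example of the per-layer formula used above, `(L - kernel) // stride + 1`, applied layer by layer. The kernel and stride lists mirror the feature encoder of the typical base configuration and are shown here purely for illustration.

```python
# Worked example of the convolutional output-length computation above.
def conv_output_length(input_length: int, kernels, strides) -> int:
    for k, s in zip(kernels, strides):
        input_length = (input_length - k) // s + 1
    return input_length

kernels = (10, 3, 3, 3, 3, 2, 2)
strides = (5, 2, 2, 2, 2, 2, 2)
print(conv_output_length(16000, kernels, strides))  # 49 frames for 1 s of 16 kHz audio (~20 ms hop)
```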
895
+ def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
896
+ output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
897
+ batch_size = attention_mask.shape[0]
898
+
899
+ attention_mask = torch.zeros(
900
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
901
+ )
902
+ # these two operations make sure that all values before the output length indices are attended to
903
+ attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
904
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
905
+ return attention_mask
906
+
907
+
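
A tiny illustration of the flip/cumsum/flip trick used in `_get_feature_vector_attention_mask` above, run on a hand-made tensor.

```python
# Setting a single 1 at index `output_length - 1` and taking the flipped
# cumulative sum turns it into a mask covering every position up to that index.
import torch

mask = torch.zeros(1, 6, dtype=torch.long)
mask[0, 3] = 1  # output length of 4 -> last valid frame at index 3
mask = mask.flip([-1]).cumsum(-1).flip([-1]).bool()
print(mask)     # tensor([[ True,  True,  True,  True, False, False]])
```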
908
+ HUBERT_START_DOCSTRING = r"""
909
+ Hubert was proposed in [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden
910
+ Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia,
911
+ Ruslan Salakhutdinov, Abdelrahman Mohamed.
912
+
913
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
914
+ library implements for all its models (such as downloading or saving, etc.).
915
+
916
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
917
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
918
+ behavior.
919
+
920
+ Parameters:
921
+ config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
922
+ Initializing with a config file does not load the weights associated with the model, only the
923
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
924
+ """
925
+
926
+
927
+ HUBERT_INPUTS_DOCSTRING = r"""
928
+ Args:
929
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
930
+ Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
931
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
932
+ soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
933
+ conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
934
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
935
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
936
+ 1]`:
937
+
938
+ - 1 for tokens that are **not masked**,
939
+ - 0 for tokens that are **masked**.
940
+
941
+ [What are attention masks?](../glossary#attention-mask)
942
+
943
+ <Tip warning={true}>
944
+
945
+ `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
946
+ True`. For all models whose processor has `config.return_attention_mask == False`, such as
947
+ [hubert-base](https://huggingface.co/facebook/hubert-base-ls960), `attention_mask` should **not** be passed
948
+ to avoid degraded performance when doing batched inference. For such models `input_values` should simply be
949
+ padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different
950
+ results depending on whether `input_values` is padded or not.
951
+
952
+ </Tip>
953
+
954
+ output_attentions (`bool`, *optional*):
955
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
956
+ tensors for more detail.
957
+ output_hidden_states (`bool`, *optional*):
958
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
959
+ more detail.
960
+ return_dict (`bool`, *optional*):
961
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
962
+ """
963
+
964
+
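
A hedged sketch of the attention-mask caveat described in the docstring above: forward the mask only when the feature extractor is configured to return one. The checkpoint name is just an example and the silent waveform stands in for real audio.

```python
# Sketch of conditionally passing `attention_mask`, following the tip above.
import torch
from transformers import AutoProcessor, HubertModel

processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")

speech = [0.0] * 16000  # 1 s of silence as a stand-in for real audio
inputs = processor(speech, sampling_rate=16000, return_tensors="pt", padding=True)

with torch.no_grad():
    if processor.feature_extractor.return_attention_mask:
        outputs = model(inputs.input_values, attention_mask=inputs.get("attention_mask"))
    else:
        outputs = model(inputs.input_values)  # pad with 0 and omit the mask
print(outputs.last_hidden_state.shape)
```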
965
+ @add_start_docstrings(
966
+ "The bare Hubert Model transformer outputting raw hidden-states without any specific head on top.",
967
+ HUBERT_START_DOCSTRING,
968
+ )
969
+ class HubertModel(HubertPreTrainedModel):
970
+ def __init__(self, config: HubertConfig):
971
+ super().__init__(config)
972
+ self.config = config
973
+ self.feature_extractor = HubertFeatureEncoder(config)
974
+ self.feature_projection = HubertFeatureProjection(config)
975
+
976
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
977
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
978
+
979
+ if config.do_stable_layer_norm:
980
+ self.encoder = HubertEncoderStableLayerNorm(config)
981
+ else:
982
+ self.encoder = HubertEncoder(config)
983
+
984
+ # Initialize weights and apply final processing
985
+ self.post_init()
986
+
987
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
988
+ def _mask_hidden_states(
989
+ self,
990
+ hidden_states: torch.FloatTensor,
991
+ mask_time_indices: Optional[torch.FloatTensor] = None,
992
+ attention_mask: Optional[torch.LongTensor] = None,
993
+ ):
994
+ """
995
+ Masks extracted features along time axis and/or along feature axis according to
996
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
997
+ """
998
+
999
+ # `config.apply_spec_augment` can set masking to False
1000
+ if not getattr(self.config, "apply_spec_augment", True):
1001
+ return hidden_states
1002
+
1003
+ # generate indices & apply SpecAugment along time axis
1004
+ batch_size, sequence_length, hidden_size = hidden_states.size()
1005
+
1006
+ if mask_time_indices is not None:
1007
+ # apply SpecAugment along time axis with given mask_time_indices
1008
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
1009
+ elif self.config.mask_time_prob > 0 and self.training:
1010
+ mask_time_indices = _compute_mask_indices(
1011
+ (batch_size, sequence_length),
1012
+ mask_prob=self.config.mask_time_prob,
1013
+ mask_length=self.config.mask_time_length,
1014
+ attention_mask=attention_mask,
1015
+ min_masks=self.config.mask_time_min_masks,
1016
+ )
1017
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
1018
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
1019
+
1020
+ if self.config.mask_feature_prob > 0 and self.training:
1021
+ # generate indices & apply SpecAugment along feature axis
1022
+ mask_feature_indices = _compute_mask_indices(
1023
+ (batch_size, hidden_size),
1024
+ mask_prob=self.config.mask_feature_prob,
1025
+ mask_length=self.config.mask_feature_length,
1026
+ min_masks=self.config.mask_feature_min_masks,
1027
+ )
1028
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
1029
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
1030
+ hidden_states[mask_feature_indices] = 0
1031
+
1032
+ return hidden_states
1033
+
1034
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
1035
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
1036
+ def forward(
1037
+ self,
1038
+ input_values: Optional[torch.Tensor],
1039
+ attention_mask: Optional[torch.Tensor] = None,
1040
+ mask_time_indices: Optional[torch.FloatTensor] = None,
1041
+ output_attentions: Optional[bool] = None,
1042
+ output_hidden_states: Optional[bool] = None,
1043
+ return_dict: Optional[bool] = None,
1044
+ ) -> Union[Tuple, BaseModelOutput]:
1045
+ """
1046
+
1047
+ Returns:
1048
+
1049
+ Example:
1050
+
1051
+ ```python
1052
+ >>> from transformers import AutoProcessor, HubertModel
1053
+ >>> from datasets import load_dataset
1054
+ >>> import soundfile as sf
1055
+
1056
+ >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
1057
+ >>> model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
1058
+
1059
+
1060
+ >>> def map_to_array(batch):
1061
+ ... speech, _ = sf.read(batch["file"])
1062
+ ... batch["speech"] = speech
1063
+ ... return batch
1064
+
1065
+
1066
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1067
+ >>> ds = ds.map(map_to_array)
1068
+
1069
+ >>> input_values = processor(ds["speech"][0], return_tensors="pt").input_values # Batch size 1
1070
+ >>> hidden_states = model(input_values).last_hidden_state
1071
+ ```"""
1072
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1073
+ output_hidden_states = (
1074
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1075
+ )
1076
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1077
+
1078
+ extract_features = self.feature_extractor(input_values)
1079
+ extract_features = extract_features.transpose(1, 2)
1080
+
1081
+ if attention_mask is not None:
1082
+ # compute reduced attention_mask corresponding to feature vectors
1083
+ attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
1084
+
1085
+ hidden_states = self.feature_projection(extract_features)
1086
+ hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
1087
+
1088
+ encoder_outputs = self.encoder(
1089
+ hidden_states,
1090
+ attention_mask=attention_mask,
1091
+ output_attentions=output_attentions,
1092
+ output_hidden_states=output_hidden_states,
1093
+ return_dict=return_dict,
1094
+ )
1095
+
1096
+ hidden_states = encoder_outputs[0]
1097
+
1098
+ if not return_dict:
1099
+ return (hidden_states,) + encoder_outputs[1:]
1100
+
1101
+ return BaseModelOutput(
1102
+ last_hidden_state=hidden_states,
1103
+ hidden_states=encoder_outputs.hidden_states,
1104
+ attentions=encoder_outputs.attentions,
1105
+ )
1106
+
1107
+
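As a complement to the docstring example above (which reads files with `soundfile`), the sketch below uses the dataset's decoded `audio` column and passes an explicit `sampling_rate`. Checkpoint and dataset names mirror the example above; network access is assumed.

```python
from datasets import load_dataset
from transformers import AutoProcessor, HubertModel

processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = ds[0]["audio"]  # dict with "array" and "sampling_rate"

inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt")
hidden_states = model(**inputs).last_hidden_state  # (1, num_frames, hidden_size)
print(hidden_states.shape)
```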
1108
+ @add_start_docstrings(
1109
+ """Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
1110
+ HUBERT_START_DOCSTRING,
1111
+ )
1112
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->Hubert, wav2vec2->hubert, WAV_2_VEC_2->HUBERT
1113
+ class HubertForCTC(HubertPreTrainedModel):
1114
+ def __init__(self, config, target_lang: Optional[str] = None):
1115
+ super().__init__(config)
1116
+
1117
+ self.hubert = HubertModel(config)
1118
+ self.dropout = nn.Dropout(config.final_dropout)
1119
+
1120
+ self.target_lang = target_lang
1121
+
1122
+ if config.vocab_size is None:
1123
+ raise ValueError(
1124
+ f"You are trying to instantiate {self.__class__} with a configuration that "
1125
+ "does not define the vocabulary size of the language model head. Please "
1126
+ "instantiate the model as follows: `HubertForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
1127
+ "or define `vocab_size` of your model's configuration."
1128
+ )
1129
+ output_hidden_size = (
1130
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
1131
+ )
1132
+ self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
1133
+
1134
+ # Initialize weights and apply final processing
1135
+ self.post_init()
1136
+
1137
+ def tie_weights(self):
1138
+ """
1139
+ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
1140
+ passing `target_lang=...` to `from_pretrained(...)`.
1141
+
1142
+ This method is **not** supposed to be called by the user and is prone to be changed in the future.
1143
+ """
1144
+
1145
+ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
1146
+ # correctly load adapter layers for Hubert so that we do not have to introduce a new API to
1147
+ # [`PreTrainedModel`]. While slightly hacky, Hubert never has to tie input and output embeddings, so that it is
1148
+ # ok to repurpose this function here.
1149
+ target_lang = self.target_lang
1150
+
1151
+ if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
1152
+ raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
1153
+ elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
1154
+ logger.info("By default `target_lang` is set to 'eng'.")
1155
+ elif target_lang is not None:
1156
+ self.load_adapter(target_lang, force_load=True)
1157
+
1158
+ def freeze_feature_extractor(self):
1159
+ """
1160
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1161
+ not be updated during training.
1162
+ """
1163
+ warnings.warn(
1164
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1165
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1166
+ FutureWarning,
1167
+ )
1168
+ self.freeze_feature_encoder()
1169
+
1170
+ def freeze_feature_encoder(self):
1171
+ """
1172
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1173
+ not be updated during training.
1174
+ """
1175
+ self.hubert.feature_extractor._freeze_parameters()
1176
+
1177
+ def freeze_base_model(self):
1178
+ """
1179
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1180
+ be updated during training. Only the classification head will be updated.
1181
+ """
1182
+ for param in self.hubert.parameters():
1183
+ param.requires_grad = False
1184
+
1185
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
1186
+ @add_code_sample_docstrings(
1187
+ checkpoint=_CHECKPOINT_FOR_DOC,
1188
+ output_type=CausalLMOutput,
1189
+ config_class=_CONFIG_FOR_DOC,
1190
+ expected_output=_CTC_EXPECTED_OUTPUT,
1191
+ expected_loss=_CTC_EXPECTED_LOSS,
1192
+ )
1193
+ def forward(
1194
+ self,
1195
+ input_values: Optional[torch.Tensor],
1196
+ attention_mask: Optional[torch.Tensor] = None,
1197
+ output_attentions: Optional[bool] = None,
1198
+ output_hidden_states: Optional[bool] = None,
1199
+ return_dict: Optional[bool] = None,
1200
+ labels: Optional[torch.Tensor] = None,
1201
+ ) -> Union[Tuple, CausalLMOutput]:
1202
+ r"""
1203
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
1204
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller than or equal to
1205
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
1206
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
1207
+ config.vocab_size - 1]`.
1208
+ """
1209
+
1210
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1211
+
1212
+ outputs = self.hubert(
1213
+ input_values,
1214
+ attention_mask=attention_mask,
1215
+ output_attentions=output_attentions,
1216
+ output_hidden_states=output_hidden_states,
1217
+ return_dict=return_dict,
1218
+ )
1219
+
1220
+ hidden_states = outputs[0]
1221
+ hidden_states = self.dropout(hidden_states)
1222
+
1223
+ logits = self.lm_head(hidden_states)
1224
+
1225
+ loss = None
1226
+ if labels is not None:
1227
+ if labels.max() >= self.config.vocab_size:
1228
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
1229
+
1230
+ # retrieve loss input_lengths from attention_mask
1231
+ attention_mask = (
1232
+ attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
1233
+ )
1234
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
1235
+
1236
+ # assuming that padded tokens are filled with -100
1237
+ # when not being attended to
1238
+ labels_mask = labels >= 0
1239
+ target_lengths = labels_mask.sum(-1)
1240
+ flattened_targets = labels.masked_select(labels_mask)
1241
+
1242
+ # ctc_loss doesn't support fp16
1243
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
1244
+
1245
+ with torch.backends.cudnn.flags(enabled=False):
1246
+ loss = nn.functional.ctc_loss(
1247
+ log_probs,
1248
+ flattened_targets,
1249
+ input_lengths,
1250
+ target_lengths,
1251
+ blank=self.config.pad_token_id,
1252
+ reduction=self.config.ctc_loss_reduction,
1253
+ zero_infinity=self.config.ctc_zero_infinity,
1254
+ )
1255
+
1256
+ if not return_dict:
1257
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1258
+ return ((loss,) + output) if loss is not None else output
1259
+
1260
+ return CausalLMOutput(
1261
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1262
+ )
1263
+
1264
+
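A short end-to-end sketch of the CTC head defined above: run the model and greedy-decode the logits with the processor. Checkpoint and dataset follow the documentation examples in this file; this is illustrative, not part of the module.

```python
import torch
from datasets import load_dataset
from transformers import AutoProcessor, HubertForCTC

processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
model = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = ds[0]["audio"]

inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_frames, vocab_size)

predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
print(transcription)
```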
1265
+ @add_start_docstrings(
1266
+ """
1267
+ Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
1268
+ SUPERB Keyword Spotting.
1269
+ """,
1270
+ HUBERT_START_DOCSTRING,
1271
+ )
1272
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->Hubert, wav2vec2->hubert, WAV_2_VEC_2->HUBERT
1273
+ class HubertForSequenceClassification(HubertPreTrainedModel):
1274
+ def __init__(self, config):
1275
+ super().__init__(config)
1276
+
1277
+ if hasattr(config, "add_adapter") and config.add_adapter:
1278
+ raise ValueError(
1279
+ "Sequence classification does not support the use of Hubert adapters (config.add_adapter=True)"
1280
+ )
1281
+ self.hubert = HubertModel(config)
1282
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1283
+ if config.use_weighted_layer_sum:
1284
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1285
+ self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
1286
+ self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
1287
+
1288
+ # Initialize weights and apply final processing
1289
+ self.post_init()
1290
+
1291
+ def freeze_feature_extractor(self):
1292
+ """
1293
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1294
+ not be updated during training.
1295
+ """
1296
+ warnings.warn(
1297
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1298
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1299
+ FutureWarning,
1300
+ )
1301
+ self.freeze_feature_encoder()
1302
+
1303
+ def freeze_feature_encoder(self):
1304
+ """
1305
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1306
+ not be updated during training.
1307
+ """
1308
+ self.hubert.feature_extractor._freeze_parameters()
1309
+
1310
+ def freeze_base_model(self):
1311
+ """
1312
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1313
+ be updated during training. Only the classification head will be updated.
1314
+ """
1315
+ for param in self.hubert.parameters():
1316
+ param.requires_grad = False
1317
+
1318
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
1319
+ @add_code_sample_docstrings(
1320
+ checkpoint=_SEQ_CLASS_CHECKPOINT,
1321
+ output_type=SequenceClassifierOutput,
1322
+ config_class=_CONFIG_FOR_DOC,
1323
+ modality="audio",
1324
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1325
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1326
+ )
1327
+ def forward(
1328
+ self,
1329
+ input_values: Optional[torch.Tensor],
1330
+ attention_mask: Optional[torch.Tensor] = None,
1331
+ output_attentions: Optional[bool] = None,
1332
+ output_hidden_states: Optional[bool] = None,
1333
+ return_dict: Optional[bool] = None,
1334
+ labels: Optional[torch.Tensor] = None,
1335
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1336
+ r"""
1337
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1338
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1339
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
1340
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1341
+ """
1342
+
1343
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1344
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1345
+
1346
+ outputs = self.hubert(
1347
+ input_values,
1348
+ attention_mask=attention_mask,
1349
+ output_attentions=output_attentions,
1350
+ output_hidden_states=output_hidden_states,
1351
+ return_dict=return_dict,
1352
+ )
1353
+
1354
+ if self.config.use_weighted_layer_sum:
1355
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1356
+ hidden_states = torch.stack(hidden_states, dim=1)
1357
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1358
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1359
+ else:
1360
+ hidden_states = outputs[0]
1361
+
1362
+ hidden_states = self.projector(hidden_states)
1363
+ if attention_mask is None:
1364
+ pooled_output = hidden_states.mean(dim=1)
1365
+ else:
1366
+ padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
1367
+ hidden_states[~padding_mask] = 0.0
1368
+ pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
1369
+
1370
+ logits = self.classifier(pooled_output)
1371
+
1372
+ loss = None
1373
+ if labels is not None:
1374
+ loss_fct = CrossEntropyLoss()
1375
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
1376
+
1377
+ if not return_dict:
1378
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1379
+ return ((loss,) + output) if loss is not None else output
1380
+
1381
+ return SequenceClassifierOutput(
1382
+ loss=loss,
1383
+ logits=logits,
1384
+ hidden_states=outputs.hidden_states,
1385
+ attentions=outputs.attentions,
1386
+ )
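To close out the PyTorch file, here is a hedged usage sketch for the sequence-classification head, assuming the `superb/hubert-base-superb-ks` keyword-spotting checkpoint (any compatible audio-classification checkpoint works). A feature extractor, rather than a tokenizer-bearing processor, is the right preprocessing class here.

```python
import torch
from datasets import load_dataset
from transformers import AutoFeatureExtractor, HubertForSequenceClassification

feature_extractor = AutoFeatureExtractor.from_pretrained("superb/hubert-base-superb-ks")
model = HubertForSequenceClassification.from_pretrained("superb/hubert-base-superb-ks")

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = ds[0]["audio"]

inputs = feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_labels)

predicted_label = model.config.id2label[int(logits.argmax(dim=-1))]
print(predicted_label)
```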
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/modeling_tf_hubert.py ADDED
@@ -0,0 +1,1676 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TensorFlow Hubert model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ import warnings
20
+ from typing import Any, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput
27
+ from ...modeling_tf_utils import (
28
+ TFPreTrainedModel,
29
+ get_initializer,
30
+ keras,
31
+ keras_serializable,
32
+ unpack_inputs,
33
+ )
34
+ from ...tf_utils import shape_list, stable_softmax
35
+ from ...utils import (
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ logging,
39
+ replace_return_docstrings,
40
+ )
41
+ from .configuration_hubert import HubertConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _CONFIG_FOR_DOC = "HubertConfig"
47
+
48
+
49
+ from ..deprecated._archive_maps import TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
50
+
51
+
52
+ LARGE_NEGATIVE = -1e8
53
+
54
+
55
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement
56
+ def _sample_without_replacement(distribution, num_samples):
57
+ """
58
+ Categorical sampling without replacement is currently not implemented. The gumbel-max trick will do for now - see
59
+ https://github.com/tensorflow/tensorflow/issues/9260 for more info
60
+ """
61
+ z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1))
62
+ _, indices = tf.nn.top_k(distribution + z, num_samples)
63
+ return indices
64
+
65
+
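A tiny, self-contained illustration of the trick used by `_sample_without_replacement` above: perturbing the (uniform) scores with `-log(U)` noise and taking `top_k` yields `k` distinct indices per row, which is all the masking code needs.

```python
import tensorflow as tf

tf.random.set_seed(0)
distribution = tf.ones((2, 10))  # uniform scores over 10 candidate mask-start positions, batch of 2

z = -tf.math.log(tf.random.uniform(tf.shape(distribution), 0, 1))  # random perturbation
_, indices = tf.nn.top_k(distribution + z, k=3)                    # 3 distinct indices per row

print(indices.numpy())  # shape (2, 3), no repeated index within a row
```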
66
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices
67
+ def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
68
+ """
69
+ Scatter function as in PyTorch with indices in format (batch_dim, indices)
70
+ """
71
+ indices_shape = shape_list(batch_indices)
72
+ # broadcast batch dim to indices_shape
73
+ broad_casted_batch_dims = tf.reshape(
74
+ tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]
75
+ )
76
+ # transform batch_indices to pair_indices
77
+ pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
78
+ # scatter values to pair indices
79
+ return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)
80
+
81
+
82
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices
83
+ def _compute_mask_indices(
84
+ shape: Tuple[int, int],
85
+ mask_prob: float,
86
+ mask_length: int,
87
+ min_masks: int = 0,
88
+ ) -> tf.Tensor:
89
+ """
90
+ Computes random mask spans for a given shape
91
+
92
+ Args:
93
+ shape: the shape for which to compute masks.
94
+ should be of size 2 where first element is batch size and 2nd is timesteps
95
+ attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
96
+ mask_prob:
97
+ probability for each token to be chosen as the start of the span to be masked. This will be multiplied by
98
+ number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
99
+ However, due to overlaps, the actual number of masked elements will be smaller.
100
+ mask_length: size of the mask
101
+ min_masks: minimum number of masked spans
102
+
103
+ Adapted from [fairseq's
104
+ data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376).
105
+ """
106
+ batch_size, sequence_length = shape
107
+
108
+ if mask_length < 1:
109
+ raise ValueError("`mask_length` has to be bigger than 0.")
110
+
111
+ tf.debugging.assert_less(
112
+ mask_length,
113
+ sequence_length,
114
+ message=(
115
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and"
116
+ f" `sequence_length`: {sequence_length}`"
117
+ ),
118
+ )
119
+
120
+ # compute number of masked spans in batch
121
+ num_masked_spans = mask_prob * tf.cast(sequence_length, tf.float32) / mask_length + tf.random.uniform((1,))
122
+ num_masked_spans = tf.maximum(num_masked_spans, min_masks)
123
+ num_masked_spans = tf.cast(num_masked_spans, tf.int32)
124
+
125
+ # make sure num masked indices <= sequence_length
126
+ num_masked_spans = tf.math.minimum(sequence_length // mask_length, num_masked_spans)
127
+ num_masked_spans = tf.squeeze(num_masked_spans)
128
+
129
+ # SpecAugment mask to fill
130
+ spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32)
131
+
132
+ # uniform distribution to sample from, make sure that offset samples are < sequence_length
133
+ uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1)))
134
+
135
+ # get random indices to mask
136
+ spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans)
137
+
138
+ # expand masked indices to masked spans
139
+ spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1)
140
+ spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length))
141
+ spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length))
142
+
143
+ offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :]
144
+ offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1))
145
+ offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length))
146
+
147
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
148
+
149
+ # scatter indices to mask
150
+ spec_aug_mask = _scatter_values_on_batch_indices(
151
+ tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, tf.shape(spec_aug_mask)
152
+ )
153
+
154
+ return spec_aug_mask
155
+
156
+
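The core arithmetic of `_compute_mask_indices` is expanding sampled start indices into contiguous spans. A minimal sketch of just that step, with the span starts chosen by hand rather than sampled:

```python
import tensorflow as tf

mask_length = 3
starts = tf.constant([[1, 6], [0, 4]])  # (batch, num_masked_spans) hand-picked span starts

# repeat each start `mask_length` times and add offsets 0..mask_length-1, as in the function above
starts = tf.reshape(tf.tile(starts[:, :, tf.newaxis], (1, 1, mask_length)), (2, -1))
offsets = tf.reshape(tf.tile(tf.range(mask_length)[tf.newaxis, tf.newaxis, :], (2, 2, 1)), (2, -1))
span_indices = starts + offsets

print(span_indices.numpy())  # [[1 2 3 6 7 8], [0 1 2 4 5 6]]
```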
157
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
158
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
159
+ """
160
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
161
+ """
162
+ src_len = shape_list(mask)[1]
163
+ tgt_len = tgt_len if tgt_len is not None else src_len
164
+ one_cst = tf.constant(1.0)
165
+ mask = tf.cast(mask, dtype=one_cst.dtype)
166
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
167
+
168
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
169
+
170
+
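To see what `_expand_mask` produces, here is a small sketch: a `[batch, seq_len]` padding mask becomes an additive `[batch, 1, tgt_len, src_len]` mask whose padded positions carry a large negative value, ready to be added to attention scores.

```python
import tensorflow as tf

LARGE_NEGATIVE = -1e8

mask = tf.constant([[1.0, 1.0, 0.0]])          # one sequence of length 3, last frame is padding
expanded = tf.tile(mask[:, None, None, :], (1, 1, 3, 1))
additive = (1.0 - expanded) * LARGE_NEGATIVE   # 0 where attended, -1e8 where padded

print(additive.shape)              # (1, 1, 3, 3)
print(additive[0, 0, 0].numpy())   # [0., 0., -1e8]
```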
171
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert
172
+ class TFHubertGroupNorm(keras.layers.Layer):
173
+ """
174
+ From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization
175
+ """
176
+
177
+ def __init__(
178
+ self,
179
+ groups: int = 32,
180
+ axis: int = -1,
181
+ epsilon: float = 1e-3,
182
+ center: bool = True,
183
+ scale: bool = True,
184
+ beta_initializer: keras.initializers.Initializer = "zeros",
185
+ gamma_initializer: keras.initializers.Initializer = "ones",
186
+ beta_regularizer: keras.regularizers.Regularizer = None,
187
+ gamma_regularizer: keras.regularizers.Regularizer = None,
188
+ beta_constraint: keras.constraints.Constraint = None,
189
+ gamma_constraint: keras.constraints.Constraint = None,
190
+ **kwargs,
191
+ ):
192
+ super().__init__(**kwargs)
193
+ self.supports_masking = True
194
+ self.groups = groups
195
+ self.axis = axis
196
+ self.epsilon = epsilon
197
+ self.center = center
198
+ self.scale = scale
199
+ self.beta_initializer = keras.initializers.get(beta_initializer)
200
+ self.gamma_initializer = keras.initializers.get(gamma_initializer)
201
+ self.beta_regularizer = keras.regularizers.get(beta_regularizer)
202
+ self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
203
+ self.beta_constraint = keras.constraints.get(beta_constraint)
204
+ self.gamma_constraint = keras.constraints.get(gamma_constraint)
205
+ self._check_axis()
206
+
207
+ def build(self, input_shape):
208
+ self._check_if_input_shape_is_none(input_shape)
209
+ self._set_number_of_groups_for_instance_norm(input_shape)
210
+ self._check_size_of_dimensions(input_shape)
211
+ self._create_input_spec(input_shape)
212
+
213
+ self._add_gamma_weight(input_shape)
214
+ self._add_beta_weight(input_shape)
215
+ self.built = True
216
+ super().build(input_shape)
217
+
218
+ def call(self, inputs):
219
+ input_shape = keras.backend.int_shape(inputs)
220
+ tensor_input_shape = tf.shape(inputs)
221
+
222
+ reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape)
223
+
224
+ normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape)
225
+
226
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
227
+ if not is_instance_norm:
228
+ outputs = tf.reshape(normalized_inputs, tensor_input_shape)
229
+ else:
230
+ outputs = normalized_inputs
231
+
232
+ return outputs
233
+
234
+ def get_config(self):
235
+ config = {
236
+ "groups": self.groups,
237
+ "axis": self.axis,
238
+ "epsilon": self.epsilon,
239
+ "center": self.center,
240
+ "scale": self.scale,
241
+ "beta_initializer": keras.initializers.serialize(self.beta_initializer),
242
+ "gamma_initializer": keras.initializers.serialize(self.gamma_initializer),
243
+ "beta_regularizer": keras.regularizers.serialize(self.beta_regularizer),
244
+ "gamma_regularizer": keras.regularizers.serialize(self.gamma_regularizer),
245
+ "beta_constraint": keras.constraints.serialize(self.beta_constraint),
246
+ "gamma_constraint": keras.constraints.serialize(self.gamma_constraint),
247
+ }
248
+ base_config = super().get_config()
249
+ return {**base_config, **config}
250
+
251
+ def compute_output_shape(self, input_shape):
252
+ return input_shape
253
+
254
+ def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
255
+ group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
256
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
257
+ if not is_instance_norm:
258
+ group_shape[self.axis] = input_shape[self.axis] // self.groups
259
+ group_shape.insert(self.axis, self.groups)
260
+ group_shape = tf.stack(group_shape)
261
+ reshaped_inputs = tf.reshape(inputs, group_shape)
262
+ return reshaped_inputs, group_shape
263
+ else:
264
+ return inputs, group_shape
265
+
266
+ def _apply_normalization(self, reshaped_inputs, input_shape):
267
+ group_shape = keras.backend.int_shape(reshaped_inputs)
268
+ group_reduction_axes = list(range(1, len(group_shape)))
269
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
270
+ if not is_instance_norm:
271
+ axis = -2 if self.axis == -1 else self.axis - 1
272
+ else:
273
+ axis = -1 if self.axis == -1 else self.axis - 1
274
+ group_reduction_axes.pop(axis)
275
+
276
+ mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True)
277
+
278
+ gamma, beta = self._get_reshaped_weights(input_shape)
279
+ normalized_inputs = tf.nn.batch_normalization(
280
+ reshaped_inputs,
281
+ mean=mean,
282
+ variance=variance,
283
+ scale=gamma,
284
+ offset=beta,
285
+ variance_epsilon=self.epsilon,
286
+ )
287
+ return normalized_inputs
288
+
289
+ def _get_reshaped_weights(self, input_shape):
290
+ broadcast_shape = self._create_broadcast_shape(input_shape)
291
+ gamma = None
292
+ beta = None
293
+ if self.scale:
294
+ gamma = tf.reshape(self.gamma, broadcast_shape)
295
+
296
+ if self.center:
297
+ beta = tf.reshape(self.beta, broadcast_shape)
298
+ return gamma, beta
299
+
300
+ def _check_if_input_shape_is_none(self, input_shape):
301
+ dim = input_shape[self.axis]
302
+ if dim is None:
303
+ raise ValueError(
304
+ "Axis "
305
+ + str(self.axis)
306
+ + " of input tensor should have a defined dimension but the layer received an input with shape "
307
+ + str(input_shape)
308
+ + "."
309
+ )
310
+
311
+ def _set_number_of_groups_for_instance_norm(self, input_shape):
312
+ dim = input_shape[self.axis]
313
+
314
+ if self.groups == -1:
315
+ self.groups = dim
316
+
317
+ def _check_size_of_dimensions(self, input_shape):
318
+ dim = input_shape[self.axis]
319
+ if dim < self.groups:
320
+ raise ValueError(
321
+ "Number of groups ("
322
+ + str(self.groups)
323
+ + ") cannot be more than the number of channels ("
324
+ + str(dim)
325
+ + ")."
326
+ )
327
+
328
+ if dim % self.groups != 0:
329
+ raise ValueError(
330
+ "Number of groups ("
331
+ + str(self.groups)
332
+ + ") must be a multiple of the number of channels ("
333
+ + str(dim)
334
+ + ")."
335
+ )
336
+
337
+ def _check_axis(self):
338
+ if self.axis == 0:
339
+ raise ValueError(
340
+ "You are trying to normalize your batch axis. Do you want to use tf.layer.batch_normalization instead"
341
+ )
342
+
343
+ def _create_input_spec(self, input_shape):
344
+ dim = input_shape[self.axis]
345
+ self.input_spec = keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim})
346
+
347
+ def _add_gamma_weight(self, input_shape):
348
+ dim = input_shape[self.axis]
349
+ shape = (dim,)
350
+
351
+ if self.scale:
352
+ self.gamma = self.add_weight(
353
+ shape=shape,
354
+ name="gamma",
355
+ initializer=self.gamma_initializer,
356
+ regularizer=self.gamma_regularizer,
357
+ constraint=self.gamma_constraint,
358
+ )
359
+ else:
360
+ self.gamma = None
361
+
362
+ def _add_beta_weight(self, input_shape):
363
+ dim = input_shape[self.axis]
364
+ shape = (dim,)
365
+
366
+ if self.center:
367
+ self.beta = self.add_weight(
368
+ shape=shape,
369
+ name="beta",
370
+ initializer=self.beta_initializer,
371
+ regularizer=self.beta_regularizer,
372
+ constraint=self.beta_constraint,
373
+ )
374
+ else:
375
+ self.beta = None
376
+
377
+ def _create_broadcast_shape(self, input_shape):
378
+ broadcast_shape = [1] * len(input_shape)
379
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
380
+ if not is_instance_norm:
381
+ broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
382
+ broadcast_shape.insert(self.axis, self.groups)
383
+ else:
384
+ broadcast_shape[self.axis] = self.groups
385
+ return broadcast_shape
386
+
387
+
388
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert
389
+ class TFHubertWeightNormConv1D(keras.layers.Conv1D):
390
+ """Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm"""
391
+
392
+ def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs):
393
+ super().__init__(
394
+ filters=filters,
395
+ kernel_size=kernel_size,
396
+ groups=groups,
397
+ padding="valid",
398
+ use_bias=True,
399
+ bias_initializer="he_normal",
400
+ **kwargs,
401
+ )
402
+ self.explicit_padding = explicit_padding
403
+ self.filter_axis = 2
404
+ self.kernel_norm_axes = tf.constant([0, 1])
405
+
406
+ def _init_norm(self):
407
+ """Set the norm of the weight vector."""
408
+ kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes))
409
+ self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis])
410
+
411
+ def _normalize_kernel(self):
412
+ """Generate normalized weights."""
413
+ kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g)
414
+ self.kernel = tf.transpose(kernel)
415
+
416
+ def build(self, input_shape):
417
+ if not self.built:
418
+ super().build(input_shape)
419
+
420
+ self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True)
421
+ self.weight_v = self.kernel
422
+
423
+ self.weight_g = self.add_weight(
424
+ name="weight_g",
425
+ shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1),
426
+ initializer="ones",
427
+ dtype=self.weight_v.dtype,
428
+ trainable=True,
429
+ )
430
+ self._init_norm()
431
+ self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True)
432
+
433
+ def call(self, inputs):
434
+ # TODO Matt: Assigning to attributes in call() is deeply sinful in TensorFlow, as it should be idempotent.
435
+ # This whole layer should be replaced by a layer that doesn't inherit from Conv1D, but instead calls
436
+ # a functional 1d convolution with normalized weights that it generates (but does not store!)
437
+ self._normalize_kernel()
438
+
439
+ padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0)))
440
+ output = super().call(padded_inputs)
441
+
442
+ return output
443
+
444
+
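For context on `TFHubertWeightNormConv1D`: weight normalization stores the kernel as a direction `v` and a per-filter magnitude `g`, and reconstructs `kernel = g * v / ||v||` on every call. A minimal numeric sketch of that reparameterization, independent of the layer's exact tensor layout:

```python
import tensorflow as tf

v = tf.random.normal((3, 1, 4))                        # direction: (kernel_size, in_channels, filters)
g = tf.sqrt(tf.reduce_sum(tf.square(v), axis=[0, 1]))  # per-filter norm, as in _init_norm

kernel = tf.nn.l2_normalize(v, axis=[0, 1]) * g        # g * v / ||v||, as in _normalize_kernel
# at initialization the reconstructed kernel equals v
print(float(tf.reduce_max(tf.abs(kernel - v))))        # ~0.0
```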
445
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
446
+ class TFHubertNoLayerNormConvLayer(keras.layers.Layer):
447
+ def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
448
+ super().__init__(**kwargs)
449
+ self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
450
+ self.out_conv_dim = config.conv_dim[layer_id]
451
+
452
+ self.conv = keras.layers.Conv1D(
453
+ filters=self.out_conv_dim,
454
+ kernel_size=config.conv_kernel[layer_id],
455
+ strides=config.conv_stride[layer_id],
456
+ use_bias=config.conv_bias,
457
+ name="conv",
458
+ )
459
+ self.activation = get_tf_activation(config.feat_extract_activation)
460
+
461
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
462
+ hidden_states = self.conv(hidden_states)
463
+ hidden_states = self.activation(hidden_states)
464
+ return hidden_states
465
+
466
+ def build(self, input_shape=None):
467
+ if self.built:
468
+ return
469
+ self.built = True
470
+ if getattr(self, "conv", None) is not None:
471
+ with tf.name_scope(self.conv.name):
472
+ self.conv.build([None, None, self.in_conv_dim])
473
+
474
+
475
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
476
+ class TFHubertLayerNormConvLayer(keras.layers.Layer):
477
+ def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
478
+ super().__init__(**kwargs)
479
+ self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
480
+ self.out_conv_dim = config.conv_dim[layer_id]
481
+
482
+ self.conv = keras.layers.Conv1D(
483
+ filters=self.out_conv_dim,
484
+ kernel_size=config.conv_kernel[layer_id],
485
+ strides=config.conv_stride[layer_id],
486
+ use_bias=config.conv_bias,
487
+ name="conv",
488
+ )
489
+ self.layer_norm = keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps)
490
+ self.activation = get_tf_activation(config.feat_extract_activation)
491
+
492
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
493
+ hidden_states = self.conv(hidden_states)
494
+ hidden_states = self.layer_norm(hidden_states)
495
+ hidden_states = self.activation(hidden_states)
496
+ return hidden_states
497
+
498
+ def build(self, input_shape=None):
499
+ if self.built:
500
+ return
501
+ self.built = True
502
+ if getattr(self, "conv", None) is not None:
503
+ with tf.name_scope(self.conv.name):
504
+ self.conv.build([None, None, self.in_conv_dim])
505
+ if getattr(self, "layer_norm", None) is not None:
506
+ with tf.name_scope(self.layer_norm.name):
507
+ self.layer_norm.build([None, None, self.out_conv_dim])
508
+
509
+
510
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
511
+ class TFHubertGroupNormConvLayer(keras.layers.Layer):
512
+ def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
513
+ super().__init__(**kwargs)
514
+ self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
515
+ self.out_conv_dim = config.conv_dim[layer_id]
516
+
517
+ self.conv = keras.layers.Conv1D(
518
+ filters=self.out_conv_dim,
519
+ kernel_size=config.conv_kernel[layer_id],
520
+ strides=config.conv_stride[layer_id],
521
+ use_bias=config.conv_bias,
522
+ name="conv",
523
+ )
524
+ self.activation = get_tf_activation(config.feat_extract_activation)
525
+ self.layer_norm = TFHubertGroupNorm(groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm")
526
+
527
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
528
+ hidden_states = self.conv(hidden_states)
529
+ hidden_states = self.layer_norm(hidden_states)
530
+ hidden_states = self.activation(hidden_states)
531
+ return hidden_states
532
+
533
+ def build(self, input_shape=None):
534
+ if self.built:
535
+ return
536
+ self.built = True
537
+ if getattr(self, "conv", None) is not None:
538
+ with tf.name_scope(self.conv.name):
539
+ self.conv.build([None, None, self.in_conv_dim])
540
+ if getattr(self, "layer_norm", None) is not None:
541
+ with tf.name_scope(self.layer_norm.name):
542
+ self.layer_norm.build([None, None, self.out_conv_dim])
543
+
544
+
545
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
546
+ class TFHubertPositionalConvEmbedding(keras.layers.Layer):
547
+ def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
548
+ super().__init__(**kwargs)
549
+ self.conv = TFHubertWeightNormConv1D(
550
+ filters=config.hidden_size,
551
+ kernel_size=config.num_conv_pos_embeddings,
552
+ groups=config.num_conv_pos_embedding_groups,
553
+ explicit_padding=config.num_conv_pos_embeddings // 2,
554
+ name="conv",
555
+ )
556
+ self.padding = TFHubertSamePadLayer(config.num_conv_pos_embeddings)
557
+ self.activation = get_tf_activation(config.feat_extract_activation)
558
+ self.config = config
559
+
560
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
561
+ hidden_states = self.conv(hidden_states)
562
+ hidden_states = self.padding(hidden_states)
563
+ hidden_states = self.activation(hidden_states)
564
+ return hidden_states
565
+
566
+ def build(self, input_shape=None):
567
+ if self.built:
568
+ return
569
+ self.built = True
570
+ if getattr(self, "conv", None) is not None:
571
+ with tf.name_scope(self.conv.name):
572
+ self.conv.build([None, None, self.config.hidden_size])
573
+
574
+
575
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert
576
+ class TFHubertSamePadLayer(keras.layers.Layer):
577
+ def __init__(self, num_conv_pos_embeddings, **kwargs):
578
+ super().__init__(**kwargs)
579
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
580
+
581
+ def call(self, hidden_states):
582
+ if self.num_pad_remove > 0:
583
+ hidden_states = hidden_states[:, : -self.num_pad_remove, :]
584
+ return hidden_states
585
+
586
+
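The reason `TFHubertSamePadLayer` trims one frame when `num_conv_pos_embeddings` is even: a stride-1 convolution with kernel size `k` and explicit padding `k // 2` returns `seq_len + 1` frames for even `k`. A quick check:

```python
import tensorflow as tf

seq_len, k = 10, 4                      # even kernel size, padding k // 2 = 2
x = tf.random.normal((1, seq_len, 8))

padded = tf.pad(x, ((0, 0), (k // 2, k // 2), (0, 0)))
y = tf.keras.layers.Conv1D(filters=8, kernel_size=k, padding="valid")(padded)

print(y.shape[1])          # 11 == seq_len + 1, so one trailing frame must be removed
print(y[:, :-1, :].shape)  # (1, 10, 8) after the SamePad trim
```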
587
+ class TFHubertFeatureEncoder(keras.layers.Layer):
588
+ def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
589
+ super().__init__(**kwargs)
590
+
591
+ if config.feat_extract_norm == "group":
592
+ conv_layers = [TFHubertGroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")] + [
593
+ TFHubertNoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i+1}")
594
+ for i in range(config.num_feat_extract_layers - 1)
595
+ ]
596
+ elif config.feat_extract_norm == "layer":
597
+ conv_layers = [
598
+ TFHubertLayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}")
599
+ for i in range(config.num_feat_extract_layers)
600
+ ]
601
+ else:
602
+ raise ValueError(
603
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
604
+ )
605
+ self.conv_layers = conv_layers
606
+
607
+ def call(self, input_values):
608
+ hidden_states = tf.expand_dims(input_values, -1)
609
+ for conv_layer in self.conv_layers:
610
+ hidden_states = conv_layer(hidden_states)
611
+ return hidden_states
612
+
613
+ def build(self, input_shape=None):
614
+ if self.built:
615
+ return
616
+ self.built = True
617
+ for conv_layer in self.conv_layers:
618
+ with tf.name_scope(conv_layer.name):
619
+ conv_layer.build(None)
620
+
621
+
622
+ class TFHubertFeatureExtractor(TFHubertFeatureEncoder):
623
+ def __init__(self, config, **kwargs):
624
+ super().__init__(config, **kwargs)
625
+ warnings.warn(
626
+ f"The class `{self.__class__.__name__}` has been depreciated "
627
+ "and will be removed in Transformers v5. "
628
+ f"Use `{self.__class__.__bases__[0].__name__}` instead.",
629
+ FutureWarning,
630
+ )
631
+
632
+
633
+ class TFHubertFeatureProjection(keras.layers.Layer):
634
+ def __init__(self, config: HubertConfig, **kwargs):
635
+ super().__init__(**kwargs)
636
+
637
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
638
+ self.projection = keras.layers.Dense(
639
+ units=config.hidden_size,
640
+ kernel_initializer=get_initializer(config.initializer_range),
641
+ bias_initializer="zeros",
642
+ name="projection",
643
+ )
644
+ self.dropout = keras.layers.Dropout(rate=config.feat_proj_dropout)
645
+ self.config = config
646
+
647
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
648
+ hidden_states = self.layer_norm(hidden_states)
649
+ hidden_states = self.projection(hidden_states)
650
+ hidden_states = self.dropout(hidden_states, training=training)
651
+ return hidden_states
652
+
653
+ def build(self, input_shape=None):
654
+ if self.built:
655
+ return
656
+ self.built = True
657
+ if getattr(self, "layer_norm", None) is not None:
658
+ with tf.name_scope(self.layer_norm.name):
659
+ self.layer_norm.build([None, None, self.config.conv_dim[-1]])
660
+ if getattr(self, "projection", None) is not None:
661
+ with tf.name_scope(self.projection.name):
662
+ self.projection.build([None, None, self.config.conv_dim[-1]])
663
+
664
+
665
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert
666
+ class TFHubertAttention(keras.layers.Layer):
667
+ """Multi-headed attention from "Attention Is All You Need"""
668
+
669
+ def __init__(
670
+ self,
671
+ embed_dim: int,
672
+ num_heads: int,
673
+ dropout: float = 0.0,
674
+ is_decoder: bool = False,
675
+ bias: bool = True,
676
+ **kwargs,
677
+ ):
678
+ super().__init__(**kwargs)
679
+ self.embed_dim = embed_dim
680
+
681
+ self.num_heads = num_heads
682
+ self.dropout = keras.layers.Dropout(dropout)
683
+ self.head_dim = embed_dim // num_heads
684
+ if (self.head_dim * num_heads) != self.embed_dim:
685
+ raise ValueError(
686
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
687
+ f" and `num_heads`: {num_heads})."
688
+ )
689
+ self.scaling = self.head_dim**-0.5
690
+ self.is_decoder = is_decoder
691
+
692
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
693
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
694
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
695
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
696
+
697
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
698
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
699
+
700
+ def call(
701
+ self,
702
+ hidden_states: tf.Tensor,
703
+ key_value_states: tf.Tensor | None = None,
704
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
705
+ attention_mask: tf.Tensor | None = None,
706
+ layer_head_mask: tf.Tensor | None = None,
707
+ training: Optional[bool] = False,
708
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
709
+ """Input shape: Batch x Time x Channel"""
710
+
711
+ # if key_value_states are provided this layer is used as a cross-attention layer
712
+ # for the decoder
713
+ is_cross_attention = key_value_states is not None
714
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
715
+
716
+ # get query proj
717
+ query_states = self.q_proj(hidden_states) * self.scaling
718
+ # get key, value proj
719
+ if is_cross_attention and past_key_value is not None:
720
+ # reuse k,v, cross_attentions
721
+ key_states = past_key_value[0]
722
+ value_states = past_key_value[1]
723
+ elif is_cross_attention:
724
+ # cross_attentions
725
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
726
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
727
+ elif past_key_value is not None:
728
+ # reuse k, v, self_attention
729
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
730
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
731
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
732
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
733
+ else:
734
+ # self_attention
735
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
736
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
737
+
738
+ if self.is_decoder:
739
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
740
+ # Further calls to cross_attention layer can then reuse all cross-attention
741
+ # key/value_states (first "if" case)
742
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
743
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
744
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
745
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
746
+ past_key_value = (key_states, value_states)
747
+
748
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
749
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
750
+ key_states = tf.reshape(key_states, proj_shape)
751
+ value_states = tf.reshape(value_states, proj_shape)
752
+
753
+ src_len = shape_list(key_states)[1]
754
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
755
+
756
+ tf.debugging.assert_equal(
757
+ shape_list(attn_weights),
758
+ [bsz * self.num_heads, tgt_len, src_len],
759
+ message=(
760
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
761
+ f" {shape_list(attn_weights)}"
762
+ ),
763
+ )
764
+
765
+ if attention_mask is not None:
766
+ tf.debugging.assert_equal(
767
+ shape_list(attention_mask),
768
+ [bsz, 1, tgt_len, src_len],
769
+ message=(
770
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
771
+ f" {shape_list(attention_mask)}"
772
+ ),
773
+ )
774
+
775
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
776
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
777
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
778
+
779
+ attn_weights = stable_softmax(attn_weights, axis=-1)
780
+
781
+ if layer_head_mask is not None:
782
+ tf.debugging.assert_equal(
783
+ shape_list(layer_head_mask),
784
+ [self.num_heads],
785
+ message=(
786
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
787
+ f" {shape_list(layer_head_mask)}"
788
+ ),
789
+ )
790
+
791
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
792
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
793
+ )
794
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
795
+
796
+ attn_probs = self.dropout(attn_weights, training=training)
797
+ attn_output = tf.matmul(attn_probs, value_states)
798
+
799
+ tf.debugging.assert_equal(
800
+ shape_list(attn_output),
801
+ [bsz * self.num_heads, tgt_len, self.head_dim],
802
+ message=(
803
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
804
+ f" {shape_list(attn_output)}"
805
+ ),
806
+ )
807
+
808
+ attn_output = tf.transpose(
809
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
810
+ )
811
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
812
+
813
+ attn_output = self.out_proj(attn_output)
814
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
815
+
816
+ return attn_output, attn_weights, past_key_value
817
+
818
+ def build(self, input_shape=None):
819
+ if self.built:
820
+ return
821
+ self.built = True
822
+ if getattr(self, "k_proj", None) is not None:
823
+ with tf.name_scope(self.k_proj.name):
824
+ self.k_proj.build([None, None, self.embed_dim])
825
+ if getattr(self, "q_proj", None) is not None:
826
+ with tf.name_scope(self.q_proj.name):
827
+ self.q_proj.build([None, None, self.embed_dim])
828
+ if getattr(self, "v_proj", None) is not None:
829
+ with tf.name_scope(self.v_proj.name):
830
+ self.v_proj.build([None, None, self.embed_dim])
831
+ if getattr(self, "out_proj", None) is not None:
832
+ with tf.name_scope(self.out_proj.name):
833
+ self.out_proj.build([None, None, self.embed_dim])
834
+
835
+
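The bookkeeping inside `TFHubertAttention` is mostly reshaping between `(batch, seq, embed)` and `(batch * heads, seq, head_dim)`. A standalone sketch of that round trip, using the same `_shape`-style transpose:

```python
import tensorflow as tf

bsz, seq_len, num_heads, head_dim = 2, 5, 4, 8
embed_dim = num_heads * head_dim
hidden_states = tf.random.normal((bsz, seq_len, embed_dim))

# split heads: (bsz, seq, embed) -> (bsz, heads, seq, head_dim) -> (bsz * heads, seq, head_dim)
per_head = tf.transpose(tf.reshape(hidden_states, (bsz, seq_len, num_heads, head_dim)), (0, 2, 1, 3))
flat = tf.reshape(per_head, (bsz * num_heads, seq_len, head_dim))

# merge heads back: inverse of the two steps above
merged = tf.reshape(
    tf.transpose(tf.reshape(flat, (bsz, num_heads, seq_len, head_dim)), (0, 2, 1, 3)),
    (bsz, seq_len, embed_dim),
)

print(bool(tf.reduce_all(tf.equal(hidden_states, merged))))  # True
```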
836
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert
837
+ class TFHubertFeedForward(keras.layers.Layer):
838
+ def __init__(self, config: HubertConfig, **kwargs):
839
+ super().__init__(**kwargs)
840
+
841
+ self.intermediate_dropout = keras.layers.Dropout(config.activation_dropout)
842
+
843
+ self.intermediate_dense = keras.layers.Dense(
844
+ units=config.intermediate_size,
845
+ kernel_initializer=get_initializer(config.initializer_range),
846
+ bias_initializer="zeros",
847
+ name="intermediate_dense",
848
+ )
849
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
850
+
851
+ self.output_dense = keras.layers.Dense(
852
+ units=config.hidden_size,
853
+ kernel_initializer=get_initializer(config.initializer_range),
854
+ bias_initializer="zeros",
855
+ name="output_dense",
856
+ )
857
+ self.output_dropout = keras.layers.Dropout(config.hidden_dropout)
858
+ self.config = config
859
+
860
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
861
+ hidden_states = self.intermediate_dense(hidden_states)
862
+ hidden_states = self.intermediate_act_fn(hidden_states)
863
+ hidden_states = self.intermediate_dropout(hidden_states, training=training)
864
+
865
+ hidden_states = self.output_dense(hidden_states)
866
+ hidden_states = self.output_dropout(hidden_states, training=training)
867
+ return hidden_states
868
+
869
+ def build(self, input_shape=None):
870
+ if self.built:
871
+ return
872
+ self.built = True
873
+ if getattr(self, "intermediate_dense", None) is not None:
874
+ with tf.name_scope(self.intermediate_dense.name):
875
+ self.intermediate_dense.build([None, None, self.config.hidden_size])
876
+ if getattr(self, "output_dense", None) is not None:
877
+ with tf.name_scope(self.output_dense.name):
878
+ self.output_dense.build([None, None, self.config.intermediate_size])
879
+
880
+
881
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert
882
+ class TFHubertEncoderLayer(keras.layers.Layer):
883
+ def __init__(self, config: HubertConfig, **kwargs):
884
+ super().__init__(**kwargs)
885
+ self.attention = TFHubertAttention(
886
+ embed_dim=config.hidden_size,
887
+ num_heads=config.num_attention_heads,
888
+ dropout=config.attention_dropout,
889
+ is_decoder=False,
890
+ name="attention",
891
+ )
892
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
893
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
894
+ self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
895
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
896
+ self.config = config
897
+
898
+ def call(
899
+ self,
900
+ hidden_states: tf.Tensor,
901
+ attention_mask: tf.Tensor | None = None,
902
+ output_attentions: Optional[bool] = False,
903
+ training: bool = False,
904
+ ) -> Tuple[tf.Tensor]:
905
+ attn_residual = hidden_states
906
+ hidden_states, attn_weights, _ = self.attention(
907
+ hidden_states, attention_mask=attention_mask, training=training
908
+ )
909
+ hidden_states = self.dropout(hidden_states, training=training)
910
+ hidden_states = attn_residual + hidden_states
911
+
912
+ hidden_states = self.layer_norm(hidden_states)
913
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
914
+ hidden_states = self.final_layer_norm(hidden_states)
915
+
916
+ outputs = (hidden_states,)
917
+
918
+ if output_attentions:
919
+ outputs += (attn_weights,)
920
+
921
+ return outputs
922
+
923
+ def build(self, input_shape=None):
924
+ if self.built:
925
+ return
926
+ self.built = True
927
+ if getattr(self, "attention", None) is not None:
928
+ with tf.name_scope(self.attention.name):
929
+ self.attention.build(None)
930
+ if getattr(self, "layer_norm", None) is not None:
931
+ with tf.name_scope(self.layer_norm.name):
932
+ self.layer_norm.build([None, None, self.config.hidden_size])
933
+ if getattr(self, "feed_forward", None) is not None:
934
+ with tf.name_scope(self.feed_forward.name):
935
+ self.feed_forward.build(None)
936
+ if getattr(self, "final_layer_norm", None) is not None:
937
+ with tf.name_scope(self.final_layer_norm.name):
938
+ self.final_layer_norm.build([None, None, self.config.hidden_size])
939
+
940
+
941
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert
942
+ class TFHubertEncoderLayerStableLayerNorm(keras.layers.Layer):
943
+ def __init__(self, config: HubertConfig, **kwargs):
944
+ super().__init__(**kwargs)
945
+ self.attention = TFHubertAttention(
946
+ embed_dim=config.hidden_size,
947
+ num_heads=config.num_attention_heads,
948
+ dropout=config.attention_dropout,
949
+ is_decoder=False,
950
+ name="attention",
951
+ )
952
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
953
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
954
+ self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
955
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
956
+ self.config = config
957
+
958
+ def call(
959
+ self,
960
+ hidden_states: tf.Tensor,
961
+ attention_mask: tf.Tensor | None = None,
962
+ output_attentions: Optional[bool] = False,
963
+ training: bool = False,
964
+ ) -> Tuple[tf.Tensor]:
965
+ attn_residual = hidden_states
966
+ hidden_states = self.layer_norm(hidden_states)
967
+ hidden_states, attn_weights, _ = self.attention(
968
+ hidden_states, attention_mask=attention_mask, training=training
969
+ )
970
+ hidden_states = self.dropout(hidden_states, training=training)
971
+ hidden_states = attn_residual + hidden_states
972
+ hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
973
+
974
+ outputs = (hidden_states,)
975
+
976
+ if output_attentions:
977
+ outputs += (attn_weights,)
978
+
979
+ return outputs
980
+
981
+ def build(self, input_shape=None):
982
+ if self.built:
983
+ return
984
+ self.built = True
985
+ if getattr(self, "attention", None) is not None:
986
+ with tf.name_scope(self.attention.name):
987
+ self.attention.build(None)
988
+ if getattr(self, "layer_norm", None) is not None:
989
+ with tf.name_scope(self.layer_norm.name):
990
+ self.layer_norm.build([None, None, self.config.hidden_size])
991
+ if getattr(self, "feed_forward", None) is not None:
992
+ with tf.name_scope(self.feed_forward.name):
993
+ self.feed_forward.build(None)
994
+ if getattr(self, "final_layer_norm", None) is not None:
995
+ with tf.name_scope(self.final_layer_norm.name):
996
+ self.final_layer_norm.build([None, None, self.config.hidden_size])
997
+
998
+
999
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert
1000
+ class TFHubertEncoder(keras.layers.Layer):
1001
+ def __init__(self, config: HubertConfig, **kwargs):
1002
+ super().__init__(**kwargs)
1003
+ self.config = config
1004
+ self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
1005
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
1006
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
1007
+ self.layer = [TFHubertEncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]
1008
+
1009
+ def call(
1010
+ self,
1011
+ hidden_states: tf.Tensor,
1012
+ attention_mask: tf.Tensor | None = None,
1013
+ output_attentions: Optional[bool] = False,
1014
+ output_hidden_states: Optional[bool] = False,
1015
+ return_dict: Optional[bool] = True,
1016
+ training: Optional[bool] = False,
1017
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1018
+ all_hidden_states = () if output_hidden_states else None
1019
+ all_self_attentions = () if output_attentions else None
1020
+
1021
+ if attention_mask is not None:
1022
+ hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
1023
+ attention_mask = _expand_mask(attention_mask)
1024
+ else:
1025
+ attention_mask = None
1026
+
1027
+ position_embeddings = self.pos_conv_embed(hidden_states)
1028
+ hidden_states = hidden_states + position_embeddings
1029
+ hidden_states = self.layer_norm(hidden_states)
1030
+ hidden_states = self.dropout(hidden_states, training=training)
1031
+
1032
+ for i, layer_module in enumerate(self.layer):
1033
+ if output_hidden_states:
1034
+ all_hidden_states = all_hidden_states + (hidden_states,)
1035
+
1036
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1037
+ dropout_probability = np.random.uniform(0, 1)
1038
+ if training and (dropout_probability < self.config.layerdrop): # skip the layer
1039
+ continue
1040
+
1041
+ layer_outputs = layer_module(
1042
+ hidden_states=hidden_states,
1043
+ attention_mask=attention_mask,
1044
+ output_attentions=output_attentions,
1045
+ training=training,
1046
+ )
1047
+ hidden_states = layer_outputs[0]
1048
+
1049
+ if output_attentions:
1050
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
1051
+
1052
+ # Add last layer
1053
+ if output_hidden_states:
1054
+ all_hidden_states = all_hidden_states + (hidden_states,)
1055
+
1056
+ if not return_dict:
1057
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
1058
+ return TFBaseModelOutput(
1059
+ last_hidden_state=hidden_states,
1060
+ hidden_states=all_hidden_states,
1061
+ attentions=all_self_attentions,
1062
+ )
1063
+
1064
+ def build(self, input_shape=None):
1065
+ if self.built:
1066
+ return
1067
+ self.built = True
1068
+ if getattr(self, "pos_conv_embed", None) is not None:
1069
+ with tf.name_scope(self.pos_conv_embed.name):
1070
+ self.pos_conv_embed.build(None)
1071
+ if getattr(self, "layer_norm", None) is not None:
1072
+ with tf.name_scope(self.layer_norm.name):
1073
+ self.layer_norm.build([None, None, self.config.hidden_size])
1074
+ if getattr(self, "layer", None) is not None:
1075
+ for layer in self.layer:
1076
+ with tf.name_scope(layer.name):
1077
+ layer.build(None)
1078
+
1079
+
1080
+ # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
1081
+ class TFHubertEncoderStableLayerNorm(keras.layers.Layer):
1082
+ def __init__(self, config: HubertConfig, **kwargs):
1083
+ super().__init__(**kwargs)
1084
+ self.config = config
1085
+ self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
1086
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
1087
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
1088
+ self.layer = [
1089
+ TFHubertEncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)
1090
+ ]
1091
+
1092
+ def call(
1093
+ self,
1094
+ hidden_states: tf.Tensor,
1095
+ attention_mask: tf.Tensor | None = None,
1096
+ output_attentions: Optional[bool] = False,
1097
+ output_hidden_states: Optional[bool] = False,
1098
+ return_dict: Optional[bool] = True,
1099
+ training: Optional[bool] = False,
1100
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1101
+ all_hidden_states = () if output_hidden_states else None
1102
+ all_self_attentions = () if output_attentions else None
1103
+
1104
+ if attention_mask is not None:
1105
+ hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
1106
+ attention_mask = _expand_mask(attention_mask)
1107
+ else:
1108
+ attention_mask = None
1109
+
1110
+ position_embeddings = self.pos_conv_embed(hidden_states)
1111
+ hidden_states = hidden_states + position_embeddings
1112
+ hidden_states = self.dropout(hidden_states, training=training)
1113
+
1114
+ for i, layer_module in enumerate(self.layer):
1115
+ if output_hidden_states:
1116
+ all_hidden_states = all_hidden_states + (hidden_states,)
1117
+
1118
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1119
+ dropout_probability = np.random.uniform(0, 1)
1120
+ if training and (dropout_probability < self.config.layerdrop): # skip the layer
1121
+ continue
1122
+
1123
+ layer_outputs = layer_module(
1124
+ hidden_states=hidden_states,
1125
+ attention_mask=attention_mask,
1126
+ output_attentions=output_attentions,
1127
+ training=training,
1128
+ )
1129
+ hidden_states = layer_outputs[0]
1130
+
1131
+ if output_attentions:
1132
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
1133
+
1134
+ hidden_states = self.layer_norm(hidden_states)
1135
+
1136
+ if output_hidden_states:
1137
+ all_hidden_states = all_hidden_states + (hidden_states,)
1138
+
1139
+ if not return_dict:
1140
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
1141
+ return TFBaseModelOutput(
1142
+ last_hidden_state=hidden_states,
1143
+ hidden_states=all_hidden_states,
1144
+ attentions=all_self_attentions,
1145
+ )
1146
+
1147
+ def build(self, input_shape=None):
1148
+ if self.built:
1149
+ return
1150
+ self.built = True
1151
+ if getattr(self, "pos_conv_embed", None) is not None:
1152
+ with tf.name_scope(self.pos_conv_embed.name):
1153
+ self.pos_conv_embed.build(None)
1154
+ if getattr(self, "layer_norm", None) is not None:
1155
+ with tf.name_scope(self.layer_norm.name):
1156
+ self.layer_norm.build([None, None, self.config.hidden_size])
1157
+ if getattr(self, "layer", None) is not None:
1158
+ for layer in self.layer:
1159
+ with tf.name_scope(layer.name):
1160
+ layer.build(None)
1161
+
1162
+
1163
+ @keras_serializable
1164
+ class TFHubertMainLayer(keras.layers.Layer):
1165
+ config_class = HubertConfig
1166
+
1167
+ def __init__(self, config: HubertConfig, **kwargs):
1168
+ super().__init__(**kwargs)
1169
+ self.config = config
1170
+ self.feature_extractor = TFHubertFeatureEncoder(config, name="feature_extractor")
1171
+ self.feature_projection = TFHubertFeatureProjection(config, name="feature_projection")
1172
+
1173
+ if config.do_stable_layer_norm:
1174
+ self.encoder = TFHubertEncoderStableLayerNorm(config, name="encoder")
1175
+ else:
1176
+ self.encoder = TFHubertEncoder(config, name="encoder")
1177
+
1178
+ def build(self, input_shape=None):
1179
+ self.masked_spec_embed = self.add_weight(
1180
+ shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed"
1181
+ )
1182
+
1183
+ if self.built:
1184
+ return
1185
+ self.built = True
1186
+ if getattr(self, "feature_extractor", None) is not None:
1187
+ with tf.name_scope(self.feature_extractor.name):
1188
+ self.feature_extractor.build(None)
1189
+ if getattr(self, "feature_projection", None) is not None:
1190
+ with tf.name_scope(self.feature_projection.name):
1191
+ self.feature_projection.build(None)
1192
+ if getattr(self, "encoder", None) is not None:
1193
+ with tf.name_scope(self.encoder.name):
1194
+ self.encoder.build(None)
1195
+
1196
+ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
1197
+ """
1198
+ Computes the output length of the convolutional layers
1199
+ """
1200
+
1201
+ def _conv_out_length(input_length, kernel_size, stride):
1202
+ # 1D convolutional layer output length formula taken
1203
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
1204
+ return (input_length - kernel_size) // stride + 1
1205
+
1206
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
1207
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
1208
+
1209
+ return input_lengths
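Editor's note: `_get_feat_extract_output_lengths` above just folds the standard Conv1d length formula over the feature-encoder layers. As a minimal sketch (not library code), and assuming the stock HuBERT feature-encoder configuration of `conv_kernel=(10, 3, 3, 3, 3, 2, 2)` and `conv_stride=(5, 2, 2, 2, 2, 2, 2)`, the same arithmetic in plain Python shows one second of 16 kHz audio mapping to 49 encoder frames:

```python
# Minimal sketch, assuming the default HubertConfig kernel/stride values (not taken from this diff).
conv_kernel = (10, 3, 3, 3, 3, 2, 2)
conv_stride = (5, 2, 2, 2, 2, 2, 2)

def feat_extract_output_length(input_length: int) -> int:
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        # same Conv1d output-length formula as in the method above
        input_length = (input_length - kernel_size) // stride + 1
    return input_length

print(feat_extract_output_length(16000))  # 1 s of 16 kHz audio -> 49 frames
```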
1210
+
1211
+ def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: tf.Tensor | None = None):
1212
+ """
1213
+ Masks extracted features along time axis and/or along feature axis according to
1214
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
1215
+ """
1216
+ batch_size, sequence_length, hidden_size = shape_list(hidden_states)
1217
+
1218
+ # `config.apply_spec_augment` can set masking to False
1219
+ if not getattr(self.config, "apply_spec_augment", True):
1220
+ return hidden_states
1221
+
1222
+ if mask_time_indices is not None:
1223
+ # apply SpecAugment along time axis with given mask_time_indices
1224
+ hidden_states = tf.where(
1225
+ tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
1226
+ self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
1227
+ hidden_states,
1228
+ )
1229
+
1230
+ elif self.config.mask_time_prob > 0:
1231
+ # generate indices & apply SpecAugment along time axis
1232
+ mask_time_indices = _compute_mask_indices(
1233
+ (batch_size, sequence_length),
1234
+ mask_prob=self.config.mask_time_prob,
1235
+ mask_length=self.config.mask_time_length,
1236
+ min_masks=2,
1237
+ )
1238
+ hidden_states = tf.where(
1239
+ tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
1240
+ self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
1241
+ hidden_states,
1242
+ )
1243
+
1244
+ # apply SpecAugment along feature axis
1245
+ if self.config.mask_feature_prob > 0:
1246
+ mask_feature_indices = _compute_mask_indices(
1247
+ (batch_size, hidden_size),
1248
+ mask_prob=self.config.mask_feature_prob,
1249
+ mask_length=self.config.mask_feature_length,
1250
+ )
1251
+ hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0)
1252
+
1253
+ return hidden_states
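Editor's note: the SpecAugment time masking in `_mask_hidden_states` is a broadcasted `tf.where`: wherever the boolean time mask is set, the whole frame is replaced by the single learned `masked_spec_embed` vector. A self-contained sketch of that replacement with a hand-written mask (standing in for `_compute_mask_indices`) and a zero vector standing in for the trained weight:

```python
import tensorflow as tf

# Minimal sketch of the tf.where pattern used above; shapes and values are illustrative only.
batch, seq_len, hidden = 1, 5, 4
hidden_states = tf.random.normal((batch, seq_len, hidden))
masked_spec_embed = tf.zeros((hidden,))              # stands in for the learned embedding
mask_time_indices = tf.constant([[0, 1, 1, 0, 0]])   # hand-written mask for illustration

masked = tf.where(
    tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),  # broadcast mask over the hidden dim
    masked_spec_embed[tf.newaxis, tf.newaxis, :],            # broadcast embedding over batch/time
    hidden_states,
)
print(masked.shape)  # (1, 5, 4); time steps 1 and 2 now equal masked_spec_embed
```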
1254
+
1255
+ @unpack_inputs
1256
+ def call(
1257
+ self,
1258
+ input_values: tf.Tensor,
1259
+ attention_mask: tf.Tensor | None = None,
1260
+ token_type_ids: tf.Tensor | None = None,
1261
+ position_ids: tf.Tensor | None = None,
1262
+ head_mask: tf.Tensor | None = None,
1263
+ inputs_embeds: tf.Tensor | None = None,
1264
+ output_attentions: tf.Tensor | None = None,
1265
+ output_hidden_states: tf.Tensor | None = None,
1266
+ return_dict: Optional[bool] = None,
1267
+ training: bool = False,
1268
+ **kwargs: Any,
1269
+ ):
1270
+ hidden_states = self.feature_extractor(tf.cast(input_values, tf.float32), training=training)
1271
+
1272
+ if attention_mask is not None:
1273
+ # compute real output lengths according to convolution formula
1274
+ output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, -1))
1275
+
1276
+ attention_mask = tf.sequence_mask(
1277
+ output_lengths, maxlen=shape_list(hidden_states)[1], dtype=hidden_states.dtype
1278
+ )
1279
+
1280
+ hidden_states = self.feature_projection(hidden_states, training=training)
1281
+
1282
+ mask_time_indices = kwargs.get("mask_time_indices", None)
1283
+ if training:
1284
+ hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
1285
+
1286
+ encoder_outputs = self.encoder(
1287
+ hidden_states,
1288
+ attention_mask=attention_mask,
1289
+ output_attentions=output_attentions,
1290
+ output_hidden_states=output_hidden_states,
1291
+ return_dict=return_dict,
1292
+ training=training,
1293
+ )
1294
+ hidden_states = encoder_outputs[0]
1295
+
1296
+ if not return_dict:
1297
+ return (hidden_states,) + encoder_outputs[1:]
1298
+
1299
+ return TFBaseModelOutput(
1300
+ last_hidden_state=hidden_states,
1301
+ hidden_states=encoder_outputs.hidden_states,
1302
+ attentions=encoder_outputs.attentions,
1303
+ )
1304
+
1305
+
1306
+ class TFHubertPreTrainedModel(TFPreTrainedModel):
1307
+ """
1308
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1309
+ models.
1310
+ """
1311
+
1312
+ config_class = HubertConfig
1313
+ base_model_prefix = "hubert"
1314
+ main_input_name = "input_values"
1315
+
1316
+ @property
1317
+ def input_signature(self):
1318
+ return {
1319
+ "input_values": tf.TensorSpec((None, 16000), tf.float32, name="input_values"),
1320
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
1321
+ "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
1322
+ }
1323
+
1324
+ def __init__(self, config, *inputs, **kwargs):
1325
+ super().__init__(config, *inputs, **kwargs)
1326
+ logger.warning(
1327
+ f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish "
1328
+ "to train/fine-tune this model, you need a GPU or a TPU"
1329
+ )
1330
+
1331
+
1332
+ HUBERT_START_DOCSTRING = r"""
1333
+
1334
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1335
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1336
+ etc.)
1337
+
1338
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1339
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1340
+ behavior.
1341
+
1342
+ <Tip>
1343
+
1344
+ TensorFlow models and layers in `transformers` accept two formats as input:
1345
+
1346
+ - having all inputs as keyword arguments (like PyTorch models), or
1347
+ - having all inputs as a list, tuple or dict in the first positional argument.
1348
+
1349
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1350
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1351
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1352
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1353
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1354
+ positional argument:
1355
+
1356
+ - a single Tensor with `input_values` only and nothing else: `model(input_values)`
1357
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1358
+ `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])`
1359
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1360
+ `model({"input_values": input_values, "token_type_ids": token_type_ids})`
1361
+
1362
+ Note that when creating models and layers with
1363
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1364
+ about any of this, as you can just pass inputs like you would to any other Python function!
1365
+
1366
+ </Tip>
1367
+
1368
+ Args:
1369
+ config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
1370
+ Initializing with a config file does not load the weights associated with the model, only the
1371
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1372
+ """
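Editor's note: the three input formats described in the docstring above (keyword arguments, a positional list in docstring order, or a dict keyed by input names) can be exercised directly. A hedged sketch with a dummy waveform, reusing the `facebook/hubert-large-ls960-ft` checkpoint from the examples further below (loading it requires network access):

```python
import tensorflow as tf
from transformers import TFHubertModel

# Sketch only: the three equivalent calling conventions listed in HUBERT_START_DOCSTRING.
model = TFHubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
input_values = tf.random.normal((1, 16000))                 # dummy 1 s waveform
attention_mask = tf.ones((1, 16000), dtype=tf.int32)

out1 = model(input_values)                                   # single tensor
out2 = model([input_values, attention_mask])                 # list, in docstring order
out3 = model({"input_values": input_values, "attention_mask": attention_mask})  # dict by name
```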
1373
+
1374
+ HUBERT_INPUTS_DOCSTRING = r"""
1375
+ Args:
1376
+ input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
1377
+ Indices of input sequence tokens in the vocabulary.
1378
+
1379
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1380
+ [`PreTrainedTokenizer.encode`] for details.
1381
+
1382
+ [What are input IDs?](../glossary#input-ids)
1383
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1384
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1385
+
1386
+ - 1 for tokens that are **not masked**,
1387
+ - 0 for tokens that are **masked**.
1388
+
1389
+ [What are attention masks?](../glossary#attention-mask)
1390
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1391
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1392
+ 1]`:
1393
+
1394
+ - 0 corresponds to a *sentence A* token,
1395
+ - 1 corresponds to a *sentence B* token.
1396
+
1397
+ [What are token type IDs?](../glossary#token-type-ids)
1398
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1399
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1400
+ config.max_position_embeddings - 1]`.
1401
+
1402
+ [What are position IDs?](../glossary#position-ids)
1403
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1404
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1405
+
1406
+ - 1 indicates the head is **not masked**,
1407
+ - 0 indicates the head is **masked**.
1408
+
1409
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1410
+ Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation.
1411
+ This is useful if you want more control over how to convert `input_values` indices into associated vectors
1412
+ than the model's internal embedding lookup matrix.
1413
+ output_attentions (`bool`, *optional*):
1414
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1415
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1416
+ config will be used instead.
1417
+ output_hidden_states (`bool`, *optional*):
1418
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1419
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1420
+ used instead.
1421
+ return_dict (`bool`, *optional*):
1422
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1423
+ eager mode, in graph mode the value will always be set to True.
1424
+ training (`bool`, *optional*, defaults to `False`):
1425
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1426
+ behaviors between training and evaluation).
1427
+ """
1428
+
1429
+
1430
+ @add_start_docstrings(
1431
+ "The bare TFHubert Model transformer outputting raw hidden-states without any specific head on top.",
1432
+ HUBERT_START_DOCSTRING,
1433
+ )
1434
+ class TFHubertModel(TFHubertPreTrainedModel):
1435
+ def __init__(self, config: HubertConfig, *inputs, **kwargs):
1436
+ super().__init__(config, *inputs, **kwargs)
1437
+ self.config = config
1438
+ self.hubert = TFHubertMainLayer(config, name="hubert")
1439
+
1440
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
1441
+ @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
1442
+ @unpack_inputs
1443
+ def call(
1444
+ self,
1445
+ input_values: tf.Tensor,
1446
+ attention_mask: tf.Tensor | None = None,
1447
+ token_type_ids: tf.Tensor | None = None,
1448
+ position_ids: tf.Tensor | None = None,
1449
+ head_mask: tf.Tensor | None = None,
1450
+ inputs_embeds: tf.Tensor | None = None,
1451
+ output_attentions: Optional[bool] = None,
1452
+ output_hidden_states: Optional[bool] = None,
1453
+ return_dict: Optional[bool] = None,
1454
+ training: bool = False,
1455
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1456
+ """
1457
+
1458
+ Returns:
1459
+
1460
+ Example:
1461
+
1462
+ ```python
1463
+ >>> from transformers import AutoProcessor, TFHubertModel
1464
+ >>> from datasets import load_dataset
1465
+ >>> import soundfile as sf
1466
+
1467
+ >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
1468
+ >>> model = TFHubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
1469
+
1470
+
1471
+ >>> def map_to_array(batch):
1472
+ ... speech, _ = sf.read(batch["file"])
1473
+ ... batch["speech"] = speech
1474
+ ... return batch
1475
+
1476
+
1477
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1478
+ >>> ds = ds.map(map_to_array)
1479
+
1480
+ >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
1481
+ >>> hidden_states = model(input_values).last_hidden_state
1482
+ ```"""
1483
+
1484
+ output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states
1485
+ output_attentions = output_attentions if output_attentions else self.config.output_attentions
1486
+ return_dict = return_dict if return_dict else self.config.return_dict
1487
+
1488
+ outputs = self.hubert(
1489
+ input_values=input_values,
1490
+ attention_mask=attention_mask,
1491
+ token_type_ids=token_type_ids,
1492
+ position_ids=position_ids,
1493
+ head_mask=head_mask,
1494
+ inputs_embeds=inputs_embeds,
1495
+ output_attentions=output_attentions,
1496
+ output_hidden_states=output_hidden_states,
1497
+ return_dict=return_dict,
1498
+ training=training,
1499
+ )
1500
+
1501
+ return outputs
1502
+
1503
+ def build(self, input_shape=None):
1504
+ if self.built:
1505
+ return
1506
+ self.built = True
1507
+ if getattr(self, "hubert", None) is not None:
1508
+ with tf.name_scope(self.hubert.name):
1509
+ self.hubert.build(None)
1510
+
1511
+
1512
+ @add_start_docstrings(
1513
+ """TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
1514
+ HUBERT_START_DOCSTRING,
1515
+ )
1516
+ class TFHubertForCTC(TFHubertPreTrainedModel):
1517
+ def __init__(self, config: HubertConfig, *inputs, **kwargs):
1518
+ super().__init__(config, *inputs, **kwargs)
1519
+
1520
+ self.hubert = TFHubertMainLayer(config, name="hubert")
1521
+ self.dropout = keras.layers.Dropout(config.final_dropout)
1522
+ self.lm_head = keras.layers.Dense(config.vocab_size, name="lm_head")
1523
+ self.output_hidden_size = (
1524
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
1525
+ )
1526
+
1527
+ def freeze_feature_extractor(self):
1528
+ """
1529
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1530
+ not be updated during training.
1531
+ """
1532
+ warnings.warn(
1533
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1534
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1535
+ FutureWarning,
1536
+ )
1537
+ self.freeze_feature_encoder()
1538
+
1539
+ def freeze_feature_encoder(self):
1540
+ """
1541
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1542
+ not be updated during training.
1543
+ """
1544
+ self.hubert.feature_extractor.trainable = False
1545
+
1546
+ @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
1547
+ @replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC)
1548
+ @unpack_inputs
1549
+ def call(
1550
+ self,
1551
+ input_values: tf.Tensor,
1552
+ attention_mask: tf.Tensor | None = None,
1553
+ token_type_ids: tf.Tensor | None = None,
1554
+ position_ids: tf.Tensor | None = None,
1555
+ head_mask: tf.Tensor | None = None,
1556
+ inputs_embeds: tf.Tensor | None = None,
1557
+ output_attentions: Optional[bool] = None,
1558
+ labels: tf.Tensor | None = None,
1559
+ output_hidden_states: Optional[bool] = None,
1560
+ return_dict: Optional[bool] = None,
1561
+ training: Optional[bool] = False,
1562
+ ) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
1563
+ r"""
1564
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1565
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1566
+ config.vocab_size]` (see `input_values` docstring) Tokens with indices set to `-100` are ignored (masked),
1567
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1568
+
1569
+ Returns:
1570
+
1571
+ Example:
1572
+
1573
+ ```python
1574
+ >>> import tensorflow as tf
1575
+ >>> from transformers import AutoProcessor, TFHubertForCTC
1576
+ >>> from datasets import load_dataset
1577
+ >>> import soundfile as sf
1578
+
1579
+ >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
1580
+ >>> model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")
1581
+
1582
+
1583
+ >>> def map_to_array(batch):
1584
+ ... speech, _ = sf.read(batch["file"])
1585
+ ... batch["speech"] = speech
1586
+ ... return batch
1587
+
1588
+
1589
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1590
+ >>> ds = ds.map(map_to_array)
1591
+
1592
+ >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
1593
+ >>> logits = model(input_values).logits
1594
+ >>> predicted_ids = tf.argmax(logits, axis=-1)
1595
+
1596
+ >>> transcription = processor.decode(predicted_ids[0])
1597
+
1598
+ >>> # compute loss
1599
+ >>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST"
1600
+
1601
+ >>> # Pass the target transcription as text to encode labels
1602
+ >>> labels = processor(text=target_transcription, return_tensors="tf").input_ids
1603
+
1604
+ >>> loss = model(input_values, labels=labels).loss
1605
+ ```"""
1606
+
1607
+ outputs = self.hubert(
1608
+ input_values=input_values,
1609
+ attention_mask=attention_mask,
1610
+ token_type_ids=token_type_ids,
1611
+ position_ids=position_ids,
1612
+ head_mask=head_mask,
1613
+ inputs_embeds=inputs_embeds,
1614
+ output_attentions=output_attentions,
1615
+ output_hidden_states=output_hidden_states,
1616
+ return_dict=return_dict,
1617
+ training=training,
1618
+ )
1619
+ hidden_states = outputs[0]
1620
+ hidden_states = self.dropout(hidden_states, training=training)
1621
+
1622
+ logits = self.lm_head(hidden_states)
1623
+
1624
+ if labels is not None:
1625
+ if tf.reduce_max(labels) >= self.config.vocab_size:
1626
+ raise ValueError(f"Label values must be smaller than vocab_size: {self.config.vocab_size}")
1627
+
1628
+ attention_mask = (
1629
+ attention_mask if attention_mask is not None else tf.ones_like(input_values, dtype=tf.float32)
1630
+ )
1631
+ input_lengths = self.hubert._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1))
1632
+
1633
+ # assuming that padded tokens are filled with -100
1634
+ # when not being attended to
1635
+ labels_mask = tf.cast(labels >= 0, tf.int32)
1636
+ target_lengths = tf.reduce_sum(labels_mask, axis=-1)
1637
+
1638
+ loss = tf.nn.ctc_loss(
1639
+ logits=logits,
1640
+ labels=labels,
1641
+ logit_length=input_lengths,
1642
+ label_length=target_lengths,
1643
+ blank_index=self.config.pad_token_id,
1644
+ logits_time_major=False,
1645
+ )
1646
+
1647
+ if self.config.ctc_loss_reduction == "sum":
1648
+ loss = tf.reduce_sum(loss)
1649
+ loss = tf.reshape(loss, (1,))
1650
+ if self.config.ctc_loss_reduction == "mean":
1651
+ loss = tf.reduce_mean(loss)
1652
+ loss = tf.reshape(loss, (1,))
1653
+ else:
1654
+ loss = None
1655
+
1656
+ if not return_dict:
1657
+ output = (logits,) + outputs[1:]
1658
+ return ((loss,) + output) if loss is not None else output
1659
+
1660
+ return TFCausalLMOutput(
1661
+ loss=loss,
1662
+ logits=logits,
1663
+ hidden_states=outputs.hidden_states,
1664
+ attentions=outputs.attentions,
1665
+ )
1666
+
1667
+ def build(self, input_shape=None):
1668
+ if self.built:
1669
+ return
1670
+ self.built = True
1671
+ if getattr(self, "hubert", None) is not None:
1672
+ with tf.name_scope(self.hubert.name):
1673
+ self.hubert.build(None)
1674
+ if getattr(self, "lm_head", None) is not None:
1675
+ with tf.name_scope(self.lm_head.name):
1676
+ self.lm_head.build([None, None, self.output_hidden_size])
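Editor's note: the CTC loss code in `TFHubertForCTC.call` above assumes padded label positions are filled with `-100`; `labels_mask = labels >= 0` then recovers each example's target length, and `config.pad_token_id` doubles as the CTC blank index. A hedged sketch of building such a padded label batch (the token ids below are made up for illustration; in practice they come from the processor/tokenizer):

```python
import tensorflow as tf

# Sketch only: pad CTC label sequences with -100 the way the model expects.
label_sequences = [[12, 7, 7, 31], [5, 9]]            # two transcriptions of different length
max_len = max(len(seq) for seq in label_sequences)
labels = tf.constant(
    [seq + [-100] * (max_len - len(seq)) for seq in label_sequences], dtype=tf.int32
)

labels_mask = tf.cast(labels >= 0, tf.int32)          # same masking rule as in the model code
target_lengths = tf.reduce_sum(labels_mask, axis=-1)  # -> [4, 2]
print(target_lengths.numpy())
```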
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__init__.py ADDED
@@ -0,0 +1,74 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_llava_next": ["LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlavaNextConfig"],
21
+ "processing_llava_next": ["LlavaNextProcessor"],
22
+ }
23
+
24
+
25
+ try:
26
+ if not is_torch_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["modeling_llava_next"] = [
32
+ "LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
33
+ "LlavaNextForConditionalGeneration",
34
+ "LlavaNextPreTrainedModel",
35
+ ]
36
+
37
+ try:
38
+ if not is_vision_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["image_processing_llava_next"] = ["LlavaNextImageProcessor"]
44
+
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_llava_next import LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, LlavaNextConfig
48
+ from .processing_llava_next import LlavaNextProcessor
49
+
50
+ try:
51
+ if not is_torch_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .modeling_llava_next import (
57
+ LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
58
+ LlavaNextForConditionalGeneration,
59
+ LlavaNextPreTrainedModel,
60
+ )
61
+
62
+ try:
63
+ if not is_vision_available():
64
+ raise OptionalDependencyNotAvailable()
65
+ except OptionalDependencyNotAvailable:
66
+ pass
67
+ else:
68
+ from .image_processing_llava_next import LlavaNextImageProcessor
69
+
70
+
71
+ else:
72
+ import sys
73
+
74
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
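Editor's note: the `_LazyModule` pattern above defers the heavy imports; names listed in `_import_structure` are only resolved on first attribute access, and the torch/vision-gated entries simply stay unregistered when those backends are missing. A hedged usage sketch, assuming `torch` and `Pillow` are installed so the gated symbols are available:

```python
# Sketch only: the lazy module resolves these attributes on first access.
from transformers.models.llava_next import LlavaNextConfig, LlavaNextProcessor

# Gated by is_torch_available() / is_vision_available() in the __init__ above:
from transformers.models.llava_next import (
    LlavaNextForConditionalGeneration,
    LlavaNextImageProcessor,
)

config = LlavaNextConfig()  # default CLIP-vision + Llama backbones
print(type(config.vision_config).__name__, type(config.text_config).__name__)
```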
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.22 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/configuration_llava_next.cpython-310.pyc ADDED
Binary file (4.89 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/convert_llava_next_weights_to_hf.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/image_processing_llava_next.cpython-310.pyc ADDED
Binary file (23.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/modeling_llava_next.cpython-310.pyc ADDED
Binary file (23.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/processing_llava_next.cpython-310.pyc ADDED
Binary file (6.67 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/configuration_llava_next.py ADDED
@@ -0,0 +1,141 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """ Llava-NeXT model configuration"""
15
+
16
+ from ...configuration_utils import PretrainedConfig
17
+ from ...utils import logging
18
+ from ..auto import CONFIG_MAPPING
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "llava-hf/llava-v1.6-mistral-7b-hf": "https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf/resolve/main/config.json",
25
+ }
26
+
27
+
28
+ class LlavaNextConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`LlavaNextForConditionalGeneration`]. It is used to instantiate an
31
+ Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of the [llava-hf/llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf)
33
+ model.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
40
+ The config object or dictionary of the vision backbone.
41
+ text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
42
+ The config object or dictionary of the text backbone.
43
+ ignore_index (`int`, *optional*, defaults to -100):
44
+ The ignore index for the loss function.
45
+ image_token_index (`int`, *optional*, defaults to 32000):
46
+ The image token index to encode the image prompt.
47
+ projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
48
+ The activation function used by the multimodal projector.
49
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
50
+ The feature selection strategy used to select the vision feature from the vision backbone.
51
+ Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
52
+ If `"full"`, the full vision features are used.
53
+ vision_feature_layer (`int`, *optional*, defaults to -2):
54
+ The index of the layer to select the vision feature.
55
+ image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
56
+ A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
57
+ of the form `(height, width)`.
58
+
59
+ Example:
60
+
61
+ ```python
62
+ >>> from transformers import LlavaNextForConditionalGeneration, LlavaNextConfig, CLIPVisionConfig, LlamaConfig
63
+
64
+ >>> # Initializing a CLIP-vision config
65
+ >>> vision_config = CLIPVisionConfig()
66
+
67
+ >>> # Initializing a Llama config
68
+ >>> text_config = LlamaConfig()
69
+
70
+ >>> # Initializing a Llava-Next llava-hf/llava-v1.6-mistral-7b-hf style configuration
71
+ >>> configuration = LlavaNextConfig(vision_config, text_config)
72
+
73
+ >>> # Initializing a model from the llava-hf/llava-v1.6-mistral-7b-hf style configuration
74
+ >>> model = LlavaNextForConditionalGeneration(configuration)
75
+
76
+ >>> # Accessing the model configuration
77
+ >>> configuration = model.config
78
+ ```"""
79
+
80
+ model_type = "llava_next"
81
+ is_composition = False
82
+
83
+ def __init__(
84
+ self,
85
+ vision_config=None,
86
+ text_config=None,
87
+ ignore_index=-100,
88
+ image_token_index=32000,
89
+ projector_hidden_act="gelu",
90
+ vision_feature_select_strategy="default",
91
+ vision_feature_layer=-2,
92
+ image_grid_pinpoints=None,
93
+ **kwargs,
94
+ ):
95
+ self.ignore_index = ignore_index
96
+ self.image_token_index = image_token_index
97
+ self.projector_hidden_act = projector_hidden_act
98
+
99
+ if vision_feature_select_strategy not in ["default", "full"]:
100
+ raise ValueError(
101
+ "vision_feature_select_strategy should be one of 'default', 'full'. "
102
+ f"Got: {vision_feature_select_strategy}"
103
+ )
104
+
105
+ self.vision_feature_select_strategy = vision_feature_select_strategy
106
+ self.vision_feature_layer = vision_feature_layer
107
+ image_grid_pinpoints = (
108
+ image_grid_pinpoints
109
+ if image_grid_pinpoints is not None
110
+ else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
111
+ )
112
+ self.image_grid_pinpoints = image_grid_pinpoints
113
+
114
+ if isinstance(vision_config, dict):
115
+ vision_config["model_type"] = (
116
+ vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
117
+ )
118
+ vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
119
+ elif vision_config is None:
120
+ vision_config = CONFIG_MAPPING["clip_vision_model"](
121
+ intermediate_size=4096,
122
+ hidden_size=1024,
123
+ patch_size=14,
124
+ image_size=336,
125
+ num_hidden_layers=24,
126
+ num_attention_heads=16,
127
+ vocab_size=32000,
128
+ projection_dim=768,
129
+ )
130
+
131
+ self.vision_config = vision_config
132
+
133
+ if isinstance(text_config, dict):
134
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
135
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
136
+ elif text_config is None:
137
+ text_config = CONFIG_MAPPING["llama"]()
138
+
139
+ self.text_config = text_config
140
+
141
+ super().__init__(**kwargs)
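Editor's note: because the `__init__` above routes dict-valued `vision_config`/`text_config` through `CONFIG_MAPPING` (defaulting the model types to `clip_vision_model` and `llama`), partial dicts are enough to build a full config. A hedged sketch, with the overridden sizes chosen arbitrarily for illustration:

```python
from transformers import LlavaNextConfig

# Sketch only: dict inputs are promoted to full config objects via CONFIG_MAPPING.
config = LlavaNextConfig(
    vision_config={"hidden_size": 1024, "patch_size": 14, "image_size": 336},  # -> CLIPVisionConfig
    text_config={"hidden_size": 2048, "num_hidden_layers": 8},                 # -> LlamaConfig
    image_token_index=32000,
)
print(type(config.vision_config).__name__)   # CLIPVisionConfig
print(config.image_grid_pinpoints)           # defaults to the five pinpoints listed above
```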
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/convert_llava_next_weights_to_hf.py ADDED
@@ -0,0 +1,342 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Convert LLaVa-NeXT (LLaVa-1.6) checkpoints from the original repository.
16
+
17
+ URL: https://github.com/haotian-liu/LLaVA/tree/main.
18
+
19
+
20
+ The command used to obtain original logits is the following:
21
+ python llava/eval/run_llava.py --model-path "liuhaotian/llava-v1.6-mistral-7b" --image-file "images/llava_v1_5_radar.jpg" --query "What is shown in this image?" --max_new_tokens 100 --temperature 0
22
+
23
+ Note: logits are tested with torch==2.1.2.
24
+ """
25
+
26
+ import argparse
27
+ import glob
28
+ import json
29
+ from pathlib import Path
30
+
31
+ import requests
32
+ import torch
33
+ from accelerate import init_empty_weights
34
+ from huggingface_hub import hf_hub_download, snapshot_download
35
+ from PIL import Image
36
+ from safetensors import safe_open
37
+
38
+ from transformers import (
39
+ AddedToken,
40
+ AutoConfig,
41
+ AutoTokenizer,
42
+ LlavaNextConfig,
43
+ LlavaNextForConditionalGeneration,
44
+ LlavaNextImageProcessor,
45
+ LlavaNextProcessor,
46
+ )
47
+
48
+
49
+ KEYS_TO_MODIFY_MAPPING = {
50
+ "model.vision_tower.": "",
51
+ "model.mm_projector": "multi_modal_projector",
52
+ "model": "model.model",
53
+ "vision_model.model": "vision_model",
54
+ "lm_head": "language_model.lm_head",
55
+ "model.model": "language_model.model",
56
+ "multi_modal_projector.0": "multi_modal_projector.linear_1",
57
+ "multi_modal_projector.2": "multi_modal_projector.linear_2",
58
+ "language_model.model.image_newline": "image_newline",
59
+ }
60
+
61
+
62
+ def load_original_state_dict(model_id):
63
+ directory_path = snapshot_download(repo_id=model_id, allow_patterns=["*.safetensors"])
64
+
65
+ original_state_dict = {}
66
+ for path in glob.glob(f"{directory_path}/*"):
67
+ if path.endswith(".safetensors"):
68
+ with safe_open(path, framework="pt", device="cpu") as f:
69
+ for key in f.keys():
70
+ original_state_dict[key] = f.get_tensor(key)
71
+
72
+ return original_state_dict
73
+
74
+
75
+ def convert_state_dict_to_hf(state_dict):
76
+ new_state_dict = {}
77
+ for key, value in state_dict.items():
78
+ if key.endswith(".inv_freq"):
79
+ continue
80
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
81
+ if key_to_modify in key:
82
+ key = key.replace(key_to_modify, new_key)
83
+
84
+ new_state_dict[key] = value.to(torch.float16)
85
+ return new_state_dict
86
+
87
+
88
+ def load_image():
89
+ url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
90
+ image = Image.open(requests.get(url, stream=True).raw)
91
+ return image
92
+
93
+
94
+ def convert_llava_to_hf(model_id, pytorch_dump_folder_path, push_to_hub=False):
95
+ # load original config
96
+ filepath = hf_hub_download(repo_id=model_id, filename="config.json", repo_type="model")
97
+ # read json
98
+ with open(filepath) as f:
99
+ data = json.load(f)
100
+ print(data)
101
+
102
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
103
+ text_model_id = "mistralai/Mistral-7B-Instruct-v0.2"
104
+ image_token_index = 32000
105
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-7b":
106
+ text_model_id = "lmsys/vicuna-7b-v1.5"
107
+ image_token_index = 32000
108
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-13b":
109
+ text_model_id = "lmsys/vicuna-13b-v1.5"
110
+ image_token_index = 32000
111
+ elif model_id == "liuhaotian/llava-v1.6-34b":
112
+ text_model_id = "NousResearch/Nous-Hermes-2-Yi-34B"
113
+ image_token_index = 64000
114
+ vision_model_id = data["mm_vision_tower"]
115
+
116
+ torch.set_default_dtype(torch.float16)
117
+ text_config = AutoConfig.from_pretrained(text_model_id)
118
+
119
+ use_fast = False if model_id == "liuhaotian/llava-v1.6-34b" else True
120
+ tokenizer = AutoTokenizer.from_pretrained(text_model_id, use_fast=use_fast)
121
+ tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True)
122
+
123
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
124
+ # Mistral-7B doesn't have a padding token set yet
125
+ tokenizer.add_special_tokens({"pad_token": "<pad>"})
126
+
127
+ image_processor = LlavaNextImageProcessor.from_pretrained(vision_model_id)
128
+ processor = LlavaNextProcessor(tokenizer=tokenizer, image_processor=image_processor)
129
+
130
+ config = LlavaNextConfig(
131
+ text_config=text_config.to_dict(),
132
+ image_grid_pinpoints=image_processor.image_grid_pinpoints,
133
+ use_image_newline_parameter=True,
134
+ image_token_index=image_token_index,
135
+ )
136
+
137
+ with init_empty_weights():
138
+ model = LlavaNextForConditionalGeneration(config)
139
+
140
+ # load original state dict
141
+ state_dict = load_original_state_dict(model_id)
142
+ state_dict = convert_state_dict_to_hf(state_dict)
143
+ model.load_state_dict(state_dict, assign=True)
144
+ model.eval()
145
+
146
+ pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data
147
+ mu = torch.mean(pre_expansion_embeddings, dim=0).float()
148
+ n = pre_expansion_embeddings.size()[0]
149
+ sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
150
+ dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
151
+
152
+ # We add an image token so we resize the model
153
+ # Pad to 64 for performance reasons
154
+ pad_shape = 64
155
+ vocab_size = config.text_config.vocab_size
156
+ if model_id == "liuhaotian/llava-v1.6-34b":
157
+ # this one has 3 additional tokens, namely <|startoftext|>, <|endoftext|> and <image>
158
+ num_tokens = vocab_size + 3
159
+ else:
160
+ # this one has 2 additional tokens, namely <image> and <pad>
161
+ num_tokens = vocab_size + 2
162
+ model.resize_token_embeddings(num_tokens, pad_to_multiple_of=pad_shape)
163
+ model.language_model.model.embed_tokens.weight.data[vocab_size:] = torch.stack(
164
+ tuple(
165
+ (dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[vocab_size:].shape[0]))
166
+ ),
167
+ dim=0,
168
+ )
169
+ model.language_model.lm_head.weight.data[vocab_size:] = torch.stack(
170
+ tuple((dist.sample() for _ in range(model.language_model.lm_head.weight.data[vocab_size:].shape[0]))),
171
+ dim=0,
172
+ )
173
+
174
+ device = "cuda:2"
175
+ model.to(device)
176
+
177
+ # prepare inputs
178
+ image = load_image()
179
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
180
+ prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
181
+ elif model_id in ["liuhaotian/llava-v1.6-vicuna-7b", "liuhaotian/llava-v1.6-vicuna-13b"]:
182
+ prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: <image>\nWhat is shown in this image? ASSISTANT:"
183
+ elif model_id == "liuhaotian/llava-v1.6-34b":
184
+ prompt = "<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant\n"
185
+ inputs = processor(images=image, text=prompt, return_tensors="pt")
186
+
187
+ # verify inputs
188
+ filepath = hf_hub_download(repo_id="nielsr/test-image", filename="llava_1_6_pixel_values.pt", repo_type="dataset")
189
+ original_pixel_values = torch.load(filepath, map_location="cpu")
190
+ assert torch.allclose(original_pixel_values, inputs.pixel_values.half())
191
+
192
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
193
+ filepath = hf_hub_download(repo_id="nielsr/test-image", filename="llava_1_6_input_ids.pt", repo_type="dataset")
194
+ original_input_ids = torch.load(filepath, map_location="cpu")
195
+ # replace -200 by image_token_index (since we use token ID = 32000 for the image token)
196
+ original_input_ids[original_input_ids == -200] = image_token_index
197
+ print(tokenizer.decode([id for id in original_input_ids.tolist()[0] if id != -200]))
198
+
199
+ assert original_input_ids[0].tolist() == inputs.input_ids[0].tolist()
200
+
201
+ elif model_id == "liuhaotian/llava-v1.6-34b":
202
+ filepath = hf_hub_download(
203
+ repo_id="nielsr/test-image", filename="llava_1_6_34b_input_ids.pt", repo_type="dataset"
204
+ )
205
+ original_input_ids = torch.load(filepath, map_location="cpu")
206
+ # replace -200 by image_token_index
207
+ original_input_ids[original_input_ids == -200] = image_token_index
208
+
209
+ assert original_input_ids[0].tolist() == inputs.input_ids[0].tolist()
210
+
211
+ image_sizes = torch.tensor([[899, 1024]])
212
+ assert image_sizes[0].tolist() == inputs.image_sizes[0].tolist()
213
+
214
+ # verify single forward pass
215
+ print("Single forward pass")
216
+ with torch.inference_mode():
217
+ inputs = inputs.to(device)
218
+ outputs = model(**inputs)
219
+ print("Shape of logits:", outputs.logits.shape)
220
+ print("First values of logits:", outputs.logits[0, :3, :3])
221
+
222
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
223
+ expected_slice = torch.tensor(
224
+ [[-4.8555, -4.6992, -0.1996], [-10.5703, -10.7344, -2.7246], [-7.0391, -7.3672, -0.2634]],
225
+ dtype=torch.float32,
226
+ device=device,
227
+ )
228
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-7b":
229
+ expected_slice = torch.tensor(
230
+ [[1.4883, 0.9976, -0.6992], [-9.7031, -5.7031, -1.5557], [-5.1328, -5.5586, 8.8281]],
231
+ dtype=torch.float32,
232
+ device=device,
233
+ )
234
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-13b":
235
+ expected_slice = torch.tensor(
236
+ [[-0.9614, 7.3125, 0.2106], [-7.2695, -8.5469, 3.6211], [-6.3750, -8.1875, 5.4688]],
237
+ dtype=torch.float32,
238
+ device=device,
239
+ )
240
+ elif model_id == "liuhaotian/llava-v1.6-34b":
241
+ expected_slice = torch.tensor(
242
+ [[-9.0859, -9.1406, 5.9453], [-5.9570, -5.9766, 2.2754], [-5.7305, -5.7539, 4.0000]],
243
+ dtype=torch.float32,
244
+ device=device,
245
+ )
246
+ else:
247
+ raise ValueError(f"Model {model_id} not supported")
248
+
249
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
250
+ print("Logits are ok!")
251
+
252
+ # verify generation
253
+ output_ids = model.generate(
254
+ **inputs,
255
+ max_new_tokens=100,
256
+ use_cache=True,
257
+ )
258
+
259
+ generated_text = processor.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
260
+
261
+ print("Generated text:", repr(generated_text))
262
+
263
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
264
+ expected_text = '[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular radar chart, there are several axes labeled with different metrics or benchmarks, such as "MMM-Vet," "MMM-Bench," "LLaVA-Bench," "SLED-Bench," "'
265
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-7b":
266
+ expected_text = """A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human\'s questions. USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a graphical representation of a benchmarking study comparing the performance of various models or systems. It\'s a scatter plot with a circular layout, where each point represents a different model or system, and the axes represent different metrics or dimensions of comparison.\n\nThe metrics are likely related to machine learning or artificial intelligence performance, as indicated by the terms like "BLIP-2," "Instruct BLIP," "POE," "QWA," "V"""
267
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-13b":
268
+ expected_text = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a radar chart, also known as a spider chart or star chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular radar chart, there are several variables represented:\n\n- MM-Vet\n- LLa-Va-Bench\n- SEED-Bench\n- MM"
269
+ elif model_id == "liuhaotian/llava-v1.6-34b":
270
+ expected_text = "<|im_start|> system\nAnswer the questions. <|im_start|> user\n\nWhat is shown in this image? <|im_start|> assistant\nThe image appears to be a radar chart, also known as a spider chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular chart, there are several datasets represented by different colors and labeled with various acronyms such as MM-Vet, LLaVA-Bench, SEED-Bench, MM-Bench-CN, MM-"
271
+ else:
272
+ raise ValueError(f"Model {model_id} not supported")
273
+
274
+ assert generated_text == expected_text
275
+ print("Generated text is ok!")
276
+
277
+ # verify batched generation
278
+ print("Batched generation...")
279
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
280
+ cats_image = Image.open(requests.get(url, stream=True).raw)
281
+
282
+ inputs = processor(
283
+ images=[image, cats_image],
284
+ text=[prompt, "[INST] <image>\nHow many cats are there? [/INST]"],
285
+ padding=True,
286
+ return_tensors="pt",
287
+ ).to(device)
288
+
289
+ for k, v in inputs.items():
290
+ print(k, v.shape)
291
+
292
+ print("Image sizes:", inputs.image_sizes)
293
+
294
+ # make sure image_sizes are the same
295
+ # as otherwise batched generation doesn't work
296
+ inputs.image_sizes[1] = inputs.image_sizes[0]
297
+
298
+ print("Batched generation...")
299
+ output_ids = model.generate(
300
+ **inputs,
301
+ max_new_tokens=20,
302
+ use_cache=True,
303
+ )
304
+
305
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
306
+ print(outputs)
307
+
308
+ if pytorch_dump_folder_path is not None:
309
+ print(f"Saving model and processor for {model_id} to {pytorch_dump_folder_path}")
310
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
311
+ model.save_pretrained(pytorch_dump_folder_path)
312
+ processor.save_pretrained(pytorch_dump_folder_path)
313
+
314
+ if push_to_hub:
315
+ repo_id = model_id.split("/")[-1]
316
+ model.push_to_hub(f"llava-hf/{repo_id}-hf")
317
+ processor.push_to_hub(f"llava-hf/{repo_id}-hf")
318
+
319
+
320
+ if __name__ == "__main__":
321
+ parser = argparse.ArgumentParser()
322
+ parser.add_argument(
323
+ "--model_id",
324
+ help="Hub location of the model to convert",
325
+ default="liuhaotian/llava-v1.6-mistral-7b",
326
+ choices=[
327
+ "liuhaotian/llava-v1.6-mistral-7b",
328
+ "liuhaotian/llava-v1.6-vicuna-7b",
329
+ "liuhaotian/llava-v1.6-vicuna-13b",
330
+ "liuhaotian/llava-v1.6-34b",
331
+ ],
332
+ required=False,
333
+ )
334
+ parser.add_argument(
335
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
336
+ )
337
+ parser.add_argument(
338
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
339
+ )
340
+ args = parser.parse_args()
341
+
342
+ convert_llava_to_hf(args.model_id, args.pytorch_dump_folder_path, args.push_to_hub)
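Editor's note: `convert_state_dict_to_hf` above applies the `KEYS_TO_MODIFY_MAPPING` substring replacements in dict order, so the bare `"model"` rule fires before the `"model.model"` cleanup rule, and rotary `inv_freq` buffers are dropped entirely. A hedged toy example of that renaming, assuming the script's `convert_state_dict_to_hf` is in scope (tensor values are placeholders):

```python
import torch

# Sketch only: exercise the renaming logic on a toy state dict.
toy_state_dict = {
    "model.layers.0.self_attn.q_proj.weight": torch.zeros(2, 2),
    "model.mm_projector.0.weight": torch.zeros(2, 2),
    "lm_head.weight": torch.zeros(2, 2),
    "model.layers.0.self_attn.rotary_emb.inv_freq": torch.zeros(2),  # dropped by the converter
}
print(sorted(convert_state_dict_to_hf(toy_state_dict)))
# ['language_model.lm_head.weight',
#  'language_model.model.layers.0.self_attn.q_proj.weight',
#  'multi_modal_projector.linear_1.weight']
```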
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/image_processing_llava_next.py ADDED
@@ -0,0 +1,608 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for LLaVa-NeXT."""
16
+
17
+ import math
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict, select_best_resolution
23
+ from ...image_transforms import (
24
+ convert_to_rgb,
25
+ get_resize_output_image_size,
26
+ pad,
27
+ resize,
28
+ to_channel_dimension_format,
29
+ )
30
+ from ...image_utils import (
31
+ OPENAI_CLIP_MEAN,
32
+ OPENAI_CLIP_STD,
33
+ ChannelDimension,
34
+ ImageInput,
35
+ PILImageResampling,
36
+ get_image_size,
37
+ infer_channel_dimension_format,
38
+ is_scaled_image,
39
+ make_list_of_images,
40
+ to_numpy_array,
41
+ valid_images,
42
+ validate_preprocess_arguments,
43
+ )
44
+ from ...utils import TensorType, is_vision_available, logging
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ if is_vision_available():
51
+ from PIL import Image
52
+
53
+
54
+ def divide_to_patches(image: np.array, patch_size: int, input_data_format) -> List[np.array]:
55
+ """
56
+ Divides an image into patches of a specified size.
57
+
58
+ Args:
59
+ image (`np.array`):
60
+ The input image.
61
+ patch_size (`int`):
62
+ The size of each patch.
63
+ input_data_format (`ChannelDimension` or `str`):
64
+ The channel dimension format of the input image.
65
+
66
+ Returns:
67
+ list: A list of np.array representing the patches.
68
+ """
69
+ patches = []
70
+ height, width = get_image_size(image, channel_dim=input_data_format)
71
+ for i in range(0, height, patch_size):
72
+ for j in range(0, width, patch_size):
73
+ if input_data_format == ChannelDimension.LAST:
74
+ patch = image[i : i + patch_size, j : j + patch_size]
75
+ else:
76
+ patch = image[:, i : i + patch_size, j : j + patch_size]
77
+ patches.append(patch)
78
+
79
+ return patches
80
+
81
+
82
+ def expand_to_square(image: np.array, background_color, input_data_format) -> np.array:
83
+ """
84
+ Expands an image to a square by adding a background color.
85
+ """
86
+
87
+ height, width = get_image_size(image, channel_dim=input_data_format)
88
+ if width == height:
89
+ return image
90
+ elif width > height:
91
+ result = np.ones((width, width, image.shape[2]), dtype=image.dtype) * background_color
92
+ result[(width - height) // 2 : (width - height) // 2 + height, :] = image
93
+ return result
94
+ else:
95
+ result = np.ones((height, height, image.shape[2]), dtype=image.dtype) * background_color
96
+ result[:, (height - width) // 2 : (height - width) // 2 + width] = image
97
+ return result
98
+
99
+
100
+ def _get_patch_output_size(image, target_resolution, input_data_format):
101
+ original_height, original_width = get_image_size(image, channel_dim=input_data_format)
102
+ target_height, target_width = target_resolution
103
+
104
+ scale_w = target_width / original_width
105
+ scale_h = target_height / original_height
106
+
107
+ if scale_w < scale_h:
108
+ new_width = target_width
109
+ new_height = min(math.ceil(original_height * scale_w), target_height)
110
+ else:
111
+ new_height = target_height
112
+ new_width = min(math.ceil(original_width * scale_h), target_width)
113
+
114
+ return new_height, new_width
115
+
116
+
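# A small self-contained sketch of the patching helpers above, assuming a 480x640 original
# image and a 672x672 target resolution; the helper name below is a stand-in that mirrors
# _get_patch_output_size without importing transformers internals.
import math

def _sketch_patch_output_size(original_hw, target_hw):
    (original_height, original_width), (target_height, target_width) = original_hw, target_hw
    scale_w = target_width / original_width
    scale_h = target_height / original_height
    if scale_w < scale_h:  # width is the binding constraint
        return min(math.ceil(original_height * scale_w), target_height), target_width
    return target_height, min(math.ceil(original_width * scale_h), target_width)

print(_sketch_patch_output_size((480, 640), (672, 672)))  # (504, 672): resize first, then pad to 672x672
# divide_to_patches on the padded 672x672 result with patch_size=336 then yields a 2x2 grid of tiles.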
117
+ class LlavaNextImageProcessor(BaseImageProcessor):
118
+ r"""
119
+ Constructs a LLaVa-NeXT image processor. Based on [`CLIPImageProcessor`] with incorporation of additional techniques
120
+ for processing high resolution images as explained in the [LLaVa paper](https://arxiv.org/abs/2310.03744).
121
+
122
+ Args:
123
+ do_resize (`bool`, *optional*, defaults to `True`):
124
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
125
+ `do_resize` in the `preprocess` method.
126
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
127
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
128
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
129
+ method.
130
+ image_grid_pinpoints (`List` *optional*, defaults to `[[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]`):
131
+ A list of possible resolutions to use for processing high resolution images. The best resolution is selected
132
+ based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
133
+ method.
134
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
135
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
136
+ do_center_crop (`bool`, *optional*, defaults to `True`):
137
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
138
+ `preprocess` method.
139
+ crop_size (`Dict[str, int]` *optional*, defaults to 224):
140
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
141
+ method.
142
+ do_rescale (`bool`, *optional*, defaults to `True`):
143
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
144
+ the `preprocess` method.
145
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
146
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
147
+ method.
148
+ do_normalize (`bool`, *optional*, defaults to `True`):
149
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
150
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
151
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
152
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
153
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
154
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
155
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
156
+ Can be overridden by the `image_std` parameter in the `preprocess` method.
157
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
158
+ Whether to convert the image to RGB.
159
+ """
160
+
161
+ model_input_names = ["pixel_values"]
162
+
163
+ def __init__(
164
+ self,
165
+ do_resize: bool = True,
166
+ size: Dict[str, int] = None,
167
+ image_grid_pinpoints: List = None,
168
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
169
+ do_center_crop: bool = True,
170
+ crop_size: Dict[str, int] = None,
171
+ do_rescale: bool = True,
172
+ rescale_factor: Union[int, float] = 1 / 255,
173
+ do_normalize: bool = True,
174
+ image_mean: Optional[Union[float, List[float]]] = None,
175
+ image_std: Optional[Union[float, List[float]]] = None,
176
+ do_convert_rgb: bool = True,
177
+ **kwargs,
178
+ ) -> None:
179
+ super().__init__(**kwargs)
180
+ size = size if size is not None else {"shortest_edge": 224}
181
+ size = get_size_dict(size, default_to_square=False)
182
+ image_grid_pinpoints = (
183
+ image_grid_pinpoints
184
+ if image_grid_pinpoints is not None
185
+ else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
186
+ )
187
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
188
+ crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
189
+
190
+ self.do_resize = do_resize
191
+ self.size = size
192
+ self.image_grid_pinpoints = image_grid_pinpoints
193
+ self.resample = resample
194
+ self.do_center_crop = do_center_crop
195
+ self.crop_size = crop_size
196
+ self.do_rescale = do_rescale
197
+ self.rescale_factor = rescale_factor
198
+ self.do_normalize = do_normalize
199
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
200
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
201
+ self.do_convert_rgb = do_convert_rgb
202
+
203
+ # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize with CLIP->LLaVa
204
+ def resize(
205
+ self,
206
+ image: np.ndarray,
207
+ size: Dict[str, int],
208
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
209
+ data_format: Optional[Union[str, ChannelDimension]] = None,
210
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
211
+ **kwargs,
212
+ ) -> np.ndarray:
213
+ """
214
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
215
+ resized to keep the input aspect ratio.
216
+
217
+ Args:
218
+ image (`np.ndarray`):
219
+ Image to resize.
220
+ size (`Dict[str, int]`):
221
+ Size of the output image.
222
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
223
+ Resampling filter to use when resizing the image.
224
+ data_format (`str` or `ChannelDimension`, *optional*):
225
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
226
+ input_data_format (`ChannelDimension` or `str`, *optional*):
227
+ The channel dimension format of the input image. If not provided, it will be inferred.
228
+ """
229
+ default_to_square = True
230
+ if "shortest_edge" in size:
231
+ size = size["shortest_edge"]
232
+ default_to_square = False
233
+ elif "height" in size and "width" in size:
234
+ size = (size["height"], size["width"])
235
+ else:
236
+ raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
237
+
238
+ output_size = get_resize_output_image_size(
239
+ image,
240
+ size=size,
241
+ default_to_square=default_to_square,
242
+ input_data_format=input_data_format,
243
+ )
244
+
245
+ return resize(
246
+ image,
247
+ size=output_size,
248
+ resample=resample,
249
+ data_format=data_format,
250
+ input_data_format=input_data_format,
251
+ **kwargs,
252
+ )
253
+
254
+ def _preprocess(
255
+ self,
256
+ images: ImageInput,
257
+ do_resize: bool = None,
258
+ size: Dict[str, int] = None,
259
+ resample: PILImageResampling = None,
260
+ do_center_crop: bool = None,
261
+ crop_size: int = None,
262
+ do_rescale: bool = None,
263
+ rescale_factor: float = None,
264
+ do_normalize: bool = None,
265
+ image_mean: Optional[Union[float, List[float]]] = None,
266
+ image_std: Optional[Union[float, List[float]]] = None,
267
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
268
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
269
+ ) -> Image.Image:
270
+ """
271
+ Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
272
+
273
+ Args:
274
+ images (`ImageInput`):
275
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
276
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
277
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
278
+ Whether to resize the image.
279
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
280
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
281
+ the longest edge resized to keep the input aspect ratio.
282
+ resample (`int`, *optional*, defaults to `self.resample`):
283
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
284
+ has an effect if `do_resize` is set to `True`.
285
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
286
+ Whether to center crop the image.
287
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
288
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
289
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
290
+ Whether to rescale the image.
291
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
292
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
293
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
294
+ Whether to normalize the image.
295
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
296
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
297
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
298
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
299
+ `True`.
300
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
301
+ The channel dimension format for the output image. Can be one of:
302
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
303
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
304
+ - Unset: Use the channel dimension format of the input image.
305
+ input_data_format (`ChannelDimension` or `str`, *optional*):
306
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
307
+ from the input image. Can be one of:
308
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
309
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
310
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
311
+ """
312
+ images = make_list_of_images(images)
313
+
314
+ if do_resize:
315
+ images = [
316
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
317
+ for image in images
318
+ ]
319
+
320
+ if do_center_crop:
321
+ images = [
322
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
323
+ ]
324
+
325
+ if do_rescale:
326
+ images = [
327
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
328
+ for image in images
329
+ ]
330
+
331
+ if do_normalize:
332
+ images = [
333
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
334
+ for image in images
335
+ ]
336
+
337
+ images = [
338
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
339
+ ]
340
+
341
+ return images
342
+
343
+ def _resize_for_patching(
344
+ self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension
345
+ ) -> np.array:
346
+ """
347
+ Resizes an image to a target resolution while maintaining aspect ratio.
348
+
349
+ Args:
350
+ image (np.array):
351
+ The input image.
352
+ target_resolution (tuple):
353
+ The target resolution (height, width) of the image.
354
+ resample (`PILImageResampling`):
355
+ Resampling filter to use if resizing the image.
356
+ input_data_format (`ChannelDimension` or `str`):
357
+ The channel dimension format of the input image.
358
+
359
+ Returns:
360
+ np.array: The resized image.
361
+ """
362
+ new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)
363
+
364
+ # Resize the image
365
+ resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)
366
+
367
+ return resized_image
368
+
369
+ def _pad_for_patching(
370
+ self, image: np.array, target_resolution: tuple, input_data_format: ChannelDimension
371
+ ) -> np.array:
372
+ """
373
+ Pad an image to a target resolution while maintaining aspect ratio.
374
+ """
375
+ target_height, target_width = target_resolution
376
+ new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)
377
+
378
+ paste_x = (target_width - new_width) // 2
379
+ paste_y = (target_height - new_height) // 2
380
+
381
+ padded_image = pad(image, padding=((paste_y, paste_y), (paste_x, paste_x)))
382
+
383
+ return padded_image
384
+
385
+ def get_image_patches(
386
+ self,
387
+ image: np.array,
388
+ grid_pinpoints,
389
+ size: tuple,
390
+ patch_size: int,
391
+ resample: PILImageResampling,
392
+ data_format: ChannelDimension,
393
+ input_data_format: ChannelDimension,
394
+ ) -> List[np.array]:
395
+ """
396
+ Process an image with variable resolutions by dividing it into patches.
397
+
398
+ Args:
399
+ image (np.array):
400
+ The input image to be processed.
401
+ grid_pinpoints (List):
402
+ A list of possible resolutions to select the best resolution from.
403
+ size (`tuple`):
404
+ Size to resize the original image to.
405
+ patch_size (`int`):
406
+ Size of the patches to divide the image into.
407
+ resample (`PILImageResampling`):
408
+ Resampling filter to use if resizing the image.
409
+ data_format (`ChannelDimension` or `str`):
410
+ The channel dimension format for the output image.
411
+ input_data_format (`ChannelDimension` or `str`):
412
+ The channel dimension format of the input image.
413
+
414
+ Returns:
415
+ List[np.array]: A list of NumPy arrays containing the processed image patches.
416
+ """
417
+ if not isinstance(grid_pinpoints, list):
418
+ raise ValueError("grid_pinpoints must be a list of possible resolutions.")
419
+
420
+ possible_resolutions = grid_pinpoints
421
+
422
+ image_size = get_image_size(image, channel_dim=input_data_format)
423
+ best_resolution = select_best_resolution(image_size, possible_resolutions)
424
+ resized_image = self._resize_for_patching(
425
+ image, best_resolution, resample=resample, input_data_format=input_data_format
426
+ )
427
+ padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)
428
+
429
+ patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)
430
+
431
+ # make sure that all patches are in the input data format
432
+ patches = [
433
+ to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format)
434
+ for patch in patches
435
+ ]
436
+
437
+ resized_original_image = resize(
438
+ image,
439
+ size=size,
440
+ resample=resample,
441
+ data_format=data_format,
442
+ input_data_format=input_data_format,
443
+ )
444
+
445
+ image_patches = [resized_original_image] + patches
446
+
447
+ return image_patches
448
+
449
+ def preprocess(
450
+ self,
451
+ images: ImageInput,
452
+ do_resize: bool = None,
453
+ size: Dict[str, int] = None,
454
+ image_grid_pinpoints: List = None,
455
+ resample: PILImageResampling = None,
456
+ do_center_crop: bool = None,
457
+ crop_size: int = None,
458
+ do_rescale: bool = None,
459
+ rescale_factor: float = None,
460
+ do_normalize: bool = None,
461
+ image_mean: Optional[Union[float, List[float]]] = None,
462
+ image_std: Optional[Union[float, List[float]]] = None,
463
+ do_convert_rgb: bool = None,
464
+ return_tensors: Optional[Union[str, TensorType]] = None,
465
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
466
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
467
+ ):
468
+ """
469
+ Args:
470
+ images (`ImageInput`):
471
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
472
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
473
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
474
+ Whether to resize the image.
475
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
476
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
477
+ the longest edge resized to keep the input aspect ratio.
478
+ image_grid_pinpoints (`List` *optional*, defaults to `self.image_grid_pinpoints`):
479
+ A list of possible resolutions to use for processing high resolution images. The best resolution is
480
+ selected based on the original size of the image.
481
+ resample (`int`, *optional*, defaults to `self.resample`):
482
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
483
+ has an effect if `do_resize` is set to `True`.
484
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
485
+ Whether to center crop the image.
486
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
487
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
488
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
489
+ Whether to rescale the image.
490
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
491
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
492
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
493
+ Whether to normalize the image.
494
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
495
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
496
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
497
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
498
+ `True`.
499
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
500
+ Whether to convert the image to RGB.
501
+ return_tensors (`str` or `TensorType`, *optional*):
502
+ The type of tensors to return. Can be one of:
503
+ - Unset: Return a list of `np.ndarray`.
504
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
505
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
506
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
507
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
508
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
509
+ The channel dimension format for the output image. Can be one of:
510
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
511
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
512
+ - Unset: Use the channel dimension format of the input image.
513
+ input_data_format (`ChannelDimension` or `str`, *optional*):
514
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
515
+ from the input image. Can be one of:
516
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
517
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
518
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
519
+ """
520
+ do_resize = do_resize if do_resize is not None else self.do_resize
521
+ size = size if size is not None else self.size
522
+ size = get_size_dict(size, param_name="size", default_to_square=False)
523
+ image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else self.image_grid_pinpoints
524
+ resample = resample if resample is not None else self.resample
525
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
526
+ crop_size = crop_size if crop_size is not None else self.crop_size
527
+ crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
528
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
529
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
530
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
531
+ image_mean = image_mean if image_mean is not None else self.image_mean
532
+ image_std = image_std if image_std is not None else self.image_std
533
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
534
+
535
+ images = make_list_of_images(images)
536
+
537
+ if not valid_images(images):
538
+ raise ValueError(
539
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
540
+ "torch.Tensor, tf.Tensor or jax.ndarray."
541
+ )
542
+
543
+ validate_preprocess_arguments(
544
+ do_rescale=do_rescale,
545
+ rescale_factor=rescale_factor,
546
+ do_normalize=do_normalize,
547
+ image_mean=image_mean,
548
+ image_std=image_std,
549
+ do_center_crop=do_center_crop,
550
+ crop_size=crop_size,
551
+ do_resize=do_resize,
552
+ size=size,
553
+ resample=resample,
554
+ )
555
+
556
+ if do_convert_rgb:
557
+ images = [convert_to_rgb(image) for image in images]
558
+
559
+ # All transformations expect numpy arrays.
560
+ images = [to_numpy_array(image) for image in images]
561
+
562
+ if is_scaled_image(images[0]) and do_rescale:
563
+ logger.warning_once(
564
+ "It looks like you are trying to rescale already rescaled images. If the input"
565
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
566
+ )
567
+
568
+ if input_data_format is None:
569
+ # We assume that all images have the same channel dimension format.
570
+ input_data_format = infer_channel_dimension_format(images[0])
571
+
572
+ new_images = []
573
+ image_sizes = [get_image_size(image, channel_dim=input_data_format) for image in images]
574
+ for image in images:
575
+ # convert image into a list of patches
576
+ # we intentionally use the same data format as the input data format
577
+ image_patches = self.get_image_patches(
578
+ image,
579
+ image_grid_pinpoints,
580
+ size=(size["shortest_edge"], size["shortest_edge"]),
581
+ patch_size=crop_size["height"],
582
+ resample=resample,
583
+ data_format=input_data_format,
584
+ input_data_format=input_data_format,
585
+ )
586
+
587
+ # preprocess patches
588
+ pixel_values = self._preprocess(
589
+ image_patches,
590
+ do_resize=do_resize,
591
+ size=size,
592
+ resample=resample,
593
+ do_center_crop=do_center_crop,
594
+ crop_size=crop_size,
595
+ do_rescale=do_rescale,
596
+ rescale_factor=rescale_factor,
597
+ do_normalize=do_normalize,
598
+ image_mean=image_mean,
599
+ image_std=image_std,
600
+ data_format=data_format,
601
+ input_data_format=input_data_format,
602
+ )
603
+ pixel_values = np.array(pixel_values)
604
+ new_images.append(pixel_values)
605
+
606
+ data = {"pixel_values": new_images, "image_sizes": image_sizes}
607
+
608
+ return BatchFeature(data=data, tensor_type=return_tensors)
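# A minimal usage sketch for the image processor defined above; the checkpoint name matches
# the llava-hf repos referenced in this commit, and the URL is only an example input.
import requests
from PIL import Image
from transformers import LlavaNextImageProcessor

image_processor = LlavaNextImageProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
inputs = image_processor(images=image, return_tensors="pt")
print(inputs.pixel_values.shape)  # roughly (1, num_patches, 3, crop, crop); exact sizes depend on the config
print(inputs.image_sizes)         # the original (height, width) of each input image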
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/modeling_llava_next.py ADDED
@@ -0,0 +1,698 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Llava-NeXT model."""
16
+
17
+ from dataclasses import dataclass
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+
24
+ from ... import PreTrainedModel
25
+ from ...activations import ACT2FN
26
+ from ...cache_utils import Cache
27
+ from ...image_processing_utils import select_best_resolution
28
+ from ...modeling_outputs import ModelOutput
29
+ from ...utils import (
30
+ add_start_docstrings,
31
+ add_start_docstrings_to_model_forward,
32
+ logging,
33
+ replace_return_docstrings,
34
+ )
35
+ from ..auto import AutoModel, AutoModelForCausalLM
36
+ from .configuration_llava_next import LlavaNextConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CONFIG_FOR_DOC = "LlavaNextConfig"
42
+
43
+ LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
44
+ "llava-hf/llava-v1.6-mistral-7b-hf",
45
+ # See all LLaVA-NeXT models at https://huggingface.co/models?filter=llava_next
46
+ ]
47
+
48
+
49
+ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
50
+ """
51
+ Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
52
+
53
+ Args:
54
+ image_size (`tuple`):
55
+ The size of the input image in the format (width, height).
56
+ grid_pinpoints (`List`):
57
+ A list containing possible resolutions. Each item in the list should be a tuple or list
58
+ of the form `(height, width)`.
59
+ patch_size (`int`):
60
+ The size of each image patch.
61
+
62
+ Returns:
63
+ tuple: The shape of the image patch grid in the format (width, height).
64
+ """
65
+ if not isinstance(grid_pinpoints, list):
66
+ raise ValueError("grid_pinpoints should be a list of tuples or lists")
67
+
68
+ height, width = select_best_resolution(image_size, grid_pinpoints)
69
+ return height // patch_size, width // patch_size
70
+
71
+
72
+ def unpad_image(tensor, original_size):
73
+ """
74
+ Unpads a PyTorch tensor of a padded and resized image.
75
+
76
+ Args:
77
+ tensor (`torch.Tensor`):
78
+ The image tensor, assumed to be of shape (num_channels, height, width).
79
+ original_size (`tuple`):
80
+ The original size of the image (height, width).
81
+
82
+ Returns:
83
+ `torch.Tensor`: The unpadded image tensor.
84
+ """
85
+ original_height, original_width = original_size
86
+ current_height, current_width = tensor.shape[1:]
87
+
88
+ original_aspect_ratio = original_width / original_height
89
+ current_aspect_ratio = current_width / current_height
90
+
91
+ if original_aspect_ratio > current_aspect_ratio:
92
+ scale_factor = current_width / original_width
93
+ new_height = int(original_height * scale_factor)
94
+ padding = (current_height - new_height) // 2
95
+ unpadded_tensor = tensor[:, padding : current_height - padding, :]
96
+ else:
97
+ scale_factor = current_height / original_height
98
+ new_width = int(original_width * scale_factor)
99
+ padding = (current_width - new_width) // 2
100
+ unpadded_tensor = tensor[:, :, padding : current_width - padding]
101
+
102
+ return unpadded_tensor
103
+
104
+
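# A hedged numeric sketch of unpad_image above: a 480x640 original embedded in a square
# 672x672 feature map has its vertical padding stripped, because the original is wider than tall.
import torch

def _sketch_unpad(tensor, original_size):
    original_height, original_width = original_size
    _, current_height, current_width = tensor.shape
    if original_width / original_height > current_width / current_height:
        new_height = int(original_height * (current_width / original_width))
        padding = (current_height - new_height) // 2
        return tensor[:, padding : current_height - padding, :]
    new_width = int(original_width * (current_height / original_height))
    padding = (current_width - new_width) // 2
    return tensor[:, :, padding : current_width - padding]

print(_sketch_unpad(torch.zeros(8, 672, 672), (480, 640)).shape)  # torch.Size([8, 504, 672])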
105
+ @dataclass
106
+ # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->LlavaNext
107
+ class LlavaNextCausalLMOutputWithPast(ModelOutput):
108
+ """
109
+ Base class for LlavaNext causal language model (or autoregressive) outputs.
110
+
111
+ Args:
112
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
113
+ Language modeling loss (for next-token prediction).
114
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
115
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
116
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
117
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
118
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
119
+
120
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
121
+ `past_key_values` input) to speed up sequential decoding.
122
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
123
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
124
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
125
+
126
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
127
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
128
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
129
+ sequence_length)`.
130
+
131
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
132
+ heads.
133
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
134
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
135
+ sequence_length, hidden_size)`.
136
+
137
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
138
+ """
139
+
140
+ loss: Optional[torch.FloatTensor] = None
141
+ logits: torch.FloatTensor = None
142
+ past_key_values: Optional[List[torch.FloatTensor]] = None
143
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
144
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
145
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
146
+
147
+
148
+ # Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->LlavaNext
149
+ class LlavaNextMultiModalProjector(nn.Module):
150
+ def __init__(self, config: LlavaNextConfig):
151
+ super().__init__()
152
+
153
+ self.linear_1 = nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True)
154
+ self.act = ACT2FN[config.projector_hidden_act]
155
+ self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
156
+
157
+ def forward(self, image_features):
158
+ hidden_states = self.linear_1(image_features)
159
+ hidden_states = self.act(hidden_states)
160
+ hidden_states = self.linear_2(hidden_states)
161
+ return hidden_states
162
+
163
+
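# A standalone sketch of the projector's shape mapping above, using hypothetical sizes
# (vision hidden size 1024 -> text hidden size 4096) and GELU standing in for
# config.projector_hidden_act, rather than a real config.
import torch
from torch import nn

projector = nn.Sequential(nn.Linear(1024, 4096), nn.GELU(), nn.Linear(4096, 4096))
image_features = torch.randn(5, 576, 1024)  # (num_patches, tokens_per_patch, vision_hidden)
print(projector(image_features).shape)       # torch.Size([5, 576, 4096])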
164
+ LLAVA_NEXT_START_DOCSTRING = r"""
165
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
166
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
167
+ etc.)
168
+
169
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
170
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
171
+ and behavior.
172
+
173
+ Parameters:
174
+ config ([`LlavaNextConfig`] or [`LlavaNextVisionConfig`]):
175
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
176
+ load the weights associated with the model, only the configuration. Check out the
177
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
178
+ """
179
+
180
+
181
+ @add_start_docstrings(
182
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
183
+ LLAVA_NEXT_START_DOCSTRING,
184
+ )
185
+ # Copied from transformers.models.llava.modeling_llava.LlavaPreTrainedModel with Llava->LlavaNext,llava->llava_next
186
+ class LlavaNextPreTrainedModel(PreTrainedModel):
187
+ config_class = LlavaNextConfig
188
+ base_model_prefix = "model"
189
+ supports_gradient_checkpointing = True
190
+ _no_split_modules = ["LlavaNextVisionAttention"]
191
+ _skip_keys_device_placement = "past_key_values"
192
+ _supports_flash_attn_2 = True
193
+
194
+ def _init_weights(self, module):
195
+ # important: this ported version of LlavaNext isn't meant for training from scratch - only
196
+ # inference and fine-tuning - so the proper init weights code has been removed - the original codebase
197
+ # https://github.com/haotian-liu/LLaVA/tree/main/llava_next should serve for that purpose
198
+ std = (
199
+ self.config.initializer_range
200
+ if hasattr(self.config, "initializer_range")
201
+ else self.config.text_config.initializer_range
202
+ )
203
+
204
+ if hasattr(module, "class_embedding"):
205
+ module.class_embedding.data.normal_(mean=0.0, std=std)
206
+
207
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
208
+ module.weight.data.normal_(mean=0.0, std=std)
209
+ if module.bias is not None:
210
+ module.bias.data.zero_()
211
+ elif isinstance(module, nn.Embedding):
212
+ module.weight.data.normal_(mean=0.0, std=std)
213
+ if module.padding_idx is not None:
214
+ module.weight.data[module.padding_idx].zero_()
215
+
216
+ @property
217
+ def _supports_sdpa(self):
218
+ """
219
+ Retrieve language_model's attribute to check whether the model supports
220
+ SDPA or not.
221
+ """
222
+ return self.language_model._supports_sdpa
223
+
224
+
225
+ LLAVA_NEXT_INPUTS_DOCSTRING = r"""
226
+ Args:
227
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
228
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
229
+ it.
230
+
231
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
232
+ [`PreTrainedTokenizer.__call__`] for details.
233
+
234
+ [What are input IDs?](../glossary#input-ids)
235
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
236
+ The tensors corresponding to the input images. Pixel values can be obtained using
237
+ [`AutoImageProcessor`]. See [`LlavaNextImageProcessor.__call__`] for details. [`LlavaProcessor`] uses
238
+ [`LlavaNextImageProcessor`] for processing images.
239
+ image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`, *optional*):
240
+ The sizes of the images in the batch, being (height, width) for each image.
241
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
242
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
243
+
244
+ - 1 for tokens that are **not masked**,
245
+ - 0 for tokens that are **masked**.
246
+
247
+ [What are attention masks?](../glossary#attention-mask)
248
+
249
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
250
+ [`PreTrainedTokenizer.__call__`] for details.
251
+
252
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
253
+ `past_key_values`).
254
+
255
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
256
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
257
+ information on the default strategy.
258
+
259
+ - 1 indicates the head is **not masked**,
260
+ - 0 indicates the head is **masked**.
261
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
262
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
263
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
264
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
265
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
266
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
267
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
268
+
269
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
270
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
271
+
272
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
273
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
274
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
275
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
276
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
277
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
278
+ model's internal embedding lookup matrix.
279
+ vision_feature_layer (`int`, *optional*, defaults to -2):
280
+ The index of the layer to select the vision feature.
281
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
282
+ The feature selection strategy used to select the vision feature from the vision backbone.
283
+ Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
284
+ If `"full"`, the full vision features are used.
285
+ use_cache (`bool`, *optional*):
286
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
287
+ `past_key_values`).
288
+ output_attentions (`bool`, *optional*):
289
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
290
+ tensors for more detail.
291
+ output_hidden_states (`bool`, *optional*):
292
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
293
+ more detail.
294
+ return_dict (`bool`, *optional*):
295
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
296
+ """
297
+
298
+
299
+ @add_start_docstrings(
300
+ """The LLAVA-NeXT model which consists of a vision backbone and a language model.""",
301
+ LLAVA_NEXT_START_DOCSTRING,
302
+ )
303
+ class LlavaNextForConditionalGeneration(LlavaNextPreTrainedModel):
304
+ def __init__(self, config: LlavaNextConfig):
305
+ super().__init__(config)
306
+ self.vision_tower = AutoModel.from_config(config.vision_config)
307
+
308
+ self.multi_modal_projector = LlavaNextMultiModalProjector(config)
309
+
310
+ self.image_newline = nn.Parameter(torch.empty(config.text_config.hidden_size, dtype=self.dtype))
311
+
312
+ self.vocab_size = config.text_config.vocab_size
313
+ self.language_model = AutoModelForCausalLM.from_config(
314
+ config.text_config, attn_implementation=config._attn_implementation
315
+ )
316
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
317
+ self.post_init()
318
+
319
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_input_embeddings
320
+ def get_input_embeddings(self):
321
+ return self.language_model.get_input_embeddings()
322
+
323
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_input_embeddings
324
+ def set_input_embeddings(self, value):
325
+ self.language_model.set_input_embeddings(value)
326
+
327
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_output_embeddings
328
+ def get_output_embeddings(self):
329
+ return self.language_model.get_output_embeddings()
330
+
331
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_output_embeddings
332
+ def set_output_embeddings(self, new_embeddings):
333
+ self.language_model.set_output_embeddings(new_embeddings)
334
+
335
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_decoder
336
+ def set_decoder(self, decoder):
337
+ self.language_model.set_decoder(decoder)
338
+
339
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_decoder
340
+ def get_decoder(self):
341
+ return self.language_model.get_decoder()
342
+
343
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.tie_weights
344
+ def tie_weights(self):
345
+ return self.language_model.tie_weights()
346
+
347
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.resize_token_embeddings
348
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
349
+ model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
350
+ # update vocab size
351
+ self.config.text_config.vocab_size = model_embeds.num_embeddings
352
+ self.vocab_size = model_embeds.num_embeddings
353
+ return model_embeds
354
+
355
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration._merge_input_ids_with_image_features
356
+ def _merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids, attention_mask, labels):
357
+ num_images, num_image_patches, embed_dim = image_features.shape
358
+ batch_size, sequence_length = input_ids.shape
359
+ left_padding = not torch.sum(input_ids[:, -1] == torch.tensor(self.pad_token_id))
360
+ # 1. Create a mask to know where special image tokens are
361
+ special_image_token_mask = input_ids == self.config.image_token_index
362
+ num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
363
+ # Compute the maximum embed dimension
364
+ max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length
365
+ batch_indices, non_image_indices = torch.where(input_ids != self.config.image_token_index)
366
+
367
+ # 2. Compute the positions where text should be written
368
+ # Calculate new positions for text tokens in merged image-text sequence.
369
+ # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images - 1` text tokens.
370
+ # `torch.cumsum` computes how each image token shifts subsequent text token positions.
371
+ # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one.
372
+ new_token_positions = torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1
373
+ nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1]
374
+ if left_padding:
375
+ new_token_positions += nb_image_pad[:, None] # offset for left padding
376
+ text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
377
+
378
+ # 3. Create the full embedding, already padded to the maximum position
379
+ final_embedding = torch.zeros(
380
+ batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
381
+ )
382
+ final_attention_mask = torch.zeros(
383
+ batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device
384
+ )
385
+ if labels is not None:
386
+ final_labels = torch.full(
387
+ (batch_size, max_embed_dim), self.config.ignore_index, dtype=input_ids.dtype, device=input_ids.device
388
+ )
389
+ # In case the Vision model or the Language model has been offloaded to CPU, we need to manually
390
+ # set the corresponding tensors into their correct target device.
391
+ target_device = inputs_embeds.device
392
+ batch_indices, non_image_indices, text_to_overwrite = (
393
+ batch_indices.to(target_device),
394
+ non_image_indices.to(target_device),
395
+ text_to_overwrite.to(target_device),
396
+ )
397
+ attention_mask = attention_mask.to(target_device)
398
+
399
+ # 4. Fill the embeddings based on the mask. If we have ["hey" "<image>", "how", "are"]
400
+ # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
401
+ final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
402
+ final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
403
+ if labels is not None:
404
+ final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices]
405
+
406
+ # 5. Fill the embeddings corresponding to the images. Anything that is still zeros needs filling
407
+ image_to_overwrite = torch.all(final_embedding == 0, dim=-1)
408
+ image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None].to(target_device)
409
+
410
+ if image_to_overwrite.sum() != image_features.shape[:-1].numel():
411
+ raise ValueError(
412
+ f"The input provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while"
413
+ f" the number of image given to the model is {num_images}. This prevents correct indexing and breaks batch generation."
414
+ )
415
+
416
+ final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device)
417
+ final_attention_mask |= image_to_overwrite
418
+ position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)
419
+
420
+ # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens.
421
+ batch_indices, pad_indices = torch.where(input_ids == self.pad_token_id)
422
+ indices_to_mask = new_token_positions[batch_indices, pad_indices]
423
+
424
+ final_embedding[batch_indices, indices_to_mask] = 0
425
+
426
+ if labels is None:
427
+ final_labels = None
428
+
429
+ return final_embedding, final_attention_mask, final_labels, position_ids
430
+
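# A toy illustration of the position bookkeeping in step 2 of the merge above, assuming a
# hypothetical image placeholder id of 32000 that expands to num_image_patches = 4 embeddings.
import torch

image_token_id = 32000                                 # hypothetical placeholder id
input_ids = torch.tensor([[1, image_token_id, 2, 3]])  # e.g. "<s> <image> how are"
num_image_patches = 4
special_image_token_mask = input_ids == image_token_id
new_token_positions = torch.cumsum(special_image_token_mask * (num_image_patches - 1) + 1, dim=-1) - 1
print(new_token_positions)  # tensor([[0, 4, 5, 6]]): text after the image shifts by num_image_patches - 1 slots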
431
+ @add_start_docstrings_to_model_forward(LLAVA_NEXT_INPUTS_DOCSTRING)
432
+ @replace_return_docstrings(output_type=LlavaNextCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
433
+ def forward(
434
+ self,
435
+ input_ids: torch.LongTensor = None,
436
+ pixel_values: torch.FloatTensor = None,
437
+ image_sizes: Optional[torch.LongTensor] = None,
438
+ attention_mask: Optional[torch.Tensor] = None,
439
+ position_ids: Optional[torch.LongTensor] = None,
440
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
441
+ inputs_embeds: Optional[torch.FloatTensor] = None,
442
+ vision_feature_layer: Optional[int] = None,
443
+ vision_feature_select_strategy: Optional[str] = None,
444
+ labels: Optional[torch.LongTensor] = None,
445
+ use_cache: Optional[bool] = None,
446
+ output_attentions: Optional[bool] = None,
447
+ output_hidden_states: Optional[bool] = None,
448
+ return_dict: Optional[bool] = None,
449
+ ) -> Union[Tuple, LlavaNextCausalLMOutputWithPast]:
450
+ r"""
451
+ Args:
452
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
453
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
454
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
455
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
456
+
457
+ Returns:
458
+
459
+ Example:
460
+
461
+ ```python
462
+ >>> from PIL import Image
463
+ >>> import requests
464
+ >>> from transformers import AutoProcessor, LlavaNextForConditionalGeneration
465
+
466
+ >>> model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
467
+ >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
468
+
469
+ >>> prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
470
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
471
+ >>> image = Image.open(requests.get(url, stream=True).raw)
472
+
473
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
474
+
475
+ >>> # Generate
476
+ >>> generate_ids = model.generate(**inputs, max_length=30)
477
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
478
+ "[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot (...)"
479
+ ```"""
480
+
481
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+        vision_feature_layer = (
+            vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
+        )
+        vision_feature_select_strategy = (
+            vision_feature_select_strategy
+            if vision_feature_select_strategy is not None
+            else self.config.vision_feature_select_strategy
+        )
+
+        if inputs_embeds is None:
+            # 1. Extract the input embeddings
+            inputs_embeds = self.get_input_embeddings()(input_ids)
+
+            # 2. Merge text and images
+            if pixel_values is not None and input_ids.shape[1] != 1:
+                batch_size, num_patches, num_channels, height, width = pixel_values.shape
+                reshaped_pixel_values = pixel_values.view(batch_size * num_patches, num_channels, height, width)
+                image_features = self.vision_tower(reshaped_pixel_values, output_hidden_states=True)
+
+                selected_image_feature = image_features.hidden_states[vision_feature_layer]
+
+                if vision_feature_select_strategy == "default":
+                    selected_image_feature = selected_image_feature[:, 1:]
+                elif vision_feature_select_strategy == "full":
+                    selected_image_feature = selected_image_feature
+
+                image_features = self.multi_modal_projector(selected_image_feature)
+
+                # split up image_features for each of the individual images
+                # hence we get a list of image_features, each of shape (5, num_patches, hidden_size)
+                # if we assume each image has 5 image features (base image + 4 patches)
+                split_sizes = [image.shape[0] for image in pixel_values]
+                image_features = torch.split(image_features, split_sizes, dim=0)
+
+                # NOTE we only support multimodal_patch_merge_type == "spatial_unpad"
+                height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size
+
+                new_image_features = []
+                for image_idx, image_feature in enumerate(image_features):
+                    if image_feature.shape[0] > 1:
+                        base_image_feature = image_feature[0]
+                        image_feature = image_feature[1:]
+
+                        if height * width != base_image_feature.shape[0]:
+                            raise ValueError("The number of patches is not consistent with the image size.")
+                        num_patch_height, num_patch_width = get_anyres_image_grid_shape(
+                            image_sizes[image_idx],
+                            self.config.image_grid_pinpoints,
+                            self.config.vision_config.image_size,
+                        )
+                        image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
+                        image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
+                        image_feature = image_feature.flatten(1, 2).flatten(2, 3)
+                        image_feature = unpad_image(image_feature, image_sizes[image_idx])
+                        image_feature = torch.cat(
+                            (
+                                image_feature,
+                                self.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1),
+                            ),
+                            dim=-1,
+                        )
+                        image_feature = image_feature.flatten(1, 2).transpose(0, 1)
+                        image_feature = torch.cat((base_image_feature, image_feature), dim=0)
+                    else:
+                        image_feature = image_feature[0]
+                        image_feature = torch.cat((image_feature, self.image_newline[None]), dim=0)
+                    new_image_features.append(image_feature)
+                image_features = torch.stack(new_image_features, dim=0)
+
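Editor's note, not part of the upstream file: the view/permute/flatten chain above rearranges the per-patch features into one channels-first spatial map before unpadding. A minimal shape walkthrough, using made-up sizes (a 2x2 patch grid, 24x24 tokens per patch, hidden size 64):

import torch

num_patch_height, num_patch_width, height, width, hidden = 2, 2, 24, 24, 64
image_feature = torch.randn(num_patch_height * num_patch_width, height * width, hidden)

x = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
x = x.permute(4, 0, 2, 1, 3).contiguous()  # (hidden, ph, h, pw, w)
x = x.flatten(1, 2).flatten(2, 3)          # one (hidden, ph*h, pw*w) spatial map
print(x.shape)                             # torch.Size([64, 48, 48])

After this, unpad_image crops the rows/columns that correspond to aspect-ratio padding, a learned image_newline column is appended, and flatten(1, 2).transpose(0, 1) turns the map back into a token sequence.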
+                inputs_embeds, attention_mask, labels, position_ids = self._merge_input_ids_with_image_features(
+                    image_features, inputs_embeds, input_ids, attention_mask, labels
+                )
+                if labels is None:
+                    labels = torch.full_like(attention_mask, self.config.ignore_index).to(torch.long)
+
+            # In case input_ids.shape[1] == 1, pixel_values is not None and past_key_values is not None,
+            # we are doing a generation step with cache
+            elif past_key_values is not None and pixel_values is not None and input_ids.shape[1] == 1:
+                # Retrieve the first layer to inspect the logits and mask out the hidden states
+                # that are set to 0
+                first_layer_past_key_value = past_key_values[0][0][:, :, :, 0]
+
+                # Sum all dimensions of head_dim (-2) to avoid random errors such as: https://github.com/huggingface/transformers/pull/28032#issuecomment-1863691941
+                batch_index, non_attended_tokens = torch.where(first_layer_past_key_value.float().sum(-2) == 0)
+
+                # Get the target length
+                target_length = input_ids.shape[1]
+                past_length = first_layer_past_key_value.shape[-1]
+
+                extended_attention_mask = torch.ones(
+                    (attention_mask.shape[0], past_length),
+                    dtype=attention_mask.dtype,
+                    device=attention_mask.device,
+                )
+
+                # Filter out only the tokens that can be un-attended; this can happen
+                # if one uses LLaVA + fused modules, where the cache on the
+                # first iteration is already big enough, or if one passes a custom cache
+                valid_indices = non_attended_tokens < extended_attention_mask.size(-1)
+                new_batch_index = batch_index[valid_indices]
+                new_non_attended_tokens = non_attended_tokens[valid_indices]
+
+                # Zero-out the places where we don't need to attend
+                extended_attention_mask[new_batch_index, new_non_attended_tokens] = 0
+
+                attention_mask = torch.cat((extended_attention_mask, attention_mask[:, -target_length:]), dim=1)
+                position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
+
+        outputs = self.language_model(
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        logits = outputs[0]
+
+        loss = None
+        if labels is not None:
+            # Shift so that tokens < n predict n
+            if attention_mask is not None:
+                shift_attention_mask = attention_mask[..., 1:]
+                shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
+                shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
+            else:
+                shift_logits = logits[..., :-1, :].contiguous()
+                shift_labels = labels[..., 1:].contiguous()
+            # Flatten the tokens
+            loss_fct = nn.CrossEntropyLoss()
+            loss = loss_fct(
+                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
+            )
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        return LlavaNextCausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
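Editor's note: the forward pass above is normally driven through generate. A minimal usage sketch, assuming the publicly released llava-hf/llava-v1.6-mistral-7b-hf checkpoint and network access for the example image:

import requests
from PIL import Image

from transformers import LlavaNextForConditionalGeneration, LlavaNextProcessor

model_id = "llava-hf/llava-v1.6-mistral-7b-hf"  # assumed checkpoint
processor = LlavaNextProcessor.from_pretrained(model_id)
model = LlavaNextForConditionalGeneration.from_pretrained(model_id)

url = "https://llava-vl.github.io/static/images/view.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"  # Mistral-style prompt format

inputs = processor(text=prompt, images=image, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))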
+    def prepare_inputs_for_generation(
+        self,
+        input_ids,
+        past_key_values=None,
+        inputs_embeds=None,
+        pixel_values=None,
+        image_sizes=None,
+        attention_mask=None,
+        **kwargs,
+    ):
+        if past_key_values is not None:
+            if isinstance(past_key_values, Cache):
+                cache_length = past_key_values.get_seq_length()
+                past_length = past_key_values.seen_tokens
+            else:
+                cache_length = past_length = past_key_values[0][0].shape[2]
+
+            # Keep only the unprocessed tokens:
+            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+            #     some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
+            #     input)
+            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+            #     input_ids based on the past_length.
+            elif past_length < input_ids.shape[1]:
+                input_ids = input_ids[:, past_length:]
+            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+            elif self.config.image_token_index in input_ids:
+                input_ids = input_ids[:, input_ids.shape[1] - 1 :]
+            # If the cache has seen more tokens than it can hold, then the cache has a size limit. Let's discard the
+            # older attention values, as their corresponding values are not part of the input.
+            if cache_length < past_length and attention_mask is not None:
+                attention_mask = attention_mask[:, -(cache_length + input_ids.shape[1]) :]
+
+        position_ids = kwargs.get("position_ids", None)
+        if attention_mask is not None and position_ids is None:
+            # create position_ids on the fly for batch generation
+            position_ids = attention_mask.long().cumsum(-1) - 1
+            position_ids.masked_fill_(attention_mask == 0, 1)
+            if past_key_values:
+                position_ids = position_ids[:, -input_ids.shape[1] :]
+
+        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+        if inputs_embeds is not None and past_key_values is None:
+            model_inputs = {"inputs_embeds": inputs_embeds}
+        else:
+            model_inputs = {"input_ids": input_ids}
+
+        model_inputs.update(
+            {
+                "position_ids": position_ids,
+                "past_key_values": past_key_values,
+                "use_cache": kwargs.get("use_cache"),
+                "attention_mask": attention_mask,
+                "pixel_values": pixel_values,
+                "image_sizes": image_sizes,
+            }
+        )
+        return model_inputs
+
+    # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration._reorder_cache
+    def _reorder_cache(self, *args, **kwargs):
+        return self.language_model._reorder_cache(*args, **kwargs)
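Editor's note: prepare_inputs_for_generation derives position_ids from the attention mask so that left-padded batches still get contiguous positions. A small self-contained sketch of that cumsum trick:

import torch

# Toy batch: the first row is left-padded by two tokens.
attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                               [1, 1, 1, 1, 1]])

position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)  # padded slots get a dummy position
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])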
llmeval-env/lib/python3.10/site-packages/transformers/models/llava_next/processing_llava_next.py ADDED
@@ -0,0 +1,135 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for LLaVa-NeXT.
+"""
+
+
+from typing import List, Optional, Union
+
+from ...feature_extraction_utils import BatchFeature
+from ...image_utils import ImageInput
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from ...utils import TensorType
+
+
+class LlavaNextProcessor(ProcessorMixin):
+    r"""
+    Constructs a LLaVa-NeXT processor which wraps a LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor.
+
+    [`LlavaNextProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`] and [`LlamaTokenizerFast`]. See the
+    [`~LlavaNextProcessor.__call__`] and [`~LlavaNextProcessor.decode`] for more information.
+
+    Args:
+        image_processor ([`LlavaNextImageProcessor`], *optional*):
+            The image processor is a required input.
+        tokenizer ([`LlamaTokenizerFast`], *optional*):
+            The tokenizer is a required input.
+    """
+
+    attributes = ["image_processor", "tokenizer"]
+    image_processor_class = "LlavaNextImageProcessor"
+    tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
+
+    def __init__(self, image_processor=None, tokenizer=None):
+        super().__init__(image_processor, tokenizer)
+
+    def __call__(
+        self,
+        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+        images: ImageInput = None,
+        padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Union[bool, str, TruncationStrategy] = None,
+        max_length=None,
+        return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+    ) -> BatchFeature:
+        """
+        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
+        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
+        LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
+        of the above two methods for more information.
+
+        Args:
+            text (`str`, `List[str]`, `List[List[str]]`):
+                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+                tensor. Both channels-first and channels-last formats are supported.
+            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+                Select a strategy to pad the returned sequences (according to the model's padding side and padding
+                index) among:
+                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+                  sequence is provided).
+                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+                  acceptable input length for the model if that argument is not provided.
+                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+                  lengths).
+            max_length (`int`, *optional*):
+                Maximum length of the returned list and optionally padding length (see above).
+            truncation (`bool`, *optional*):
+                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
+            return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                If set, will return tensors of a particular framework. Acceptable values are:
+
+                - `'tf'`: Return TensorFlow `tf.constant` objects.
+                - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                - `'np'`: Return NumPy `np.ndarray` objects.
+                - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+        Returns:
+            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+              `None`).
+            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+        """
+        if images is not None:
+            image_inputs = self.image_processor(images, return_tensors=return_tensors)
+        else:
+            image_inputs = {}
+        text_inputs = self.tokenizer(
+            text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
+        )
+
+        return BatchFeature(data={**text_inputs, **image_inputs})
+
+    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
+    def batch_decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+        refer to the docstring of this method for more information.
+        """
+        return self.tokenizer.batch_decode(*args, **kwargs)
+
+    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
+    def decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+        the docstring of this method for more information.
+        """
+        return self.tokenizer.decode(*args, **kwargs)
+
+    @property
+    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
+    def model_input_names(self):
+        tokenizer_input_names = self.tokenizer.model_input_names
+        image_processor_input_names = self.image_processor.model_input_names
+        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
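Editor's note: a short usage sketch for the processor, again assuming the llava-hf/llava-v1.6-mistral-7b-hf checkpoint; the returned BatchFeature merges the tokenizer outputs with the image-processor outputs:

from PIL import Image

from transformers import LlavaNextProcessor

processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")  # assumed checkpoint

image = Image.new("RGB", (640, 480))  # placeholder image, just to exercise the call
prompt = "[INST] <image>\nDescribe the image. [/INST]"

batch = processor(text=prompt, images=image, return_tensors="pt")
print(sorted(batch.keys()))  # typically ['attention_mask', 'image_sizes', 'input_ids', 'pixel_values']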
llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__init__.py ADDED
@@ -0,0 +1,111 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_flax_available,
+    is_tf_available,
+    is_torch_available,
+)
+
+
+_import_structure = {"configuration_regnet": ["REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "RegNetConfig"]}
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_regnet"] = [
+        "REGNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "RegNetForImageClassification",
+        "RegNetModel",
+        "RegNetPreTrainedModel",
+    ]
+
+try:
+    if not is_tf_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_tf_regnet"] = [
+        "TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "TFRegNetForImageClassification",
+        "TFRegNetModel",
+        "TFRegNetPreTrainedModel",
+    ]
+
+try:
+    if not is_flax_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_flax_regnet"] = [
+        "FlaxRegNetForImageClassification",
+        "FlaxRegNetModel",
+        "FlaxRegNetPreTrainedModel",
+    ]
+
+
+if TYPE_CHECKING:
+    from .configuration_regnet import REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP, RegNetConfig
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_regnet import (
+            REGNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+            RegNetForImageClassification,
+            RegNetModel,
+            RegNetPreTrainedModel,
+        )
+
+    try:
+        if not is_tf_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_tf_regnet import (
+            TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+            TFRegNetForImageClassification,
+            TFRegNetModel,
+            TFRegNetPreTrainedModel,
+        )
+
+    try:
+        if not is_flax_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_flax_regnet import (
+            FlaxRegNetForImageClassification,
+            FlaxRegNetModel,
+            FlaxRegNetPreTrainedModel,
+        )
+
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
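Editor's note: the _LazyModule registration above keeps `import transformers.models.regnet` cheap; submodules such as modeling_tf_regnet are only imported when one of their names is first accessed. A stripped-down stand-in for that pattern (not the transformers implementation) looks roughly like this:

import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal stand-in: resolve attributes from submodules on first access and cache them."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so subsequent lookups bypass __getattr__
        return value

The real _LazyModule also handles __dir__, pickling and missing optional dependencies, but the core idea is the same deferred importlib.import_module call.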
llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.51 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/configuration_regnet.cpython-310.pyc ADDED
Binary file (3.55 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_seer_10b_to_pytorch.cpython-310.pyc ADDED
Binary file (9.51 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/convert_regnet_to_pytorch.cpython-310.pyc ADDED
Binary file (15.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/regnet/__pycache__/modeling_flax_regnet.cpython-310.pyc ADDED
Binary file (23 kB). View file