applied-ai-018 committed
Commit dbfc818 · verified · 1 Parent(s): 8b6ee29

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py +246 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py +0 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__init__.py +77 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/__init__.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/convert_bros_to_pytorch.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/processing_bros.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/configuration_bros.py +140 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/convert_bros_to_pytorch.py +145 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/modeling_bros.py +1320 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/models/bros/processing_bros.py +109 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/__init__.py +28 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/convert_byt5_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py +60 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py +234 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/__init__.py +83 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/processing_clvp.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/configuration_clvp.py +457 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/feature_extraction_clvp.py +238 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py +91 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/tokenization_clvp.py +379 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/__init__.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/configuration_decision_transformer.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py +117 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py +1438 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py +1657 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py +520 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__init__.py +82 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/models/rag/configuration_rag.py +182 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py +1628 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py +674 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/models/rag/tokenization_rag.py +120 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__init__.py +103 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/__init__.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/configuration_reformer.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/convert_reformer_trax_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/modeling_reformer.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/tokenization_reformer.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/tokenization_reformer_fast.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/configuration_reformer.py +239 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py +222 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/modeling_reformer.py +0 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/tokenization_reformer.py +186 -0
env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (951 Bytes).

env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc ADDED
Binary file (10.4 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc ADDED
Binary file (79.5 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py ADDED
@@ -0,0 +1,246 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Autoformer model configuration"""

from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`AutoformerModel`]. It is used to instantiate an
    Autoformer model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Autoformer
    [huggingface/autoformer-tourism-monthly](https://huggingface.co/huggingface/autoformer-tourism-monthly)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        prediction_length (`int`):
            The prediction length for the decoder. In other words, the prediction horizon of the model.
        context_length (`int`, *optional*, defaults to `prediction_length`):
            The context length for the encoder. If unset, the context length will be the same as the
            `prediction_length`.
        distribution_output (`string`, *optional*, defaults to `"student_t"`):
            The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
        loss (`string`, *optional*, defaults to `"nll"`):
            The loss function for the model corresponding to the `distribution_output` head. For parametric
            distributions it is the negative log likelihood (nll) - which currently is the only supported one.
        input_size (`int`, *optional*, defaults to 1):
            The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
            multivariate targets.
        lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
            The lags of the input time series as covariates often dictated by the frequency. Default is `[1, 2, 3, 4,
            5, 6, 7]`.
        scaling (`bool`, *optional* defaults to `True`):
            Whether to scale the input targets.
        num_time_features (`int`, *optional*, defaults to 0):
            The number of time features in the input time series.
        num_dynamic_real_features (`int`, *optional*, defaults to 0):
            The number of dynamic real valued features.
        num_static_categorical_features (`int`, *optional*, defaults to 0):
            The number of static categorical features.
        num_static_real_features (`int`, *optional*, defaults to 0):
            The number of static real valued features.
        cardinality (`list[int]`, *optional*):
            The cardinality (number of different values) for each of the static categorical features. Should be a list
            of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
            `num_static_categorical_features` is > 0.
        embedding_dimension (`list[int]`, *optional*):
            The dimension of the embedding for each of the static categorical features. Should be a list of integers,
            having the same length as `num_static_categorical_features`. Cannot be `None` if
            `num_static_categorical_features` is > 0.
        d_model (`int`, *optional*, defaults to 64):
            Dimensionality of the transformer layers.
        encoder_layers (`int`, *optional*, defaults to 2):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 2):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 2):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 2):
            Number of attention heads for each attention layer in the Transformer decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 32):
            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 32):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
            `"relu"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the encoder, and decoder.
        encoder_layerdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention and fully connected layers for each encoder layer.
        decoder_layerdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention and fully connected layers for each decoder layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability used between the two layers of the feed-forward networks.
        num_parallel_samples (`int`, *optional*, defaults to 100):
            The number of samples to generate in parallel for each time step of inference.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal weight initialization distribution.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
        label_length (`int`, *optional*, defaults to 10):
            Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
            non-autoregressive generation).
        moving_average (`int`, defaults to 25):
            The window size of the moving average. In practice, it's the kernel size in AvgPool1d of the Decomposition
            Layer.
        autocorrelation_factor (`int`, defaults to 3):
            "Attention" (i.e. AutoCorrelation mechanism) factor which is used to find top k autocorrelations delays.
            It's recommended in the paper to set it to a number between 1 and 5.


    Example:

    ```python
    >>> from transformers import AutoformerConfig, AutoformerModel

    >>> # Initializing a default Autoformer configuration
    >>> configuration = AutoformerConfig()

    >>> # Randomly initializing a model (with random weights) from the configuration
    >>> model = AutoformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
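For reference, a minimal sketch (assuming a transformers installation that includes Autoformer, and using hypothetical example values chosen only to make the arithmetic visible) of how `feature_size` falls out of the constructor arguments in the file above:

```python
from transformers import AutoformerConfig

# Hypothetical dataset-specific values; real numbers would differ.
config = AutoformerConfig(
    prediction_length=24,               # forecast horizon
    context_length=48,                  # encoder window
    num_time_features=2,                # e.g. an "age" feature plus a calendar encoding
    num_static_categorical_features=1,
    cardinality=[366],                  # one categorical feature with 366 possible values
)

# embedding_dimension defaults to min(50, (cardinality + 1) // 2) per feature -> [50]
# _number_of_features = sum(embedding_dimension) + num_dynamic_real_features
#                       + num_time_features + num_static_real_features + input_size * 2
#                     = 50 + 0 + 2 + 0 + 2 = 54
# feature_size = input_size * len(lags_sequence) + _number_of_features = 1 * 7 + 54 = 61
print(config.feature_size)  # 61
```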
env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py ADDED
The diff for this file is too large to render.
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__init__.py ADDED
@@ -0,0 +1,77 @@
# Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bros": ["BROS_PRETRAINED_CONFIG_ARCHIVE_MAP", "BrosConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["processing_bros"] = ["BrosProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bros"] = [
        "BROS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BrosPreTrainedModel",
        "BrosModel",
        "BrosForTokenClassification",
        "BrosSpadeEEForTokenClassification",
        "BrosSpadeELForTokenClassification",
    ]


if TYPE_CHECKING:
    from .configuration_bros import BROS_PRETRAINED_CONFIG_ARCHIVE_MAP, BrosConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .processing_bros import BrosProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bros import (
            BROS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BrosForTokenClassification,
            BrosModel,
            BrosPreTrainedModel,
            BrosSpadeEEForTokenClassification,
            BrosSpadeELForTokenClassification,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
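A minimal sketch (assuming transformers is installed, with torch available) of what the `_LazyModule` indirection above means in practice: names are only materialized from the submodules listed in `_import_structure` when first accessed, and torch-backed classes are only registered when `is_torch_available()` is true.

```python
# Config classes are always registered by this __init__.
from transformers.models.bros import BrosConfig

# Model classes are only present when the optional torch dependency is available;
# otherwise the lazy module has no such attribute and the import fails.
try:
    from transformers.models.bros import BrosModel
except ImportError:
    BrosModel = None  # torch not installed in this environment

print(BrosConfig().model_type)  # "bros"
```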
env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc ADDED
Binary file (5.73 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/convert_bros_to_pytorch.cpython-310.pyc ADDED
Binary file (3.32 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc ADDED
Binary file (36.8 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/bros/__pycache__/processing_bros.cpython-310.pyc ADDED
Binary file (3.58 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/bros/configuration_bros.py ADDED
@@ -0,0 +1,140 @@
# coding=utf-8
# Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Bros model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BROS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "jinho8345/bros-base-uncased": "https://huggingface.co/jinho8345/bros-base-uncased/blob/main/config.json",
    "jinho8345/bros-large-uncased": "https://huggingface.co/jinho8345/bros-large-uncased/blob/main/config.json",
}


class BrosConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BrosModel`] or a [`TFBrosModel`]. It is used to
    instantiate a Bros model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Bros
    [jinho8345/bros-base-uncased](https://huggingface.co/jinho8345/bros-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Bros model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            The index of the padding token in the token vocabulary.
        dim_bbox (`int`, *optional*, defaults to 8):
            The dimension of the bounding box coordinates. (x0, y1, x1, y0, x1, y1, x0, y1)
        bbox_scale (`float`, *optional*, defaults to 100.0):
            The scale factor of the bounding box coordinates.
        n_relations (`int`, *optional*, defaults to 1):
            The number of relations for SpadeEE(entity extraction), SpadeEL(entity linking) head.
        classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the classifier head.


    Examples:

    ```python
    >>> from transformers import BrosConfig, BrosModel

    >>> # Initializing a BROS jinho8345/bros-base-uncased style configuration
    >>> configuration = BrosConfig()

    >>> # Initializing a model from the jinho8345/bros-base-uncased style configuration
    >>> model = BrosModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "bros"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        dim_bbox=8,
        bbox_scale=100.0,
        n_relations=1,
        classifier_dropout_prob=0.1,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            **kwargs,
        )

        self.dim_bbox = dim_bbox
        self.bbox_scale = bbox_scale
        self.n_relations = n_relations
        self.dim_bbox_sinusoid_emb_2d = self.hidden_size // 4
        self.dim_bbox_sinusoid_emb_1d = self.dim_bbox_sinusoid_emb_2d // self.dim_bbox
        self.dim_bbox_projection = self.hidden_size // self.num_attention_heads
        self.classifier_dropout_prob = classifier_dropout_prob
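A minimal sketch (assuming the base-size defaults shown above) of how the derived bounding-box dimensions fall out of `hidden_size`, `num_attention_heads`, and `dim_bbox` in the constructor:

```python
from transformers import BrosConfig

config = BrosConfig()  # hidden_size=768, num_attention_heads=12, dim_bbox=8

# Derived in __init__ above:
#   dim_bbox_sinusoid_emb_2d = hidden_size // 4                     = 768 // 4  = 192
#   dim_bbox_sinusoid_emb_1d = dim_bbox_sinusoid_emb_2d // dim_bbox = 192 // 8  = 24
#   dim_bbox_projection      = hidden_size // num_attention_heads   = 768 // 12 = 64
print(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_sinusoid_emb_1d, config.dim_bbox_projection)
# 192 24 64
```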
env-llmeval/lib/python3.10/site-packages/transformers/models/bros/convert_bros_to_pytorch.py ADDED
@@ -0,0 +1,145 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Bros checkpoints."""

import argparse

import bros  # original repo
import torch

from transformers import BrosConfig, BrosModel, BrosProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_configs(model_name):
    bros_config = BrosConfig.from_pretrained(model_name)
    return bros_config


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "embeddings.bbox_sinusoid_emb.inv_freq",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if name == "embeddings.bbox_projection.weight":
        name = "bbox_embeddings.bbox_projection.weight"

    if name == "embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq":
        name = "bbox_embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq"

    if name == "embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq":
        name = "bbox_embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq"

    return name


def convert_state_dict(orig_state_dict, model):
    # rename keys
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        orig_state_dict[rename_key(key)] = val

    # remove ignore keys
    remove_ignore_keys_(orig_state_dict)

    return orig_state_dict


def convert_bros_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = bros.BrosModel.from_pretrained(model_name).eval()

    # load HuggingFace Model
    bros_config = get_configs(model_name)
    model = BrosModel.from_pretrained(model_name, config=bros_config)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results

    # original BROS model require 4 points (8 float values) for each bbox, prepare bbox with [batch_size, seq_len, 8] shape
    bbox = torch.tensor(
        [
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.4396, 0.6720, 0.4659, 0.6720, 0.4659, 0.6850, 0.4396, 0.6850],
                [0.4698, 0.6720, 0.4843, 0.6720, 0.4843, 0.6850, 0.4698, 0.6850],
                [0.4698, 0.6720, 0.4843, 0.6720, 0.4843, 0.6850, 0.4698, 0.6850],
                [0.2047, 0.6870, 0.2730, 0.6870, 0.2730, 0.7000, 0.2047, 0.7000],
                [0.2047, 0.6870, 0.2730, 0.6870, 0.2730, 0.7000, 0.2047, 0.7000],
                [1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
            ]
        ]
    )

    processor = BrosProcessor.from_pretrained(model_name)

    encoding = processor("His name is Rocco.", return_tensors="pt")
    encoding["bbox"] = bbox

    original_hidden_states = original_model(**encoding).last_hidden_state
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    last_hidden_states = model(**encoding).last_hidden_state

    assert torch.allclose(original_hidden_states, last_hidden_states, atol=1e-4)

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("jinho8345/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("jinho8345/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jinho8345/bros-base-uncased",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_bros_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
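For context, a sketch of how the conversion entry point above could be driven from Python instead of the CLI. This assumes the original `bros` package from the NAVER reference repository is importable and that this script is on the path; the output directory name is hypothetical.

```python
# Equivalent to:
#   python convert_bros_to_pytorch.py --model_name jinho8345/bros-base-uncased \
#       --pytorch_dump_folder_path ./bros-base-uncased-converted
from convert_bros_to_pytorch import convert_bros_checkpoint  # assuming the script is importable

convert_bros_checkpoint(
    model_name="jinho8345/bros-base-uncased",
    pytorch_dump_folder_path="./bros-base-uncased-converted",  # hypothetical output folder
    push_to_hub=False,
)
```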
env-llmeval/lib/python3.10/site-packages/transformers/models/bros/modeling_bros.py ADDED
@@ -0,0 +1,1320 @@
# coding=utf-8
# Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Bros model."""


import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_bros import BrosConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "jinho8345/bros-base-uncased"
_CONFIG_FOR_DOC = "BrosConfig"

BROS_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "jinho8345/bros-base-uncased",
    "jinho8345/bros-large-uncased",
    # See all Bros models at https://huggingface.co/models?filter=bros
]

BROS_START_DOCSTRING = r"""
    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`BrosConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

BROS_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`BrosProcessor`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)

        bbox ('torch.FloatTensor' of shape '(batch_size, num_boxes, 4)'):
            Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
            (x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
            bounding box.

        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

        bbox_first_token_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)

        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.

        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@dataclass
class BrosSpadeOutput(ModelOutput):
    """
    Base class for outputs of token classification models.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) :
            Classification loss.
        initial_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
            Classification scores for entity initial tokens (before SoftMax).
        subsequent_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length+1)`):
            Classification scores for entity sequence tokens (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    initial_token_logits: torch.FloatTensor = None
    subsequent_token_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class BrosPositionalEmbedding1D(nn.Module):
    # Reference: https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py#L15

    def __init__(self, config):
        super(BrosPositionalEmbedding1D, self).__init__()

        self.dim_bbox_sinusoid_emb_1d = config.dim_bbox_sinusoid_emb_1d

        inv_freq = 1 / (
            10000 ** (torch.arange(0.0, self.dim_bbox_sinusoid_emb_1d, 2.0) / self.dim_bbox_sinusoid_emb_1d)
        )
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, pos_seq: torch.Tensor) -> torch.Tensor:
        seq_size = pos_seq.size()
        b1, b2, b3 = seq_size
        sinusoid_inp = pos_seq.view(b1, b2, b3, 1) * self.inv_freq.view(1, 1, 1, self.dim_bbox_sinusoid_emb_1d // 2)
        pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
        return pos_emb


class BrosPositionalEmbedding2D(nn.Module):
    def __init__(self, config):
        super(BrosPositionalEmbedding2D, self).__init__()

        self.dim_bbox = config.dim_bbox
        self.x_pos_emb = BrosPositionalEmbedding1D(config)
        self.y_pos_emb = BrosPositionalEmbedding1D(config)

    def forward(self, bbox: torch.Tensor) -> torch.Tensor:
        stack = []
        for i in range(self.dim_bbox):
            if i % 2 == 0:
                stack.append(self.x_pos_emb(bbox[..., i]))
            else:
                stack.append(self.y_pos_emb(bbox[..., i]))
        bbox_pos_emb = torch.cat(stack, dim=-1)
        return bbox_pos_emb


class BrosBboxEmbeddings(nn.Module):
    def __init__(self, config):
        super(BrosBboxEmbeddings, self).__init__()
        self.bbox_sinusoid_emb = BrosPositionalEmbedding2D(config)
        self.bbox_projection = nn.Linear(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_projection, bias=False)

    def forward(self, bbox: torch.Tensor):
        bbox_t = bbox.transpose(0, 1)
        bbox_pos = bbox_t[None, :, :, :] - bbox_t[:, None, :, :]
        bbox_pos_emb = self.bbox_sinusoid_emb(bbox_pos)
        bbox_pos_emb = self.bbox_projection(bbox_pos_emb)

        return bbox_pos_emb


class BrosTextEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()

        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.register_buffer(
            "token_type_ids",
            torch.zeros(
                self.position_ids.size(),
                dtype=torch.long,
                device=self.position_ids.device,
            ),
            persistent=False,
        )

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BrosSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor):
        new_x_shape = x.size()[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        bbox_pos_emb: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[torch.Tensor] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)

                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        # bbox positional encoding
        batch_size, n_head, seq_length, d_head = query_layer.shape
        bbox_pos_emb = bbox_pos_emb.view(seq_length, seq_length, batch_size, d_head)
        bbox_pos_emb = bbox_pos_emb.permute([2, 0, 1, 3])
        bbox_pos_scores = torch.einsum("bnid,bijd->bnij", (query_layer, bbox_pos_emb))

        attention_scores = attention_scores + bbox_pos_scores

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BrosModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Bros
class BrosSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BrosAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = BrosSelfAttention(config)
        self.output = BrosSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads,
            self.self.num_attention_heads,
            self.self.attention_head_size,
            self.pruned_heads,
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        bbox_pos_emb: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states=hidden_states,
            bbox_pos_emb=bbox_pos_emb,
            attention_mask=attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Bros
class BrosIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
500
+ if isinstance(config.hidden_act, str):
501
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
502
+ else:
503
+ self.intermediate_act_fn = config.hidden_act
504
+
505
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
506
+ hidden_states = self.dense(hidden_states)
507
+ hidden_states = self.intermediate_act_fn(hidden_states)
508
+ return hidden_states
509
+
510
+
511
+ class BrosOutput(nn.Module):
512
+ def __init__(self, config):
513
+ super().__init__()
514
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
515
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
516
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
517
+
518
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
519
+ hidden_states = self.dense(hidden_states)
520
+ hidden_states = self.dropout(hidden_states)
521
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
522
+ return hidden_states
523
+
524
+
525
+ class BrosLayer(nn.Module):
526
+ def __init__(self, config):
527
+ super().__init__()
528
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
529
+ self.seq_len_dim = 1
530
+ self.attention = BrosAttention(config)
531
+ self.is_decoder = config.is_decoder
532
+ self.add_cross_attention = config.add_cross_attention
533
+ if self.add_cross_attention:
534
+ if not self.is_decoder:
535
+ raise Exception(f"{self} should be used as a decoder model if cross attention is added")
536
+ self.crossattention = BrosAttention(config)
537
+ self.intermediate = BrosIntermediate(config)
538
+ self.output = BrosOutput(config)
539
+
540
+ def forward(
541
+ self,
542
+ hidden_states: torch.Tensor,
543
+ bbox_pos_emb: torch.Tensor,
544
+ attention_mask: Optional[torch.FloatTensor] = None,
545
+ head_mask: Optional[torch.FloatTensor] = None,
546
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
547
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
548
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
549
+ output_attentions: Optional[bool] = False,
550
+ ) -> Tuple[torch.Tensor]:
551
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
552
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
553
+ self_attention_outputs = self.attention(
554
+ hidden_states,
555
+ bbox_pos_emb=bbox_pos_emb,
556
+ attention_mask=attention_mask,
557
+ head_mask=head_mask,
558
+ output_attentions=output_attentions,
559
+ past_key_value=self_attn_past_key_value,
560
+ )
561
+ attention_output = self_attention_outputs[0]
562
+
563
+ # if decoder, the last output is tuple of self-attn cache
564
+ if self.is_decoder:
565
+ outputs = self_attention_outputs[1:-1]
566
+ present_key_value = self_attention_outputs[-1]
567
+ else:
568
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
569
+
570
+ cross_attn_present_key_value = None
571
+ if self.is_decoder and encoder_hidden_states is not None:
572
+ if not hasattr(self, "crossattention"):
573
+ raise Exception(
574
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
575
+ )
576
+
577
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
578
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
579
+ cross_attention_outputs = self.crossattention(
580
+ attention_output,
581
+ attention_mask,
582
+ head_mask,
583
+ encoder_hidden_states,
584
+ encoder_attention_mask,
585
+ cross_attn_past_key_value,
586
+ output_attentions,
587
+ )
588
+ attention_output = cross_attention_outputs[0]
589
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
590
+
591
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
592
+ cross_attn_present_key_value = cross_attention_outputs[-1]
593
+ present_key_value = present_key_value + cross_attn_present_key_value
594
+
595
+ layer_output = apply_chunking_to_forward(
596
+ self.feed_forward_chunk,
597
+ self.chunk_size_feed_forward,
598
+ self.seq_len_dim,
599
+ attention_output,
600
+ )
601
+ outputs = (layer_output,) + outputs
602
+
603
+ # if decoder, return the attn key/values as the last output
604
+ if self.is_decoder:
605
+ outputs = outputs + (present_key_value,)
606
+
607
+ return outputs
608
+
609
+ def feed_forward_chunk(self, attention_output):
610
+ intermediate_output = self.intermediate(attention_output)
611
+ layer_output = self.output(intermediate_output, attention_output)
612
+ return layer_output
613
+
614
+
615
+ class BrosEncoder(nn.Module):
616
+ def __init__(self, config):
617
+ super().__init__()
618
+ self.config = config
619
+ self.layer = nn.ModuleList([BrosLayer(config) for _ in range(config.num_hidden_layers)])
620
+
621
+ def forward(
622
+ self,
623
+ hidden_states: torch.Tensor,
624
+ bbox_pos_emb: torch.Tensor,
625
+ attention_mask: Optional[torch.FloatTensor] = None,
626
+ head_mask: Optional[torch.FloatTensor] = None,
627
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
628
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
629
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
630
+ use_cache: Optional[bool] = None,
631
+ output_attentions: Optional[bool] = False,
632
+ output_hidden_states: Optional[bool] = False,
633
+ return_dict: Optional[bool] = True,
634
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
635
+ all_hidden_states = () if output_hidden_states else None
636
+ all_self_attentions = () if output_attentions else None
637
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
638
+
639
+ next_decoder_cache = () if use_cache else None
640
+ for i, layer_module in enumerate(self.layer):
641
+ if output_hidden_states:
642
+ all_hidden_states = all_hidden_states + (hidden_states,)
643
+
644
+ layer_head_mask = head_mask[i] if head_mask is not None else None
645
+ past_key_value = past_key_values[i] if past_key_values is not None else None
646
+
647
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
648
+ if use_cache:
649
+ logger.warning(
650
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
651
+ "`use_cache=False`..."
652
+ )
653
+ use_cache = False
654
+ layer_outputs = self._gradient_checkpointing_func(
655
+ layer_module.__call__,
656
+ hidden_states,
657
+ bbox_pos_emb,
658
+ attention_mask,
659
+ layer_head_mask,
660
+ encoder_hidden_states,
661
+ encoder_attention_mask,
662
+ output_attentions,
663
+ )
664
+ else:
665
+ layer_outputs = layer_module(
666
+ hidden_states=hidden_states,
667
+ bbox_pos_emb=bbox_pos_emb,
668
+ attention_mask=attention_mask,
669
+ head_mask=layer_head_mask,
670
+ encoder_hidden_states=encoder_hidden_states,
671
+ encoder_attention_mask=encoder_attention_mask,
672
+ past_key_value=past_key_value,
673
+ output_attentions=output_attentions,
674
+ )
675
+
676
+ hidden_states = layer_outputs[0]
677
+ if use_cache:
678
+ next_decoder_cache += (layer_outputs[-1],)
679
+ if output_attentions:
680
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
681
+ if self.config.add_cross_attention:
682
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
683
+
684
+ if output_hidden_states:
685
+ all_hidden_states = all_hidden_states + (hidden_states,)
686
+
687
+ if not return_dict:
688
+ return tuple(
689
+ v
690
+ for v in [
691
+ hidden_states,
692
+ next_decoder_cache,
693
+ all_hidden_states,
694
+ all_self_attentions,
695
+ all_cross_attentions,
696
+ ]
697
+ if v is not None
698
+ )
699
+ return BaseModelOutputWithPastAndCrossAttentions(
700
+ last_hidden_state=hidden_states,
701
+ past_key_values=next_decoder_cache,
702
+ hidden_states=all_hidden_states,
703
+ attentions=all_self_attentions,
704
+ cross_attentions=all_cross_attentions,
705
+ )
706
+
707
+
708
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Bros
709
+ class BrosPooler(nn.Module):
710
+ def __init__(self, config):
711
+ super().__init__()
712
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
713
+ self.activation = nn.Tanh()
714
+
715
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
716
+ # We "pool" the model by simply taking the hidden state corresponding
717
+ # to the first token.
718
+ first_token_tensor = hidden_states[:, 0]
719
+ pooled_output = self.dense(first_token_tensor)
720
+ pooled_output = self.activation(pooled_output)
721
+ return pooled_output
722
+
723
+
724
+ class BrosRelationExtractor(nn.Module):
725
+ def __init__(self, config):
726
+ super().__init__()
727
+ self.n_relations = config.n_relations
728
+ self.backbone_hidden_size = config.hidden_size
729
+ self.head_hidden_size = config.hidden_size
730
+ self.classifier_dropout_prob = config.classifier_dropout_prob
731
+
732
+ self.drop = nn.Dropout(self.classifier_dropout_prob)
733
+ self.query = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
734
+
735
+ self.key = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
736
+
737
+ self.dummy_node = nn.Parameter(torch.zeros(1, self.backbone_hidden_size))
738
+
739
+ def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor):
740
+ query_layer = self.query(self.drop(query_layer))
741
+
742
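+ # append a learned dummy key so every token has an extra (seq_len + 1)-th "no relation" target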
+ dummy_vec = self.dummy_node.unsqueeze(0).repeat(1, key_layer.size(1), 1)
743
+ key_layer = torch.cat([key_layer, dummy_vec], axis=0)
744
+ key_layer = self.key(self.drop(key_layer))
745
+
746
+ query_layer = query_layer.view(
747
+ query_layer.size(0), query_layer.size(1), self.n_relations, self.head_hidden_size
748
+ )
749
+ key_layer = key_layer.view(key_layer.size(0), key_layer.size(1), self.n_relations, self.head_hidden_size)
750
+
751
+ relation_score = torch.matmul(
752
+ query_layer.permute(2, 1, 0, 3), key_layer.permute(2, 1, 3, 0)
753
+ ) # equivalent to torch.einsum("ibnd,jbnd->nbij", (query_layer, key_layer))
754
+
755
+ return relation_score
756
+
757
+
758
+ class BrosPreTrainedModel(PreTrainedModel):
759
+ """
760
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
761
+ models.
762
+ """
763
+
764
+ config_class = BrosConfig
765
+ base_model_prefix = "bros"
766
+
767
+ def _init_weights(self, module):
768
+ """Initialize the weights"""
769
+ if isinstance(module, nn.Linear):
770
+ # Slightly different from the TF version which uses truncated_normal for initialization
771
+ # cf https://github.com/pytorch/pytorch/pull/5617
772
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
773
+ if module.bias is not None:
774
+ module.bias.data.zero_()
775
+ elif isinstance(module, nn.Embedding):
776
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
777
+ if module.padding_idx is not None:
778
+ module.weight.data[module.padding_idx].zero_()
779
+ elif isinstance(module, nn.LayerNorm):
780
+ module.bias.data.zero_()
781
+ module.weight.data.fill_(1.0)
782
+
783
+
784
+ @add_start_docstrings(
785
+ "The bare Bros Model transformer outputting raw hidden-states without any specific head on top.",
786
+ BROS_START_DOCSTRING,
787
+ )
788
+ class BrosModel(BrosPreTrainedModel):
789
+ def __init__(self, config, add_pooling_layer=True):
790
+ super().__init__(config)
791
+ self.config = config
792
+
793
+ self.embeddings = BrosTextEmbeddings(config)
794
+ self.bbox_embeddings = BrosBboxEmbeddings(config)
795
+ self.encoder = BrosEncoder(config)
796
+
797
+ self.pooler = BrosPooler(config) if add_pooling_layer else None
798
+
799
+ self.init_weights()
800
+
801
+ def get_input_embeddings(self):
802
+ return self.embeddings.word_embeddings
803
+
804
+ def set_input_embeddings(self, value):
805
+ self.embeddings.word_embeddings = value
806
+
807
+ def _prune_heads(self, heads_to_prune):
808
+ """
809
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
810
+ class PreTrainedModel
811
+ """
812
+ for layer, heads in heads_to_prune.items():
813
+ self.encoder.layer[layer].attention.prune_heads(heads)
814
+
815
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
816
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
817
+ def forward(
818
+ self,
819
+ input_ids: Optional[torch.Tensor] = None,
820
+ bbox: Optional[torch.Tensor] = None,
821
+ attention_mask: Optional[torch.Tensor] = None,
822
+ token_type_ids: Optional[torch.Tensor] = None,
823
+ position_ids: Optional[torch.Tensor] = None,
824
+ head_mask: Optional[torch.Tensor] = None,
825
+ inputs_embeds: Optional[torch.Tensor] = None,
826
+ encoder_hidden_states: Optional[torch.Tensor] = None,
827
+ encoder_attention_mask: Optional[torch.Tensor] = None,
828
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
829
+ use_cache: Optional[bool] = None,
830
+ output_attentions: Optional[bool] = None,
831
+ output_hidden_states: Optional[bool] = None,
832
+ return_dict: Optional[bool] = None,
833
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
834
+ r"""
835
+ Returns:
836
+
837
+ Examples:
838
+
839
+ ```python
840
+ >>> import torch
841
+ >>> from transformers import BrosProcessor, BrosModel
842
+
843
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
844
+
845
+ >>> model = BrosModel.from_pretrained("jinho8345/bros-base-uncased")
846
+
847
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
848
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
849
+ >>> encoding["bbox"] = bbox
850
+
851
+ >>> outputs = model(**encoding)
852
+ >>> last_hidden_states = outputs.last_hidden_state
853
+ ```"""
854
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
855
+ output_hidden_states = (
856
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
857
+ )
858
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
859
+
860
+ if self.config.is_decoder:
861
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
862
+ else:
863
+ use_cache = False
864
+
865
+ if input_ids is not None and inputs_embeds is not None:
866
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
867
+ elif input_ids is not None:
868
+ input_shape = input_ids.size()
869
+ elif inputs_embeds is not None:
870
+ input_shape = inputs_embeds.size()[:-1]
871
+ else:
872
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
873
+
874
+ if bbox is None:
875
+ raise ValueError("You have to specify bbox")
876
+
877
+ batch_size, seq_length = input_shape
878
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
879
+
880
+ # past_key_values_length
881
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
882
+
883
+ if attention_mask is None:
884
+ attention_mask = torch.ones(input_shape, device=device)
885
+
886
+ if token_type_ids is None:
887
+ if hasattr(self.embeddings, "token_type_ids"):
888
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
889
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
890
+ token_type_ids = buffered_token_type_ids_expanded
891
+ else:
892
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
893
+
894
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
895
+ # ourselves in which case we just need to make it broadcastable to all heads.
896
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
897
+
898
+ # If a 2D or 3D attention mask is provided for the cross-attention
899
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
900
+ if self.config.is_decoder and encoder_hidden_states is not None:
901
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
902
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
903
+ if encoder_attention_mask is None:
904
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
905
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
906
+ else:
907
+ encoder_extended_attention_mask = None
908
+
909
+ # Prepare head mask if needed
910
+ # 1.0 in head_mask indicate we keep the head
911
+ # attention_probs has shape bsz x n_heads x N x N
912
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
913
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
914
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
915
+
916
+ embedding_output = self.embeddings(
917
+ input_ids=input_ids,
918
+ position_ids=position_ids,
919
+ token_type_ids=token_type_ids,
920
+ inputs_embeds=inputs_embeds,
921
+ past_key_values_length=past_key_values_length,
922
+ )
923
+
924
+ # if bbox has 2 points (4 coordinate values) per token, convert it to 4 points (8 coordinate values) per token
925
+ if bbox.shape[-1] == 4:
926
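+ # (x0, y0, x1, y1) -> four corners: (x0, y0), (x1, y0), (x1, y1), (x0, y1)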
+ bbox = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]
927
+ scaled_bbox = bbox * self.config.bbox_scale
928
+ bbox_position_embeddings = self.bbox_embeddings(scaled_bbox)
929
+
930
+ encoder_outputs = self.encoder(
931
+ embedding_output,
932
+ bbox_pos_emb=bbox_position_embeddings,
933
+ attention_mask=extended_attention_mask,
934
+ head_mask=head_mask,
935
+ encoder_hidden_states=encoder_hidden_states,
936
+ encoder_attention_mask=encoder_extended_attention_mask,
937
+ past_key_values=past_key_values,
938
+ use_cache=use_cache,
939
+ output_attentions=output_attentions,
940
+ output_hidden_states=output_hidden_states,
941
+ return_dict=return_dict,
942
+ )
943
+ sequence_output = encoder_outputs[0]
944
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
945
+
946
+ if not return_dict:
947
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
948
+
949
+ return BaseModelOutputWithPoolingAndCrossAttentions(
950
+ last_hidden_state=sequence_output,
951
+ pooler_output=pooled_output,
952
+ past_key_values=encoder_outputs.past_key_values,
953
+ hidden_states=encoder_outputs.hidden_states,
954
+ attentions=encoder_outputs.attentions,
955
+ cross_attentions=encoder_outputs.cross_attentions,
956
+ )
957
+
958
+
959
+ @add_start_docstrings(
960
+ """
961
+ Bros Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
962
+ Named-Entity-Recognition (NER) tasks.
963
+ """,
964
+ BROS_START_DOCSTRING,
965
+ )
966
+ class BrosForTokenClassification(BrosPreTrainedModel):
967
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
968
+
969
+ def __init__(self, config):
970
+ super().__init__(config)
971
+ self.num_labels = config.num_labels
972
+
973
+ self.bros = BrosModel(config)
974
+ classifier_dropout = (
975
+ config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
976
+ )
977
+ self.dropout = nn.Dropout(classifier_dropout)
978
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
979
+
980
+ self.init_weights()
981
+
982
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
983
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
984
+ def forward(
985
+ self,
986
+ input_ids: Optional[torch.Tensor] = None,
987
+ bbox: Optional[torch.Tensor] = None,
988
+ attention_mask: Optional[torch.Tensor] = None,
989
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
990
+ token_type_ids: Optional[torch.Tensor] = None,
991
+ position_ids: Optional[torch.Tensor] = None,
992
+ head_mask: Optional[torch.Tensor] = None,
993
+ inputs_embeds: Optional[torch.Tensor] = None,
994
+ labels: Optional[torch.Tensor] = None,
995
+ output_attentions: Optional[bool] = None,
996
+ output_hidden_states: Optional[bool] = None,
997
+ return_dict: Optional[bool] = None,
998
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
999
+ r"""
1000
+
1001
+ Returns:
1002
+
1003
+ Examples:
1004
+
1005
+ ```python
1006
+ >>> import torch
1007
+ >>> from transformers import BrosProcessor, BrosForTokenClassification
1008
+
1009
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
1010
+
1011
+ >>> model = BrosForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
1012
+
1013
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
1014
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
1015
+ >>> encoding["bbox"] = bbox
1016
+
1017
+ >>> outputs = model(**encoding)
1018
+ ```"""
1019
+
1020
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1021
+
1022
+ outputs = self.bros(
1023
+ input_ids,
1024
+ bbox=bbox,
1025
+ attention_mask=attention_mask,
1026
+ token_type_ids=token_type_ids,
1027
+ position_ids=position_ids,
1028
+ head_mask=head_mask,
1029
+ inputs_embeds=inputs_embeds,
1030
+ output_attentions=output_attentions,
1031
+ output_hidden_states=output_hidden_states,
1032
+ return_dict=return_dict,
1033
+ )
1034
+
1035
+ sequence_output = outputs[0]
1036
+
1037
+ sequence_output = self.dropout(sequence_output)
1038
+ logits = self.classifier(sequence_output)
1039
+
1040
+ loss = None
1041
+ if labels is not None:
1042
+ loss_fct = CrossEntropyLoss()
1043
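+ # when a bbox_first_token_mask is given, compute the loss only on the first token of each bounding box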
+ if bbox_first_token_mask is not None:
1044
+ bbox_first_token_mask = bbox_first_token_mask.view(-1)
1045
+ loss = loss_fct(
1046
+ logits.view(-1, self.num_labels)[bbox_first_token_mask], labels.view(-1)[bbox_first_token_mask]
1047
+ )
1048
+ else:
1049
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1050
+
1051
+ if not return_dict:
1052
+ output = (logits,) + outputs[2:]
1053
+ return ((loss,) + output) if loss is not None else output
1054
+
1055
+ return TokenClassifierOutput(
1056
+ loss=loss,
1057
+ logits=logits,
1058
+ hidden_states=outputs.hidden_states,
1059
+ attentions=outputs.attentions,
1060
+ )
1061
+
1062
+
1063
+ @add_start_docstrings(
1064
+ """
1065
+ Bros Model with a token classification head on top (initial_token_layers and subsequent_token_layer on top of the
1066
+ hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. The initial_token_classifier is used to
1067
+ predict the first token of each entity, and the subsequent_token_classifier is used to predict the subsequent
1068
+ tokens within an entity. Compared to BrosForTokenClassification, this model is more robust to serialization errors
1069
+ since it predicts the next token from each token rather than relying on the serialization order.
1070
+ """,
1071
+ BROS_START_DOCSTRING,
1072
+ )
1073
+ class BrosSpadeEEForTokenClassification(BrosPreTrainedModel):
1074
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1075
+
1076
+ def __init__(self, config):
1077
+ super().__init__(config)
1078
+ self.config = config
1079
+ self.num_labels = config.num_labels
1080
+ self.n_relations = config.n_relations
1081
+ self.backbone_hidden_size = config.hidden_size
1082
+
1083
+ self.bros = BrosModel(config)
1084
+ classifier_dropout = (
1085
+ config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
1086
+ )
1087
+
1088
+ # Initial token classification for Entity Extraction (NER)
1089
+ self.initial_token_classifier = nn.Sequential(
1090
+ nn.Dropout(classifier_dropout),
1091
+ nn.Linear(config.hidden_size, config.hidden_size),
1092
+ nn.Dropout(classifier_dropout),
1093
+ nn.Linear(config.hidden_size, config.num_labels),
1094
+ )
1095
+
1096
+ # Subsequent token classification for Entity Extraction (NER)
1097
+ self.subsequent_token_classifier = BrosRelationExtractor(config)
1098
+
1099
+ self.init_weights()
1100
+
1101
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1102
+ @replace_return_docstrings(output_type=BrosSpadeOutput, config_class=_CONFIG_FOR_DOC)
1103
+ def forward(
1104
+ self,
1105
+ input_ids: Optional[torch.Tensor] = None,
1106
+ bbox: Optional[torch.Tensor] = None,
1107
+ attention_mask: Optional[torch.Tensor] = None,
1108
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
1109
+ token_type_ids: Optional[torch.Tensor] = None,
1110
+ position_ids: Optional[torch.Tensor] = None,
1111
+ head_mask: Optional[torch.Tensor] = None,
1112
+ inputs_embeds: Optional[torch.Tensor] = None,
1113
+ initial_token_labels: Optional[torch.Tensor] = None,
1114
+ subsequent_token_labels: Optional[torch.Tensor] = None,
1115
+ output_attentions: Optional[bool] = None,
1116
+ output_hidden_states: Optional[bool] = None,
1117
+ return_dict: Optional[bool] = None,
1118
+ ) -> Union[Tuple[torch.Tensor], BrosSpadeOutput]:
1119
+ r"""
1120
+ Returns:
1121
+
1122
+ Examples:
1123
+
1124
+ ```python
1125
+ >>> import torch
1126
+ >>> from transformers import BrosProcessor, BrosSpadeEEForTokenClassification
1127
+
1128
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
1129
+
1130
+ >>> model = BrosSpadeEEForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
1131
+
1132
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
1133
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
1134
+ >>> encoding["bbox"] = bbox
1135
+
1136
+ >>> outputs = model(**encoding)
1137
+ ```"""
1138
+
1139
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1140
+
1141
+ outputs = self.bros(
1142
+ input_ids=input_ids,
1143
+ bbox=bbox,
1144
+ attention_mask=attention_mask,
1145
+ token_type_ids=token_type_ids,
1146
+ position_ids=position_ids,
1147
+ head_mask=head_mask,
1148
+ inputs_embeds=inputs_embeds,
1149
+ output_attentions=output_attentions,
1150
+ output_hidden_states=output_hidden_states,
1151
+ return_dict=return_dict,
1152
+ )
1153
+
1154
+ last_hidden_states = outputs[0]
1155
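+ # BrosRelationExtractor expects (seq_len, batch_size, hidden_size) inputs, hence the transpose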
+ last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
1156
+ initial_token_logits = self.initial_token_classifier(last_hidden_states).transpose(0, 1).contiguous()
1157
+ subsequent_token_logits = self.subsequent_token_classifier(last_hidden_states, last_hidden_states).squeeze(0)
1158
+
1159
+ # make subsequent token (sequence token classification) mask
1160
+ inv_attention_mask = 1 - attention_mask
1161
+ batch_size, max_seq_length = inv_attention_mask.shape
1162
+ device = inv_attention_mask.device
1163
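+ # pad with one extra column for the dummy "no subsequent token" slot, then mask padding positions as link targets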
+ invalid_token_mask = torch.cat([inv_attention_mask, torch.zeros([batch_size, 1]).to(device)], axis=1).bool()
1164
+ subsequent_token_logits = subsequent_token_logits.masked_fill(
1165
+ invalid_token_mask[:, None, :], torch.finfo(subsequent_token_logits.dtype).min
1166
+ )
1167
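+ # a token is never its own subsequent token, so mask the diagonal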
+ self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool()
1168
+ subsequent_token_logits = subsequent_token_logits.masked_fill(
1169
+ self_token_mask[None, :, :], torch.finfo(subsequent_token_logits.dtype).min
1170
+ )
1171
+ subsequent_token_mask = attention_mask.view(-1).bool()
1172
+
1173
+ loss = None
1174
+ if initial_token_labels is not None and subsequent_token_labels is not None:
1175
+ loss_fct = CrossEntropyLoss()
1176
+
1177
+ # get initial token loss
1178
+ initial_token_labels = initial_token_labels.view(-1)
1179
+ if bbox_first_token_mask is not None:
1180
+ bbox_first_token_mask = bbox_first_token_mask.view(-1)
1181
+ initial_token_loss = loss_fct(
1182
+ initial_token_logits.view(-1, self.num_labels)[bbox_first_token_mask],
1183
+ initial_token_labels[bbox_first_token_mask],
1184
+ )
1185
+ else:
1186
+ initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels), initial_token_labels)
1187
+
1188
+ subsequent_token_labels = subsequent_token_labels.view(-1)
1189
+ subsequent_token_loss = loss_fct(
1190
+ subsequent_token_logits.view(-1, max_seq_length + 1)[subsequent_token_mask],
1191
+ subsequent_token_labels[subsequent_token_mask],
1192
+ )
1193
+
1194
+ loss = initial_token_loss + subsequent_token_loss
1195
+
1196
+ if not return_dict:
1197
+ output = (initial_token_logits, subsequent_token_logits) + outputs[2:]
1198
+ return ((loss,) + output) if loss is not None else output
1199
+
1200
+ return BrosSpadeOutput(
1201
+ loss=loss,
1202
+ initial_token_logits=initial_token_logits,
1203
+ subsequent_token_logits=subsequent_token_logits,
1204
+ hidden_states=outputs.hidden_states,
1205
+ attentions=outputs.attentions,
1206
+ )
1207
+
1208
+
1209
+ @add_start_docstrings(
1210
+ """
1211
+ Bros Model with a token classification head on top (an entity_linker layer on top of the hidden-states output) e.g.
1212
+ for Entity-Linking. The entity_linker is used to predict intra-entity links (one entity to another entity).
1213
+ """,
1214
+ BROS_START_DOCSTRING,
1215
+ )
1216
+ class BrosSpadeELForTokenClassification(BrosPreTrainedModel):
1217
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1218
+
1219
+ def __init__(self, config):
1220
+ super().__init__(config)
1221
+ self.config = config
1222
+ self.num_labels = config.num_labels
1223
+ self.n_relations = config.n_relations
1224
+ self.backbone_hidden_size = config.hidden_size
1225
+
1226
+ self.bros = BrosModel(config)
1227
+ (config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob)
1228
+
1229
+ self.entity_linker = BrosRelationExtractor(config)
1230
+
1231
+ self.init_weights()
1232
+
1233
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1234
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1235
+ def forward(
1236
+ self,
1237
+ input_ids: Optional[torch.Tensor] = None,
1238
+ bbox: Optional[torch.Tensor] = None,
1239
+ attention_mask: Optional[torch.Tensor] = None,
1240
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
1241
+ token_type_ids: Optional[torch.Tensor] = None,
1242
+ position_ids: Optional[torch.Tensor] = None,
1243
+ head_mask: Optional[torch.Tensor] = None,
1244
+ inputs_embeds: Optional[torch.Tensor] = None,
1245
+ labels: Optional[torch.Tensor] = None,
1246
+ output_attentions: Optional[bool] = None,
1247
+ output_hidden_states: Optional[bool] = None,
1248
+ return_dict: Optional[bool] = None,
1249
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1250
+ r"""
1251
+ Returns:
1252
+
1253
+ Examples:
1254
+
1255
+ ```python
1256
+ >>> import torch
1257
+ >>> from transformers import BrosProcessor, BrosSpadeELForTokenClassification
1258
+
1259
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
1260
+
1261
+ >>> model = BrosSpadeELForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
1262
+
1263
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
1264
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
1265
+ >>> encoding["bbox"] = bbox
1266
+
1267
+ >>> outputs = model(**encoding)
1268
+ ```"""
1269
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1270
+
1271
+ outputs = self.bros(
1272
+ input_ids=input_ids,
1273
+ bbox=bbox,
1274
+ attention_mask=attention_mask,
1275
+ token_type_ids=token_type_ids,
1276
+ position_ids=position_ids,
1277
+ head_mask=head_mask,
1278
+ inputs_embeds=inputs_embeds,
1279
+ output_attentions=output_attentions,
1280
+ output_hidden_states=output_hidden_states,
1281
+ return_dict=return_dict,
1282
+ )
1283
+
1284
+ last_hidden_states = outputs[0]
1285
+ last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
1286
+
1287
+ logits = self.entity_linker(last_hidden_states, last_hidden_states).squeeze(0)
1288
+
1289
+ loss = None
1290
+ if labels is not None:
1291
+ loss_fct = CrossEntropyLoss()
1292
+
1293
+ batch_size, max_seq_length = attention_mask.shape
1294
+ device = attention_mask.device
1295
+
1296
+ self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool()
1297
+
1298
+ mask = bbox_first_token_mask.view(-1)
1299
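+ # mask out non-first tokens as link targets; the appended all-False column keeps the dummy "no link" slot available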
+ bbox_first_token_mask = torch.cat(
1300
+ [
1301
+ ~bbox_first_token_mask,
1302
+ torch.zeros([batch_size, 1], dtype=torch.bool).to(device),
1303
+ ],
1304
+ axis=1,
1305
+ )
1306
+ logits = logits.masked_fill(bbox_first_token_mask[:, None, :], torch.finfo(logits.dtype).min)
1307
+ logits = logits.masked_fill(self_token_mask[None, :, :], torch.finfo(logits.dtype).min)
1308
+
1309
+ loss = loss_fct(logits.view(-1, max_seq_length + 1)[mask], labels.view(-1)[mask])
1310
+
1311
+ if not return_dict:
1312
+ output = (logits,) + outputs[2:]
1313
+ return ((loss,) + output) if loss is not None else output
1314
+
1315
+ return TokenClassifierOutput(
1316
+ loss=loss,
1317
+ logits=logits,
1318
+ hidden_states=outputs.hidden_states,
1319
+ attentions=outputs.attentions,
1320
+ )
env-llmeval/lib/python3.10/site-packages/transformers/models/bros/processing_bros.py ADDED
@@ -0,0 +1,109 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Bros.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
23
+ from ...utils import TensorType
24
+
25
+
26
+ class BrosProcessor(ProcessorMixin):
27
+ r"""
28
+ Constructs a Bros processor which wraps a BERT tokenizer.
29
+
30
+ [`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of
31
+ [`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.
32
+
33
+ Args:
34
+ tokenizer (`BertTokenizerFast`, *optional*):
35
+ An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
36
+ """
37
+
38
+ attributes = ["tokenizer"]
39
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
40
+
41
+ def __init__(self, tokenizer=None, **kwargs):
42
+ if tokenizer is None:
43
+ raise ValueError("You need to specify a `tokenizer`.")
44
+
45
+ super().__init__(tokenizer)
46
+
47
+ def __call__(
48
+ self,
49
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
50
+ add_special_tokens: bool = True,
51
+ padding: Union[bool, str, PaddingStrategy] = False,
52
+ truncation: Union[bool, str, TruncationStrategy] = None,
53
+ max_length: Optional[int] = None,
54
+ stride: int = 0,
55
+ pad_to_multiple_of: Optional[int] = None,
56
+ return_token_type_ids: Optional[bool] = None,
57
+ return_attention_mask: Optional[bool] = None,
58
+ return_overflowing_tokens: bool = False,
59
+ return_special_tokens_mask: bool = False,
60
+ return_offsets_mapping: bool = False,
61
+ return_length: bool = False,
62
+ verbose: bool = True,
63
+ return_tensors: Optional[Union[str, TensorType]] = None,
64
+ **kwargs,
65
+ ) -> BatchEncoding:
66
+ """
67
+ This method uses [`BertTokenizerFast.__call__`] to prepare text for the model.
68
+
69
+ Please refer to the docstring of the above two methods for more information.
70
+ """
71
+ encoding = self.tokenizer(
72
+ text=text,
73
+ add_special_tokens=add_special_tokens,
74
+ padding=padding,
75
+ truncation=truncation,
76
+ max_length=max_length,
77
+ stride=stride,
78
+ pad_to_multiple_of=pad_to_multiple_of,
79
+ return_token_type_ids=return_token_type_ids,
80
+ return_attention_mask=return_attention_mask,
81
+ return_overflowing_tokens=return_overflowing_tokens,
82
+ return_special_tokens_mask=return_special_tokens_mask,
83
+ return_offsets_mapping=return_offsets_mapping,
84
+ return_length=return_length,
85
+ verbose=verbose,
86
+ return_tensors=return_tensors,
87
+ **kwargs,
88
+ )
89
+
90
+ return encoding
91
+
92
+ def batch_decode(self, *args, **kwargs):
93
+ """
94
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
95
+ refer to the docstring of this method for more information.
96
+ """
97
+ return self.tokenizer.batch_decode(*args, **kwargs)
98
+
99
+ def decode(self, *args, **kwargs):
100
+ """
101
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
102
+ the docstring of this method for more information.
103
+ """
104
+ return self.tokenizer.decode(*args, **kwargs)
105
+
106
+ @property
107
+ def model_input_names(self):
108
+ tokenizer_input_names = self.tokenizer.model_input_names
109
+ return list(dict.fromkeys(tokenizer_input_names))
env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/__init__.py ADDED
@@ -0,0 +1,28 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import _LazyModule
18
+
19
+
20
+ _import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
21
+
22
+
23
+ if TYPE_CHECKING:
24
+ from .tokenization_byt5 import ByT5Tokenizer
25
+ else:
26
+ import sys
27
+
28
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (498 Bytes).
env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/convert_byt5_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.38 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc ADDED
Binary file (9.22 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,60 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The T5 authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert T5 checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
21
+ from transformers.utils import logging
22
+
23
+
24
+ logging.set_verbosity_info()
25
+
26
+
27
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
28
+ # Initialise PyTorch model
29
+ config = T5Config.from_json_file(config_file)
30
+ print(f"Building PyTorch model from configuration: {config}")
31
+ model = T5ForConditionalGeneration(config)
32
+
33
+ # Load weights from tf checkpoint
34
+ load_tf_weights_in_t5(model, config, tf_checkpoint_path)
35
+
36
+ # Save pytorch-model
37
+ print(f"Save PyTorch model to {pytorch_dump_path}")
38
+ model.save_pretrained(pytorch_dump_path)
39
+
40
+
41
+ if __name__ == "__main__":
42
+ parser = argparse.ArgumentParser()
43
+ # Required parameters
44
+ parser.add_argument(
45
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
46
+ )
47
+ parser.add_argument(
48
+ "--config_file",
49
+ default=None,
50
+ type=str,
51
+ required=True,
52
+ help=(
53
+ "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
54
+ ),
55
+ )
56
+ parser.add_argument(
57
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
58
+ )
59
+ args = parser.parse_args()
60
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
env-llmeval/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py ADDED
@@ -0,0 +1,234 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 T5 Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model ByT5."""
16
+
17
+
18
+ import warnings
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class ByT5Tokenizer(PreTrainedTokenizer):
29
+ """
30
+ Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding.
31
+
32
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
33
+ this superclass for more information regarding those methods.
34
+
35
+ Args:
36
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
37
+ The end of sequence token.
38
+
39
+ <Tip>
40
+
41
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
42
+ The token used is the `sep_token`.
43
+
44
+ </Tip>
45
+
46
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
47
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
48
+ token instead.
49
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
50
+ The token used for padding, for example when batching sequences of different lengths.
51
+ extra_ids (`int`, *optional*, defaults to 125):
52
+ A number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
53
+ accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
54
+ indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary
55
+ like in ByT5 preprocessing see
56
+ [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).
57
+ additional_special_tokens (`List[str]`, *optional*):
58
+ Additional special tokens used by the tokenizer.
59
+ """
60
+
61
+ model_input_names = ["input_ids", "attention_mask"]
62
+
63
+ def __init__(
64
+ self,
65
+ eos_token="</s>",
66
+ unk_token="<unk>",
67
+ pad_token="<pad>",
68
+ extra_ids=125,
69
+ additional_special_tokens=None,
70
+ **kwargs,
71
+ ) -> None:
72
+ # Add extra_ids to the special token list
73
+ if extra_ids > 0 and additional_special_tokens is None:
74
+ additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
75
+ elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0:
76
+ # Check that we have the right number of extra_id special tokens
77
+ extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
78
+ if extra_tokens != extra_ids:
79
+ raise ValueError(
80
+ f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
81
+ " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
82
+ " extra_ids tokens"
83
+ )
84
+
85
+ pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token
86
+ # we force left and right stripping for backward compatibility. The byt5tests depend on this.
87
+ eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token
88
+ unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token
89
+ # unk token needs to be in the vocab with correct index
90
+ self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token}
91
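+ # byte ids are shifted past the three reserved special tokens (pad, eos, unk)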
+ self.offset = len(self._added_tokens_decoder)
92
+ self._utf_vocab_size = 2**8 # utf is 8 bits
93
+ super().__init__(
94
+ eos_token=eos_token,
95
+ unk_token=unk_token,
96
+ pad_token=pad_token,
97
+ extra_ids=0,
98
+ additional_special_tokens=additional_special_tokens, # TODO extra ids are not used :sweat_smile:
99
+ **kwargs,
100
+ )
101
+
102
+ @property
103
+ def vocab_size(self):
104
+ return self._utf_vocab_size
105
+
106
+ def get_vocab(self):
107
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}
108
+ vocab.update(self.added_tokens_encoder)
109
+ return vocab
110
+
111
+ def get_special_tokens_mask(
112
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
113
+ ) -> List[int]:
114
+ """
115
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
116
+ special tokens using the tokenizer `prepare_for_model` method.
117
+
118
+ Args:
119
+ token_ids_0 (`List[int]`):
120
+ List of IDs.
121
+ token_ids_1 (`List[int]`, *optional*):
122
+ Optional second list of IDs for sequence pairs.
123
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
124
+ Whether or not the token list is already formatted with special tokens for the model.
125
+
126
+ Returns:
127
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
128
+ """
129
+ if already_has_special_tokens:
130
+ return super().get_special_tokens_mask(
131
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
132
+ )
133
+
134
+ # normal case: some special tokens
135
+ if token_ids_1 is None:
136
+ return ([0] * len(token_ids_0)) + [1]
137
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
138
+
139
+ def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
140
+ """Do not add eos again if user already added it."""
141
+ if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
142
+ warnings.warn(
143
+ f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
144
+ " eos tokens being added."
145
+ )
146
+ return token_ids
147
+ else:
148
+ return token_ids + [self.eos_token_id]
149
+
150
+ def create_token_type_ids_from_sequences(
151
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
152
+ ) -> List[int]:
153
+ """
154
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not
155
+ make use of token type ids, therefore a list of zeros is returned.
156
+
157
+ Args:
158
+ token_ids_0 (`List[int]`):
159
+ List of IDs.
160
+ token_ids_1 (`List[int]`, *optional*):
161
+ Optional second list of IDs for sequence pairs.
162
+
163
+ Returns:
164
+ `List[int]`: List of zeros.
165
+ """
166
+ eos = [self.eos_token_id]
167
+
168
+ if token_ids_1 is None:
169
+ return len(token_ids_0 + eos) * [0]
170
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
171
+
172
+ def build_inputs_with_special_tokens(
173
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
174
+ ) -> List[int]:
175
+ """
176
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
177
+ adding special tokens. A sequence has the following format:
178
+
179
+ - single sequence: `X </s>`
180
+ - pair of sequences: `A </s> B </s>`
181
+
182
+ Args:
183
+ token_ids_0 (`List[int]`):
184
+ List of IDs to which the special tokens will be added.
185
+ token_ids_1 (`List[int]`, *optional*):
186
+ Optional second list of IDs for sequence pairs.
187
+
188
+ Returns:
189
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
190
+ """
191
+ token_ids_0 = self._add_eos_if_not_present(token_ids_0)
192
+ if token_ids_1 is None:
193
+ return token_ids_0
194
+ else:
195
+ token_ids_1 = self._add_eos_if_not_present(token_ids_1)
196
+ return token_ids_0 + token_ids_1
197
+
198
+ def _tokenize(self, text: str) -> List[str]:
199
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
200
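+ # every UTF-8 byte of the input becomes one token, represented as the character with that byte value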
+ tokens = [chr(i) for i in text.encode("utf-8")]
201
+ return tokens
202
+
203
+ def _convert_token_to_id(self, token):
204
+ """Converts a token (str) in an id using the vocab."""
205
+
206
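+ # valid byte tokens are single characters; their id is the byte value plus the reserved-token offset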
+ if len(token) != 1:
207
+ token_id = None
208
+ else:
209
+ token_id = ord(token) + self.offset
210
+
211
+ return token_id
212
+
213
+ def _convert_id_to_token(self, index):
214
+ """Converts an index (integer) in a token (str) using the vocab."""
215
+ token = chr(index - self.offset)
216
+ return token
217
+
218
+ def convert_tokens_to_string(self, tokens):
219
+ """Converts a sequence of tokens (string) in a single string."""
220
+ bstring = b""
221
+ for token in tokens:
222
+ if token in self.added_tokens_decoder:
223
+ tok_string = self.added_tokens_decoder[token].encode("utf-8")
224
+ elif token in self.added_tokens_encoder:
225
+ tok_string = token.encode("utf-8")
226
+ else:
227
+ tok_string = bytes([ord(token)])
228
+ bstring += tok_string
229
+ string = bstring.decode("utf-8", errors="ignore")
230
+ return string
231
+
232
+ # ByT5Tokenizer has no vocab file
233
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
234
+ return ()
env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/__init__.py ADDED
@@ -0,0 +1,83 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_clvp": [
25
+ "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP",
26
+ "ClvpConfig",
27
+ "ClvpDecoderConfig",
28
+ "ClvpEncoderConfig",
29
+ ],
30
+ "feature_extraction_clvp": ["ClvpFeatureExtractor"],
31
+ "processing_clvp": ["ClvpProcessor"],
32
+ "tokenization_clvp": ["ClvpTokenizer"],
33
+ }
34
+
35
+
36
+ try:
37
+ if not is_torch_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["modeling_clvp"] = [
43
+ "CLVP_PRETRAINED_MODEL_ARCHIVE_LIST",
44
+ "ClvpModelForConditionalGeneration",
45
+ "ClvpForCausalLM",
46
+ "ClvpModel",
47
+ "ClvpPreTrainedModel",
48
+ "ClvpEncoder",
49
+ "ClvpDecoder",
50
+ ]
51
+
52
+
53
+ if TYPE_CHECKING:
54
+ from .configuration_clvp import (
55
+ CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP,
56
+ ClvpConfig,
57
+ ClvpDecoderConfig,
58
+ ClvpEncoderConfig,
59
+ )
60
+ from .feature_extraction_clvp import ClvpFeatureExtractor
61
+ from .processing_clvp import ClvpProcessor
62
+ from .tokenization_clvp import ClvpTokenizer
63
+
64
+ try:
65
+ if not is_torch_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .modeling_clvp import (
71
+ CLVP_PRETRAINED_MODEL_ARCHIVE_LIST,
72
+ ClvpDecoder,
73
+ ClvpEncoder,
74
+ ClvpForCausalLM,
75
+ ClvpModel,
76
+ ClvpModelForConditionalGeneration,
77
+ ClvpPreTrainedModel,
78
+ )
79
+
80
+ else:
81
+ import sys
82
+
83
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
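
The `__init__.py` above only declares names in `_import_structure`; `_LazyModule` resolves them on first attribute access, and the torch-gated branch is skipped entirely when torch is missing. A rough sketch of the user-facing effect, assuming a `transformers` install with torch available:

```python
# Importing the subpackage is cheap: nothing heavy is loaded yet.
from transformers.models import clvp

# Attribute access triggers _LazyModule to import the real submodule.
print(clvp.ClvpTokenizer)                       # from tokenization_clvp
print(clvp.ClvpModelForConditionalGeneration)   # torch-gated branch, needs torch
```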
env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/processing_clvp.cpython-310.pyc ADDED
Binary file (2.89 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/configuration_clvp.py ADDED
@@ -0,0 +1,457 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CLVP model configuration"""
16
+
17
+
18
+ import os
19
+ from typing import TYPE_CHECKING, Union
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ pass
24
+
25
+ from ...configuration_utils import PretrainedConfig
26
+ from ...utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
32
+ "susnato/clvp_dev": "https://huggingface.co/susnato/clvp_dev/resolve/main/config.json",
33
+ }
34
+
35
+
36
+ class ClvpEncoderConfig(PretrainedConfig):
37
+ r"""
38
+ This is the configuration class to store the configuration of a [`ClvpEncoder`]. It is used to instantiate a CLVP
39
+ text or CLVP speech encoder according to the specified arguments. Instantiating a configuration with the defaults
40
+ will yield a similar configuration to that of the encoder of the CLVP
41
+ [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
42
+
43
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
44
+ documentation from [`PretrainedConfig`] for more information.
45
+
46
+ Args:
47
+ vocab_size (`int`, *optional*, defaults to 256):
48
+ Vocabulary size of the CLVP Encoder model.
49
+ hidden_size (`int`, *optional*, defaults to 768):
50
+ Dimensionality of the encoder layers and the pooler layer.
51
+ intermediate_size (`int`, *optional*, defaults to 1536):
52
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
53
+ projection_dim (`int`, *optional*, defaults to 768):
54
+ Dimensionality of the projection vector.
55
+ num_hidden_layers (`int`, *optional*, defaults to 20):
56
+ Number of hidden layers in the Transformer encoder.
57
+ num_attention_heads (`int`, *optional*, defaults to 12):
58
+ Number of attention heads for each attention layer in the Transformer encoder.
59
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
60
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
61
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
62
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
63
+ The epsilon used by the layer normalization layers.
64
+ attention_dropout (`float`, *optional*, defaults to 0.1):
65
+ The dropout ratio for the attention probabilities.
66
+ dropout (`float`, *optional*, defaults to 0.1):
67
+ The dropout ratio for the feed-forward layers in [`ClvpEncoderMLP`].
68
+ use_rotary_embedding (`bool`, *optional*, defaults to `True`):
69
+ Whether to use rotary_embedding or not.
70
+ use_attention_bias (`bool`, *optional*, defaults to `False`):
71
+ Whether to use bias in Query, Key and Value layers during self attention.
72
+ summary_type (`str`, *optional*, defaults to `"mean"`):
73
+ What strategy to use to get pooler_output from the last_hidden_state. `"last"`, `"first"`, `"mean"` and
74
+ `"cls_index"` are supported.
75
+ initializer_factor (`float`, *optional*, defaults to 1.0):
76
+ A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
77
+ testing).
78
+ bos_token_id (`int`, *optional*, defaults to 255):
79
+ Beginning of sequence token id.
80
+ eos_token_id (`int`, *optional*, defaults to 0):
81
+ End of sequence token id.
82
+
83
+ Example:
84
+
85
+ ```python
86
+ >>> from transformers import ClvpEncoderConfig, ClvpEncoder
87
+
88
+ >>> # Initializing a ClvpEncoderConfig with susnato/clvp_dev style configuration
89
+ >>> encoder_configuration = ClvpEncoderConfig()
90
+
91
+ >>> # Initializing a ClvpEncoder (with random weights) from the susnato/clvp_dev style configuration
92
+ >>> model = ClvpEncoder(encoder_configuration)
93
+
94
+ >>> # Accessing the model configuration
95
+ >>> configuration = model.config
96
+ ```"""
97
+
98
+ model_type = "clvp_encoder"
99
+
100
+ def __init__(
101
+ self,
102
+ vocab_size=256,
103
+ hidden_size=768,
104
+ intermediate_size=1536,
105
+ projection_dim=768,
106
+ num_hidden_layers=20,
107
+ num_attention_heads=12,
108
+ hidden_act="gelu",
109
+ layer_norm_eps=1e-5,
110
+ attention_dropout=0.1,
111
+ dropout=0.1,
112
+ use_rotary_embedding=True,
113
+ use_attention_bias=False,
114
+ summary_type="mean",
115
+ initializer_factor=1.0,
116
+ bos_token_id=255,
117
+ eos_token_id=0,
118
+ **kwargs,
119
+ ):
120
+ self.vocab_size = vocab_size
121
+ self.hidden_size = hidden_size
122
+ self.intermediate_size = intermediate_size
123
+ self.projection_dim = projection_dim
124
+ self.num_hidden_layers = num_hidden_layers
125
+ self.num_attention_heads = num_attention_heads
126
+ self.layer_norm_eps = layer_norm_eps
127
+ self.hidden_act = hidden_act
128
+ self.initializer_factor = initializer_factor
129
+ self.attention_dropout = attention_dropout
130
+ self.dropout = dropout
131
+ self.use_rotary_embedding = use_rotary_embedding
132
+ self.use_attention_bias = use_attention_bias
133
+ self.summary_type = summary_type
134
+ self.bos_token_id = bos_token_id
135
+ self.eos_token_id = eos_token_id
136
+
137
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
138
+
139
+ @classmethod
140
+ def from_pretrained(
141
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], config_type: str = "text_config", **kwargs
142
+ ) -> "PretrainedConfig":
143
+ cls._set_token_in_kwargs(kwargs)
144
+
145
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
146
+
147
+ # make sure to have the config_type be either "text_config" or "speech_config"
148
+ # this is to make sure that we can load only text or speech configs from the nested ClvpConfig.
149
+ if config_type not in ["text_config", "speech_config"]:
150
+ raise ValueError(
151
+ f"We can only load either 'text_config' or 'speech_config' but you are trying to load " f"{config_type}"
152
+ )
153
+
154
+ # get the text config dict if we are loading from ClvpConfig
155
+ if config_dict.get("model_type") == "clvp":
156
+ config_dict = config_dict[config_type]
157
+
158
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
159
+ logger.warning(
160
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
161
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
162
+ )
163
+
164
+ return cls.from_dict(config_dict, **kwargs)
165
+
166
+
167
+ class ClvpDecoderConfig(PretrainedConfig):
168
+ r"""
169
+ This is the configuration class to store the configuration of a [`ClvpDecoder`]. It is used to instantiate a CLVP
170
+ Decoder Model according to the specified arguments, defining the model architecture. Instantiating a configuration
171
+ with the defaults will yield a similar configuration to that of the Decoder part of the CLVP
172
+ [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
173
+
174
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
175
+ documentation from [`PretrainedConfig`] for more information.
176
+
177
+ The architecture is similar to GPT2.
178
+
179
+ Args:
180
+ vocab_size (`int`, *optional*, defaults to 8194):
181
+ Vocabulary size of the model.
182
+ max_position_embeddings (`int`, *optional*, defaults to 608):
183
+ The maximum sequence length of mel tokens that this model might ever be used with. Similar to `n_positions`
184
+ in `GPT2Config`.
185
+ max_text_tokens (`int`, *optional*, defaults to 404):
186
+ The maximum sequence length of text tokens that this model might ever be used with. Similar to
187
+ `n_positions` in `GPT2Config`.
188
+ hidden_size (`int`, *optional*, defaults to 1024):
189
+ Dimensionality of the embeddings and hidden states.
190
+ num_hidden_layers (`int`, *optional*, defaults to 30):
191
+ Number of hidden layers in the Transformer encoder.
192
+ num_attention_heads (`int`, *optional*, defaults to 16):
193
+ Number of attention heads for each attention layer in the Transformer encoder.
194
+ n_inner (`int`, *optional*):
195
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `hidden_size`.
196
+ num_mel_attn_blocks (`int`, *optional*, defaults to 6):
197
+ Denotes the number of self attention layers in [`ClvpConditioningEncoder`].
198
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
199
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
200
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
201
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
202
+ embd_pdrop (`float`, *optional*, defaults to 0.1):
203
+ The dropout ratio for the embeddings.
204
+ attention_dropout (`float`, *optional*, defaults to 0.1):
205
+ The dropout ratio for the attention.
206
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
207
+ The epsilon to use in the layer normalization layers.
208
+ initializer_range (`float`, *optional*, defaults to 0.02):
209
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
210
+ summary_type (`string`, *optional*, defaults to `"cls_index"`):
211
+ Argument used when doing sequence summary.
212
+
213
+ Has to be one of the following options:
214
+
215
+ - `"last"`: Take the last token hidden state (like XLNet).
216
+ - `"first"`: Take the first token hidden state (like BERT).
217
+ - `"mean"`: Take the mean of all tokens hidden states.
218
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
219
+ - `"attn"`: Not implemented now, use multi-head attention.
220
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
221
+ Whether or not to add a projection after the vector extraction.
222
+ summary_activation (`str`, *optional*):
223
+ Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
224
+ summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
225
+ Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
226
+ summary_first_dropout (`float`, *optional*, defaults to 0.1):
227
+ The dropout ratio to be used after the projection and activation.
228
+ use_cache (`bool`, *optional*, defaults to `True`):
229
+ Whether or not the model should return the last key/values attentions (not used by all models).
230
+ bos_token_id (`int`, *optional*, defaults to 8192):
231
+ Beginning of sequence token id, used at the start of the generation.
232
+ eos_token_id (`int`, *optional*, defaults to 8193):
233
+ End of sequence token id, used in the method
234
+ [`ClvpModelForConditionalGeneration.fix_speech_decoder_output()`] to correct decoder outputs.
235
+ feature_size (`int`, *optional*, defaults to 80):
236
+ The feature dimension of the extracted mel features. This value is used in [`ClvpConditioningEncoder`].
237
+ use_attention_bias (`bool`, *optional*, defaults to `True`):
238
+ Whether to use bias in Query, Key and Value layers during self attention.
239
+ initializer_factor (`float`, *optional*, defaults to 1.0):
240
+ A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
241
+ testing).
242
+ decoder_fixing_codes (`list`, *optional*, defaults to `[83, 45, 45, 248]`):
243
+ These values are used in the method `fix_speech_decoder_output` to fix decoder generated outputs.
244
+
245
+ Example:
246
+
247
+ ```python
248
+ >>> from transformers import ClvpDecoderConfig, ClvpDecoder
249
+
250
+ >>> # Initializing a ClvpDecoderConfig with susnato/clvp_dev style configuration
251
+ >>> decoder_configuration = ClvpDecoderConfig()
252
+
253
+ >>> # Initializing a ClvpDecoder (with random weights) from the susnato/clvp_dev style configuration
254
+ >>> model = ClvpDecoder(decoder_configuration)
255
+
256
+ >>> # Accessing the model configuration
257
+ >>> configuration = model.config
258
+ ```"""
259
+
260
+ model_type = "clvp_decoder"
261
+
262
+ def __init__(
263
+ self,
264
+ vocab_size=8194,
265
+ max_position_embeddings=608,
266
+ max_text_tokens=404,
267
+ hidden_size=1024,
268
+ num_hidden_layers=30,
269
+ num_attention_heads=16,
270
+ n_inner=None,
271
+ num_mel_attn_blocks=6,
272
+ activation_function="gelu_new",
273
+ resid_pdrop=0.1,
274
+ embd_pdrop=0.1,
275
+ attention_dropout=0.1,
276
+ layer_norm_epsilon=1e-5,
277
+ initializer_range=0.02,
278
+ summary_type="cls_index",
279
+ summary_use_proj=True,
280
+ summary_activation=None,
281
+ summary_proj_to_labels=True,
282
+ summary_first_dropout=0.1,
283
+ use_cache=True,
284
+ bos_token_id=8192,
285
+ eos_token_id=8193,
286
+ feature_size=80,
287
+ use_attention_bias=True,
288
+ initializer_factor=1.0,
289
+ decoder_fixing_codes=[83, 45, 45, 248],
290
+ **kwargs,
291
+ ):
292
+ self.vocab_size = vocab_size
293
+ self.max_position_embeddings = max_position_embeddings
294
+ self.max_text_tokens = max_text_tokens
295
+ self.hidden_size = hidden_size
296
+ self.num_hidden_layers = num_hidden_layers
297
+ self.num_attention_heads = num_attention_heads
298
+ self.n_inner = n_inner
299
+ self.num_mel_attn_blocks = num_mel_attn_blocks
300
+ self.activation_function = activation_function
301
+ self.resid_pdrop = resid_pdrop
302
+ self.embd_pdrop = embd_pdrop
303
+ self.attention_dropout = attention_dropout
304
+ self.layer_norm_epsilon = layer_norm_epsilon
305
+ self.initializer_range = initializer_range
306
+ self.summary_type = summary_type
307
+ self.summary_use_proj = summary_use_proj
308
+ self.summary_activation = summary_activation
309
+ self.summary_first_dropout = summary_first_dropout
310
+ self.summary_proj_to_labels = summary_proj_to_labels
311
+ self.use_cache = use_cache
312
+ self.feature_size = feature_size
313
+ self.use_attention_bias = use_attention_bias
314
+ self.initializer_factor = initializer_factor
315
+ self.decoder_fixing_codes = decoder_fixing_codes
316
+
317
+ self.bos_token_id = bos_token_id
318
+ self.eos_token_id = eos_token_id
319
+
320
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
321
+
322
+ @classmethod
323
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
324
+ cls._set_token_in_kwargs(kwargs)
325
+
326
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
327
+
328
+ # get the speech config dict if we are loading from ClvpConfig
329
+ if config_dict.get("model_type") == "clvp":
330
+ config_dict = config_dict["decoder_config"]
331
+
332
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
333
+ logger.warning(
334
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
335
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
336
+ )
337
+
338
+ return cls.from_dict(config_dict, **kwargs)
339
+
340
+
341
+ class ClvpConfig(PretrainedConfig):
342
+ r"""
343
+ [`ClvpConfig`] is the configuration class to store the configuration of a [`ClvpModelForConditionalGeneration`]. It
344
+ is used to instantiate a CLVP model according to the specified arguments, defining the text model, speech model and
345
+ decoder model configs. Instantiating a configuration with the defaults will yield a similar configuration to that
346
+ of the CLVP [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
347
+
348
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
349
+ documentation from [`PretrainedConfig`] for more information.
350
+
351
+ Args:
352
+ text_config (`dict`, *optional*):
353
+ Dictionary of configuration options used to initialize the CLVP text encoder.
354
+ speech_config (`dict`, *optional*):
355
+ Dictionary of configuration options used to initialize CLVP speech encoder.
356
+ decoder_config (`dict`, *optional*):
357
+ Dictionary of configuration options used to initialize [`ClvpDecoderConfig`].
358
+ projection_dim (`int`, *optional*, defaults to 768):
359
+ Dimensionality of text and speech projection layers.
360
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
361
+ The initial value of the *logit_scale* parameter. Default is used as per the original CLVP implementation.
362
+ initializer_factor (`float`, *optional*, defaults to 1.0):
363
+ A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
364
+ testing).
365
+ kwargs (*optional*):
366
+ Dictionary of keyword arguments.
367
+
368
+ Example:
369
+
370
+ ```python
371
+ >>> from transformers import ClvpConfig, ClvpModelForConditionalGeneration
372
+
373
+ >>> # Initializing a ClvpConfig with susnato/clvp_dev style configuration
374
+ >>> configuration = ClvpConfig()
375
+
376
+ >>> # Initializing a ClvpModelForConditionalGeneration (with random weights) from the susnato/clvp_dev style configuration
377
+ >>> model = ClvpModelForConditionalGeneration(configuration)
378
+
379
+ >>> # Accessing the model configuration
380
+ >>> configuration = model.config
381
+
382
+ >>> # We can also initialize a CLVPConfig from a CLVPTextConfig, CLVPSpeechConfig and a CLVPAutoRegressiveConfig
383
+ >>> from transformers import ClvpEncoderConfig, ClvpDecoderConfig
384
+
385
+ >>> # Initializing a CLVP text, CLVP speech and CLVP decoder configuration
386
+ >>> config_text = ClvpEncoderConfig()
387
+ >>> config_speech = ClvpEncoderConfig()
388
+ >>> decoder_config = ClvpDecoderConfig()
389
+
390
+ >>> config = ClvpConfig.from_sub_model_configs(config_text, config_speech, decoder_config)
391
+ ```"""
392
+
393
+ model_type = "clvp"
394
+ is_composition = True
395
+
396
+ def __init__(
397
+ self,
398
+ text_config=None,
399
+ speech_config=None,
400
+ decoder_config=None,
401
+ projection_dim=768,
402
+ logit_scale_init_value=2.6592,
403
+ initializer_factor=1.0,
404
+ **kwargs,
405
+ ):
406
+ super().__init__(**kwargs)
407
+
408
+ if text_config is None:
409
+ text_config = {}
410
+ logger.info("`text_config` is `None`. Initializing the `ClvpEncoderConfig` with default values.")
411
+
412
+ if speech_config is None:
413
+ speech_config = {}
414
+ logger.info("`speech_config` is `None`. initializing the `ClvpEncoderConfig` with default values.")
415
+
416
+ if decoder_config is None:
417
+ decoder_config = {}
418
+ logger.info("`decoder_config` is `None`. initializing the `ClvpDecoderConfig` with default values.")
419
+
420
+ self.text_config = ClvpEncoderConfig(**text_config)
421
+ self.speech_config = ClvpEncoderConfig(**speech_config)
422
+ self.decoder_config = ClvpDecoderConfig(**decoder_config)
423
+
424
+ self.projection_dim = projection_dim
425
+ self.logit_scale_init_value = logit_scale_init_value
426
+ self.initializer_factor = initializer_factor
427
+
428
+ @classmethod
429
+ def from_sub_model_configs(
430
+ cls,
431
+ text_config: ClvpEncoderConfig,
432
+ speech_config: ClvpEncoderConfig,
433
+ decoder_config: ClvpDecoderConfig,
434
+ **kwargs,
435
+ ):
436
+ r"""
437
+ Instantiate a [`ClvpConfig`] (or a derived class) from CLVP text model configuration, CLVP speech model
438
+ configuration and CLVP decoder model configuration.
439
+
440
+ Args:
441
+ text_config (`ClvpEncoderConfig`):
442
+ Text model configuration of type [`ClvpEncoderConfig`].
443
+ speech_config (`ClvpEncoderConfig`):
444
+ Speech model configuration of type [`ClvpEncoderConfig`].
445
+ decoder_config (`ClvpDecoderConfig`):
446
+ Decoder model configuration of type [`ClvpDecoderConfig`].
447
+
448
+ Returns:
449
+ [`ClvpConfig`]: An instance of a configuration object
450
+ """
451
+
452
+ return cls(
453
+ text_config=text_config.to_dict(),
454
+ speech_config=speech_config.to_dict(),
455
+ decoder_config=decoder_config.to_dict(),
456
+ **kwargs,
457
+ )
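
A short sketch of composing the nested config from its three sub-configs via `from_sub_model_configs`, as the class docstring above describes; it assumes a `transformers` version that ships the Clvp classes.

```python
from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig

# Build the text encoder, speech encoder and decoder configs separately ...
text_config = ClvpEncoderConfig()                         # defaults documented above
speech_config = ClvpEncoderConfig(vocab_size=8192)        # override a single field
decoder_config = ClvpDecoderConfig(num_hidden_layers=12)

# ... and nest them inside a single ClvpConfig.
config = ClvpConfig.from_sub_model_configs(text_config, speech_config, decoder_config)

print(type(config.speech_config).__name__)      # ClvpEncoderConfig
print(config.decoder_config.num_hidden_layers)  # 12
```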
env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/feature_extraction_clvp.py ADDED
@@ -0,0 +1,238 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Feature extractor class for CLVP
18
+ """
19
+
20
+ from typing import List, Optional, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...audio_utils import mel_filter_bank, spectrogram, window_function
25
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
26
+ from ...feature_extraction_utils import BatchFeature
27
+ from ...utils import TensorType, logging
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ class ClvpFeatureExtractor(SequenceFeatureExtractor):
34
+ r"""
35
+ Constructs a CLVP feature extractor.
36
+
37
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
38
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
39
+
40
+ This class extracts log-mel-spectrogram features from raw speech using a custom numpy implementation of the `Short
41
+ Time Fourier Transform` which should match pytorch's `torch.stft` equivalent.
42
+
43
+ Args:
44
+ feature_size (`int`, *optional*, defaults to 80):
45
+ The feature dimension of the extracted features.
46
+ sampling_rate (`int`, *optional*, defaults to 22050):
47
+ The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
48
+ default_audio_length (`int`, *optional*, defaults to 6):
49
+ The default length of raw audio in seconds. If `max_length` is not set during `__call__` then it will
50
+ automatically be set to default_audio_length * `self.sampling_rate`.
51
+ hop_length (`int`, *optional*, defaults to 256):
52
+ Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
53
+ chunk_length (`int`, *optional*, defaults to 30):
54
+ The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio
55
+ sequences.
56
+ n_fft (`int`, *optional*, defaults to 1024):
57
+ Size of the Fourier transform.
58
+ padding_value (`float`, *optional*, defaults to 0.0):
59
+ Padding value used to pad the audio. Should correspond to silences.
60
+ mel_norms (`list` of length `feature_size`, *optional*):
61
+ If `mel_norms` is provided then it will be used to normalize the log-mel spectrograms along each
62
+ mel-filter.
63
+ return_attention_mask (`bool`, *optional*, defaults to `False`):
64
+ Whether to return the attention mask. If left to the default, it will return the attention mask.
65
+
66
+ [What are attention masks?](../glossary#attention-mask)
67
+ """
68
+
69
+ model_input_names = ["input_features", "attention_mask"]
70
+
71
+ def __init__(
72
+ self,
73
+ feature_size=80,
74
+ sampling_rate=22050,
75
+ default_audio_length=6,
76
+ hop_length=256,
77
+ chunk_length=30,
78
+ n_fft=1024,
79
+ padding_value=0.0,
80
+ mel_norms=None,
81
+ return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask
82
+ **kwargs,
83
+ ):
84
+ super().__init__(
85
+ feature_size=feature_size,
86
+ sampling_rate=sampling_rate,
87
+ padding_value=padding_value,
88
+ return_attention_mask=return_attention_mask,
89
+ **kwargs,
90
+ )
91
+ self.n_fft = n_fft
92
+ self.hop_length = hop_length
93
+ self.chunk_length = chunk_length
94
+ self.n_samples = chunk_length * sampling_rate
95
+ self.nb_max_frames = self.n_samples // hop_length
96
+ self.sampling_rate = sampling_rate
97
+ self.default_audio_length = default_audio_length
98
+ self.mel_norms = mel_norms
99
+ self.mel_filters = mel_filter_bank(
100
+ num_frequency_bins=1 + (n_fft // 2),
101
+ num_mel_filters=feature_size,
102
+ min_frequency=0.0,
103
+ max_frequency=8000.0,
104
+ sampling_rate=sampling_rate,
105
+ norm="slaney",
106
+ mel_scale="htk",
107
+ )
108
+
109
+ def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
110
+ """
111
+ This method first computes the log-mel spectrogram of the provided audio and then applies normalization along
112
+ each mel-filterbank, if `mel_norms` is provided.
113
+ """
114
+ log_spec = spectrogram(
115
+ waveform,
116
+ window_function(self.n_fft, "hann"),
117
+ frame_length=self.n_fft,
118
+ hop_length=self.hop_length,
119
+ power=2.0,
120
+ mel_filters=self.mel_filters,
121
+ log_mel=None,
122
+ )
123
+
124
+ log_spec = np.log(np.clip(log_spec, a_min=1e-5, a_max=None))
125
+
126
+ if self.mel_norms is not None:
127
+ log_spec = log_spec / np.array(self.mel_norms)[:, None]
128
+
129
+ return log_spec
130
+
131
+ def __call__(
132
+ self,
133
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
134
+ sampling_rate: Optional[int] = None,
135
+ truncation: bool = True,
136
+ pad_to_multiple_of: Optional[int] = None,
137
+ return_tensors: Optional[Union[str, TensorType]] = None,
138
+ return_attention_mask: Optional[bool] = True,
139
+ padding: Optional[str] = "max_length",
140
+ max_length: Optional[int] = None,
141
+ **kwargs,
142
+ ) -> BatchFeature:
143
+ """
144
+ `ClvpFeatureExtractor` is used to extract various voice specific properties such as the pitch and tone of the
145
+ voice, speaking speed, and even speaking defects like a lisp or stuttering from a sample voice or `raw_speech`.
146
+
147
+ First the voice is padded or truncated in a way such that it becomes a waveform of `self.default_audio_length`
148
+ seconds long and then the log-mel spectrogram is extracted from it.
149
+
150
+ Args:
151
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
152
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
153
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
154
+ stereo, i.e. single float per timestep.
155
+ sampling_rate (`int`, *optional*):
156
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
157
+ `sampling_rate` at the forward call to prevent silent errors and to allow the automatic speech recognition
158
+ pipeline to work correctly.
159
+ truncation (`bool`, *optional*, defaults to `True`):
160
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
161
+ pad_to_multiple_of (`int`, *optional*):
162
+ If set will pad the sequence to a multiple of the provided value.
163
+
164
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
165
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
166
+ return_attention_mask (`bool`, *optional*, defaults to `True`):
167
+ Whether to return the attention mask. If left to the default, it will return the attention mask.
168
+
169
+ [What are attention masks?](../glossary#attention-mask)
170
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
171
+ If set, will return tensors instead of list of python integers. Acceptable values are:
172
+
173
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
174
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
175
+ - `'np'`: Return Numpy `np.ndarray` objects.
176
+ padding_value (`float`, defaults to 0.0):
177
+ The value that is used to fill the padding values / vectors.
178
+ max_length (`int`, *optional*):
179
+ The maximum input length of the inputs.
180
+ """
181
+
182
+ if sampling_rate is not None:
183
+ if sampling_rate != self.sampling_rate:
184
+ raise ValueError(
185
+ f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
186
+ f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
187
+ f" was sampled with {self.sampling_rate} and not {sampling_rate}."
188
+ )
189
+ else:
190
+ logger.warning(
191
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
192
+ "Failing to do so can result in silent errors that might be hard to debug."
193
+ )
194
+
195
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
196
+ if is_batched_numpy and len(raw_speech.shape) > 2:
197
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
198
+ is_batched = is_batched_numpy or (
199
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
200
+ )
201
+
202
+ if is_batched:
203
+ raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
204
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
205
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
206
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
207
+ raw_speech = raw_speech.astype(np.float32)
208
+
209
+ # always return batch
210
+ if not is_batched:
211
+ raw_speech = [np.asarray([raw_speech]).T]
212
+
213
+ batched_speech = BatchFeature({"input_features": raw_speech})
214
+
215
+ max_length = self.default_audio_length * self.sampling_rate if max_length is None else max_length
216
+
217
+ padded_inputs = self.pad(
218
+ batched_speech,
219
+ padding=padding,
220
+ max_length=max_length,
221
+ truncation=truncation,
222
+ pad_to_multiple_of=pad_to_multiple_of,
223
+ return_attention_mask=return_attention_mask,
224
+ )
225
+
226
+ # make sure list is in array format
227
+ input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
228
+
229
+ input_features = [
230
+ self._np_extract_fbank_features(waveform).astype(np.float32) for waveform in input_features[0]
231
+ ]
232
+
233
+ if isinstance(input_features[0], List):
234
+ padded_inputs["input_features"] = [np.asarray(feature) for feature in input_features]
235
+ else:
236
+ padded_inputs["input_features"] = input_features
237
+
238
+ return padded_inputs.convert_to_tensors(return_tensors)
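
A minimal sketch of running the feature extractor on a dummy waveform; it pads the audio to `default_audio_length * sampling_rate` samples before computing the 80-bin log-mel spectrogram, so the number of output frames is fixed by those defaults rather than by the input length.

```python
import numpy as np
from transformers import ClvpFeatureExtractor

# Construct with the defaults documented above (80 mel bins, 22.05 kHz, 6 s).
feature_extractor = ClvpFeatureExtractor()

# One second of fake mono audio.
audio = np.random.randn(22050).astype(np.float32)
features = feature_extractor(audio, sampling_rate=22050, return_tensors="np")

# (batch, feature_size, frames) log-mel features, plus the attention mask.
print(sorted(features.keys()))
print(features["input_features"].shape)
```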
env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py ADDED
@@ -0,0 +1,91 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Processor class for CLVP
18
+ """
19
+
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+
23
+
24
+ class ClvpProcessor(ProcessorMixin):
25
+ r"""
26
+ Constructs a CLVP processor which wraps a CLVP Feature Extractor and a CLVP Tokenizer into a single processor.
27
+
28
+ [`ClvpProcessor`] offers all the functionalities of [`ClvpFeatureExtractor`] and [`ClvpTokenizer`]. See the
29
+ [`~ClvpProcessor.__call__`], [`~ClvpProcessor.decode`] and [`~ClvpProcessor.batch_decode`] for more information.
30
+
31
+ Args:
32
+ feature_extractor (`ClvpFeatureExtractor`):
33
+ An instance of [`ClvpFeatureExtractor`]. The feature extractor is a required input.
34
+ tokenizer (`ClvpTokenizer`):
35
+ An instance of [`ClvpTokenizer`]. The tokenizer is a required input.
36
+ """
37
+
38
+ feature_extractor_class = "ClvpFeatureExtractor"
39
+ tokenizer_class = "ClvpTokenizer"
40
+ model_input_names = [
41
+ "input_ids",
42
+ "input_features",
43
+ "attention_mask",
44
+ ]
45
+
46
+ def __init__(self, feature_extractor, tokenizer):
47
+ super().__init__(feature_extractor, tokenizer)
48
+
49
+ def __call__(self, *args, **kwargs):
50
+ """
51
+ Forwards the `audio` and `sampling_rate` arguments to [`~ClvpFeatureExtractor.__call__`] and the `text`
52
+ argument to [`~ClvpTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
53
+ information.
54
+ """
55
+
56
+ raw_speech = kwargs.pop("raw_speech", None)
57
+ sampling_rate = kwargs.pop("sampling_rate", None)
58
+ text = kwargs.pop("text", None)
59
+
60
+ if raw_speech is None and text is None:
61
+ raise ValueError("You need to specify either a `raw_speech` or `text` input to process.")
62
+
63
+ if raw_speech is not None:
64
+ inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs)
65
+ if text is not None:
66
+ encodings = self.tokenizer(text, **kwargs)
67
+
68
+ if text is None:
69
+ return inputs
70
+ elif raw_speech is None:
71
+ return encodings
72
+ else:
73
+ inputs["input_ids"] = encodings["input_ids"]
74
+ inputs["attention_mask"] = encodings["attention_mask"]
75
+ return inputs
76
+
77
+ # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.batch_decode with Whisper->Clvp
78
+ def batch_decode(self, *args, **kwargs):
79
+ """
80
+ This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
81
+ refer to the docstring of this method for more information.
82
+ """
83
+ return self.tokenizer.batch_decode(*args, **kwargs)
84
+
85
+ # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.decode with Whisper->Clvp
86
+ def decode(self, *args, **kwargs):
87
+ """
88
+ This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
89
+ the docstring of this method for more information.
90
+ """
91
+ return self.tokenizer.decode(*args, **kwargs)
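
A sketch of the combined call path: the processor routes `raw_speech`/`sampling_rate` to the feature extractor and `text` to the tokenizer, then merges the outputs. This assumes the `susnato/clvp_dev` checkpoint referenced throughout these files ships both the tokenizer and feature extractor configuration.

```python
import numpy as np
from transformers import ClvpProcessor

processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")

audio = np.random.randn(22050).astype(np.float32)
inputs = processor(
    raw_speech=audio,
    sampling_rate=22050,
    text="Hello world",
    return_tensors="pt",
)

# Audio features from ClvpFeatureExtractor plus ids/mask from ClvpTokenizer.
print(sorted(inputs.keys()))  # ['attention_mask', 'input_features', 'input_ids']
```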
env-llmeval/lib/python3.10/site-packages/transformers/models/clvp/tokenization_clvp.py ADDED
@@ -0,0 +1,379 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization class for CLVP."""
16
+
17
+ import json
18
+ import os
19
+ from functools import lru_cache
20
+ from typing import List, Optional, Tuple
21
+
22
+ import regex as re
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...utils import logging
26
+ from .number_normalizer import EnglishNormalizer
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {
32
+ "vocab_file": "vocab.json",
33
+ "merges_file": "merges.txt",
34
+ }
35
+
36
+ PRETRAINED_VOCAB_FILES_MAP = {
37
+ "vocab_file": {
38
+ "clvp_dev": "https://huggingface.co/susnato/clvp_dev/blob/main/vocab.json",
39
+ },
40
+ "merges_file": {
41
+ "clvp_dev": "https://huggingface.co/susnato/clvp_dev/blob/main/merges.txt",
42
+ },
43
+ }
44
+
45
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
46
+ "clvp_dev": 1024,
47
+ }
48
+
49
+
50
+ @lru_cache()
51
+ # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
52
+ def bytes_to_unicode():
53
+ """
54
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
55
+ characters the bpe code barfs on.
56
+
57
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
58
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
59
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
60
+ tables between utf-8 bytes and unicode strings.
61
+ """
62
+ bs = (
63
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
64
+ )
65
+ cs = bs[:]
66
+ n = 0
67
+ for b in range(2**8):
68
+ if b not in bs:
69
+ bs.append(b)
70
+ cs.append(2**8 + n)
71
+ n += 1
72
+ cs = [chr(n) for n in cs]
73
+ return dict(zip(bs, cs))
74
+
75
+
76
+ # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
77
+ def get_pairs(word):
78
+ """
79
+ Return set of symbol pairs in a word.
80
+
81
+ Word is represented as tuple of symbols (symbols being variable-length strings).
82
+ """
83
+ pairs = set()
84
+ prev_char = word[0]
85
+ for char in word[1:]:
86
+ pairs.add((prev_char, char))
87
+ prev_char = char
88
+ return pairs
89
+
90
+
91
+ class ClvpTokenizer(PreTrainedTokenizer):
92
+ """
93
+ Construct a CLVP tokenizer. Based on byte-level Byte-Pair-Encoding.
94
+
95
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
96
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
97
+
98
+ ```python
99
+ >>> from transformers import ClvpTokenizer
100
+
101
+ >>> tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
102
+ >>> tokenizer("Hello world")["input_ids"]
103
+ [62, 84, 28, 2, 179, 79]
104
+
105
+ >>> tokenizer(" Hello world")["input_ids"]
106
+ [2, 62, 84, 28, 2, 179, 79]
107
+ ```
108
+
109
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
110
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
111
+
112
+ <Tip>
113
+
114
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
115
+
116
+ </Tip>
117
+
118
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
119
+ this superclass for more information regarding those methods.
120
+
121
+ Args:
122
+ vocab_file (`str`):
123
+ Path to the vocabulary file.
124
+ merges_file (`str`):
125
+ Path to the merges file.
126
+ errors (`str`, *optional*, defaults to `"replace"`):
127
+ Paradigm to follow when decoding bytes to UTF-8. See
128
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
129
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
130
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
131
+ token instead.
132
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
133
+ The beginning of sequence token.
134
+ eos_token (`str`, *optional*, defaults to `"[STOP]"`):
135
+ The end of sequence token.
136
+ pad_token (`str`, *optional*, defaults to `"[STOP]"`):
137
+ The pad token of the sequence.
138
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
139
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
140
+ other word. (The CLVP tokenizer detects the beginning of words by the preceding space).
141
+ add_bos_token (`bool`, *optional*, defaults to `False`):
142
+ Whether to add `bos_token` in front of the sequence when add_special_tokens=True.
143
+ add_eos_token (`bool`, *optional*, defaults to `False`):
144
+ Whether to add `eos_token` at the end of the sequence when add_special_tokens=True.
145
+ """
146
+
147
+ vocab_files_names = VOCAB_FILES_NAMES
148
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
149
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
150
+ model_input_names = [
151
+ "input_ids",
152
+ "attention_mask",
153
+ ]
154
+
155
+ def __init__(
156
+ self,
157
+ vocab_file,
158
+ merges_file,
159
+ errors="replace",
160
+ unk_token="[UNK]",
161
+ bos_token="<|endoftext|>",
162
+ eos_token="[STOP]",
163
+ pad_token="[STOP]",
164
+ add_prefix_space=False,
165
+ add_bos_token=False,
166
+ add_eos_token=False,
167
+ **kwargs,
168
+ ):
169
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
170
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
171
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
172
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
173
+
174
+ self.add_bos_token = add_bos_token
175
+ self.add_eos_token = add_eos_token
176
+ self._normalizer = None
177
+
178
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
179
+ self.encoder = json.load(vocab_handle)
180
+ self.decoder = {v: k for k, v in self.encoder.items()}
181
+ self.errors = errors # how to handle errors in decoding
182
+ self.byte_encoder = bytes_to_unicode()
183
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
184
+ with open(merges_file, encoding="utf-8") as merges_handle:
185
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
186
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
187
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
188
+ self.cache = {}
189
+ self.add_prefix_space = add_prefix_space
190
+
191
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
192
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
193
+
194
+ super().__init__(
195
+ errors=errors,
196
+ unk_token=unk_token,
197
+ bos_token=bos_token,
198
+ eos_token=eos_token,
199
+ pad_token=pad_token,
200
+ add_prefix_space=add_prefix_space,
201
+ add_bos_token=add_bos_token,
202
+ add_eos_token=add_eos_token,
203
+ **kwargs,
204
+ )
205
+
206
+ @property
207
+ def vocab_size(self):
208
+ return len(self.encoder)
209
+
210
+ @property
211
+ def normalizer(self):
212
+ if self._normalizer is None:
213
+ self._normalizer = EnglishNormalizer()
214
+ return self._normalizer
215
+
216
+ def get_vocab(self):
217
+ return dict(self.encoder, **self.added_tokens_encoder)
218
+
219
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
220
+ def bpe(self, token):
221
+ if token in self.cache:
222
+ return self.cache[token]
223
+ word = tuple(token)
224
+ pairs = get_pairs(word)
225
+
226
+ if not pairs:
227
+ return token
228
+
229
+ while True:
230
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
231
+ if bigram not in self.bpe_ranks:
232
+ break
233
+ first, second = bigram
234
+ new_word = []
235
+ i = 0
236
+ while i < len(word):
237
+ try:
238
+ j = word.index(first, i)
239
+ except ValueError:
240
+ new_word.extend(word[i:])
241
+ break
242
+ else:
243
+ new_word.extend(word[i:j])
244
+ i = j
245
+
246
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
247
+ new_word.append(first + second)
248
+ i += 2
249
+ else:
250
+ new_word.append(word[i])
251
+ i += 1
252
+ new_word = tuple(new_word)
253
+ word = new_word
254
+ if len(word) == 1:
255
+ break
256
+ else:
257
+ pairs = get_pairs(word)
258
+ word = " ".join(word)
259
+ self.cache[token] = word
260
+ return word
261
+
262
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
263
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
264
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
265
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
266
+
267
+ output = bos_token_id + token_ids_0 + eos_token_id
268
+
269
+ if token_ids_1 is not None:
270
+ output = output + bos_token_id + token_ids_1 + eos_token_id
271
+
272
+ return output
273
+
274
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_special_tokens_mask
275
+ def get_special_tokens_mask(
276
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
277
+ ) -> List[int]:
278
+ """
279
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
280
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
281
+
282
+ Args:
283
+ token_ids_0 (`List[int]`):
284
+ List of IDs.
285
+ token_ids_1 (`List[int]`, *optional*):
286
+ Optional second list of IDs for sequence pairs.
287
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
288
+ Whether or not the token list is already formatted with special tokens for the model.
289
+
290
+ Returns:
291
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
292
+ """
293
+ if already_has_special_tokens:
294
+ return super().get_special_tokens_mask(
295
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
296
+ )
297
+
298
+ if not self.add_bos_token:
299
+ return super().get_special_tokens_mask(
300
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False
301
+ )
302
+
303
+ if token_ids_1 is None:
304
+ return [1] + ([0] * len(token_ids_0))
305
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
306
+
307
+ def _tokenize(self, text):
308
+ """Tokenize a string."""
309
+ bpe_tokens = []
310
+ text = self.normalizer(text)
311
+ for token in re.findall(self.pat, text):
312
+ token = "".join(
313
+ self.byte_encoder[b] for b in token.encode("utf-8")
314
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
315
+
316
+ # if the token is "Ġ" we replace it with "[SPACE]" (if "[SPACE]" is present in the vocab), otherwise we keep the "Ġ".
317
+ bpe_tokens.extend(
318
+ "[SPACE]" if bpe_token == "\u0120" and "[SPACE]" in self.encoder.keys() else bpe_token
319
+ for bpe_token in self.bpe(token).split(" ")
320
+ )
321
+
322
+ return bpe_tokens
323
+
324
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
325
+ def _convert_token_to_id(self, token):
326
+ """Converts a token (str) in an id using the vocab."""
327
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
328
+
329
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
330
+ def _convert_id_to_token(self, index):
331
+ """Converts an index (integer) in a token (str) using the vocab."""
332
+ return self.decoder.get(index)
333
+
334
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
335
+ def convert_tokens_to_string(self, tokens):
336
+ """Converts a sequence of tokens (string) in a single string."""
337
+ text = "".join(tokens)
338
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
339
+ return text
340
+
341
+ def clean_up_tokenization(self, text):
342
+ text = "".join(text)
343
+ vocab_tokens = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys())
344
+
345
+ text = text.replace("[SPACE]", " ") if "[SPACE]" in vocab_tokens else text
346
+ text = text.replace("[STOP]", " ") if "[STOP]" in vocab_tokens else text
347
+
348
+ text = text.replace(self.unk_token, "").replace("  ", " ").replace("  ", " ")
349
+ return text
350
+
351
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
352
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
353
+ if not os.path.isdir(save_directory):
354
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
355
+ return
356
+ vocab_file = os.path.join(
357
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
358
+ )
359
+ merge_file = os.path.join(
360
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
361
+ )
362
+
363
+ with open(vocab_file, "w", encoding="utf-8") as f:
364
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
365
+
366
+ index = 0
367
+ with open(merge_file, "w", encoding="utf-8") as writer:
368
+ writer.write("#version: 0.2\n")
369
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
370
+ if index != token_index:
371
+ logger.warning(
372
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
373
+ " Please check that the tokenizer is not corrupted!"
374
+ )
375
+ index = token_index
376
+ writer.write(" ".join(bpe_tokens) + "\n")
377
+ index += 1
378
+
379
+ return vocab_file, merge_file
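
The `[SPACE]` substitution in `_tokenize` above builds on the GPT-2 byte-to-unicode table: the space byte is remapped to `"Ġ"` (U+0120), which the CLVP tokenizer then swaps for the literal `[SPACE]` token whenever that token exists in the vocab. A standalone sketch of just the mapping step:

```python
# Reproduction of `bytes_to_unicode` from the tokenizer above, to show where
# "Ġ" comes from; printable bytes map to themselves, the rest are shifted.
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

byte_encoder = bytes_to_unicode()
print(byte_encoder[ord("a")])  # 'a'  -> printable bytes are kept as-is
print(byte_encoder[ord(" ")])  # 'Ġ'  -> later rendered as the [SPACE] token
```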
env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.08 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/configuration_decision_transformer.cpython-310.pyc ADDED
Binary file (6.3 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc ADDED
Binary file (25.6 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py ADDED
@@ -0,0 +1,117 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
28
+ "tokenization_lxmert": ["LxmertTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_tokenizers_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_lxmert"] = [
46
+ "LxmertEncoder",
47
+ "LxmertForPreTraining",
48
+ "LxmertForQuestionAnswering",
49
+ "LxmertModel",
50
+ "LxmertPreTrainedModel",
51
+ "LxmertVisualFeatureEncoder",
52
+ "LxmertXLayer",
53
+ ]
54
+
55
+ try:
56
+ if not is_tf_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ _import_structure["modeling_tf_lxmert"] = [
62
+ "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
63
+ "TFLxmertForPreTraining",
64
+ "TFLxmertMainLayer",
65
+ "TFLxmertModel",
66
+ "TFLxmertPreTrainedModel",
67
+ "TFLxmertVisualFeatureEncoder",
68
+ ]
69
+
70
+
71
+ if TYPE_CHECKING:
72
+ from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
73
+ from .tokenization_lxmert import LxmertTokenizer
74
+
75
+ try:
76
+ if not is_tokenizers_available():
77
+ raise OptionalDependencyNotAvailable()
78
+ except OptionalDependencyNotAvailable:
79
+ pass
80
+ else:
81
+ from .tokenization_lxmert_fast import LxmertTokenizerFast
82
+
83
+ try:
84
+ if not is_torch_available():
85
+ raise OptionalDependencyNotAvailable()
86
+ except OptionalDependencyNotAvailable:
87
+ pass
88
+ else:
89
+ from .modeling_lxmert import (
90
+ LxmertEncoder,
91
+ LxmertForPreTraining,
92
+ LxmertForQuestionAnswering,
93
+ LxmertModel,
94
+ LxmertPreTrainedModel,
95
+ LxmertVisualFeatureEncoder,
96
+ LxmertXLayer,
97
+ )
98
+
99
+ try:
100
+ if not is_tf_available():
101
+ raise OptionalDependencyNotAvailable()
102
+ except OptionalDependencyNotAvailable:
103
+ pass
104
+ else:
105
+ from .modeling_tf_lxmert import (
106
+ TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
107
+ TFLxmertForPreTraining,
108
+ TFLxmertMainLayer,
109
+ TFLxmertModel,
110
+ TFLxmertPreTrainedModel,
111
+ TFLxmertVisualFeatureEncoder,
112
+ )
113
+
114
+ else:
115
+ import sys
116
+
117
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
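Editorial note: the `_LazyModule` registration above defers the heavy framework imports until a symbol is actually accessed. The sketch below is not the transformers implementation; it is a stand-alone illustration of the same idea using PEP 562 module-level `__getattr__` (the `lazy_demo.py` file name and the `json` example are invented).

# lazy_demo.py
import importlib

# Map each exported symbol to the module that actually defines it.
_import_structure = {"json": ["dumps", "loads"]}
_symbol_to_module = {symbol: module for module, symbols in _import_structure.items() for symbol in symbols}


def __getattr__(name):
    # Only runs when `name` is not already defined here: import on first access.
    if name not in _symbol_to_module:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(_symbol_to_module[name])
    return getattr(module, name)

Importing `lazy_demo` is then nearly free; `json` is only loaded the first time `lazy_demo.dumps` is touched.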
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py ADDED
@@ -0,0 +1,1438 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch LXMERT model."""
16
+
17
+
18
+ import math
19
+ import os
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import Dict, Optional, Tuple, Union
23
+
24
+ import torch
25
+ from torch import nn
26
+ from torch.nn import CrossEntropyLoss, SmoothL1Loss
27
+
28
+ from ...activations import ACT2FN, gelu
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import (
31
+ ModelOutput,
32
+ add_code_sample_docstrings,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_lxmert import LxmertConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
44
+ _CONFIG_FOR_DOC = "LxmertConfig"
45
+
46
+ LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
47
+ "unc-nlp/lxmert-base-uncased",
48
+ ]
49
+
50
+
51
+ class GeLU(nn.Module):
52
+ def __init__(self):
53
+ super().__init__()
54
+
55
+ def forward(self, x):
56
+ return gelu(x)
57
+
58
+
59
+ @dataclass
60
+ class LxmertModelOutput(ModelOutput):
61
+ """
62
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
63
+ visual, and cross-modality encoders. (Note: the visual encoder in Lxmert is referred to as the "relationship"
64
+ encoder.)
65
+
66
+
67
+ Args:
68
+ language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
69
+ Sequence of hidden-states at the output of the last layer of the language encoder.
70
+ vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
71
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
72
+ pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
73
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
74
+ by a Linear layer and a Tanh activation function.
75
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
76
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
77
+ shape `(batch_size, sequence_length, hidden_size)`.
78
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
79
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
80
+ shape `(batch_size, sequence_length, hidden_size)`.
81
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
82
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
83
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
84
+ the self-attention heads.
85
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
86
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
87
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
88
+ the self-attention heads.
89
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
90
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
91
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
92
+ the self-attention heads.
93
+ """
94
+
95
+ language_output: Optional[torch.FloatTensor] = None
96
+ vision_output: Optional[torch.FloatTensor] = None
97
+ pooled_output: Optional[torch.FloatTensor] = None
98
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
99
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
100
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
101
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
102
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
103
+
104
+
105
+ @dataclass
106
+ class LxmertForQuestionAnsweringOutput(ModelOutput):
107
+ """
108
+ Output type of [`LxmertForQuestionAnswering`].
109
+
110
+ Args:
111
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
112
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
113
+ (classification) loss.
114
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*):
115
+ Prediction scores of question answering objective (classification).
116
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
117
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
118
+ shape `(batch_size, sequence_length, hidden_size)`.
119
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
120
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
121
+ shape `(batch_size, sequence_length, hidden_size)`.
122
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
123
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
124
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
125
+ the self-attention heads.
126
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
127
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
128
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
129
+ the self-attention heads.
130
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
131
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
132
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
133
+ the self-attention heads.
134
+ """
135
+
136
+ loss: Optional[torch.FloatTensor] = None
137
+ question_answering_score: Optional[torch.FloatTensor] = None
138
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
139
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
140
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
141
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
142
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
143
+
144
+
145
+ @dataclass
146
+ class LxmertForPreTrainingOutput(ModelOutput):
147
+ """
148
+ Output type of [`LxmertForPreTraining`].
149
+
150
+ Args:
151
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
152
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
153
+ (classification) loss.
154
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
155
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
156
+ cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`):
157
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
158
+ continuation before SoftMax).
159
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`):
160
+ Prediction scores of question answering objective (classification).
161
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
162
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
163
+ shape `(batch_size, sequence_length, hidden_size)`.
164
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
165
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
166
+ shape `(batch_size, sequence_length, hidden_size)`.
167
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
168
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
169
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
170
+ the self-attention heads.
171
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
172
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
173
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
174
+ the self-attention heads.
175
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
176
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
177
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
178
+ the self-attention heads.
179
+
180
+ """
181
+
182
+ loss: Optional[torch.FloatTensor] = None
183
+ prediction_logits: Optional[torch.FloatTensor] = None
184
+ cross_relationship_score: Optional[torch.FloatTensor] = None
185
+ question_answering_score: Optional[torch.FloatTensor] = None
186
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
187
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
188
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
189
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
190
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
191
+
192
+
193
+ def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path):
194
+ """Load tf checkpoints in a pytorch model."""
195
+ try:
196
+ import re
197
+
198
+ import numpy as np
199
+ import tensorflow as tf
200
+ except ImportError:
201
+ logger.error(
202
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
203
+ "https://www.tensorflow.org/install/ for installation instructions."
204
+ )
205
+ raise
206
+ tf_path = os.path.abspath(tf_checkpoint_path)
207
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
208
+ # Load weights from TF model
209
+ init_vars = tf.train.list_variables(tf_path)
210
+ names = []
211
+ arrays = []
212
+ for name, shape in init_vars:
213
+ logger.info(f"Loading TF weight {name} with shape {shape}")
214
+ array = tf.train.load_variable(tf_path, name)
215
+ names.append(name)
216
+ arrays.append(array)
217
+
218
+ for name, array in zip(names, arrays):
219
+ name = name.split("/")
220
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
221
+ # which are not required for using pretrained model
222
+ if any(
223
+ n
224
+ in [
225
+ "adam_v",
226
+ "adam_m",
227
+ "AdamWeightDecayOptimizer",
228
+ "AdamWeightDecayOptimizer_1",
229
+ "global_step",
230
+ ]
231
+ for n in name
232
+ ):
233
+ logger.info(f"Skipping {'/'.join(name)}")
234
+ continue
235
+ pointer = model
236
+ for m_name in name:
237
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
238
+ scope_names = re.split(r"_(\d+)", m_name)
239
+ else:
240
+ scope_names = [m_name]
241
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
242
+ pointer = getattr(pointer, "weight")
243
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
244
+ pointer = getattr(pointer, "bias")
245
+ elif scope_names[0] == "output_weights":
246
+ pointer = getattr(pointer, "weight")
247
+ elif scope_names[0] == "squad":
248
+ pointer = getattr(pointer, "classifier")
249
+ else:
250
+ try:
251
+ pointer = getattr(pointer, scope_names[0])
252
+ except AttributeError:
253
+ logger.info(f"Skipping {'/'.join(name)}")
254
+ continue
255
+ if len(scope_names) >= 2:
256
+ num = int(scope_names[1])
257
+ pointer = pointer[num]
258
+ if m_name[-11:] == "_embeddings":
259
+ pointer = getattr(pointer, "weight")
260
+ elif m_name == "kernel":
261
+ array = np.transpose(array)
262
+ try:
263
+ assert pointer.shape == array.shape
264
+ except AssertionError as e:
265
+ e.args += (pointer.shape, array.shape)
266
+ raise
267
+ logger.info(f"Initialize PyTorch weight {name}")
268
+ pointer.data = torch.from_numpy(array)
269
+ return model
270
+
271
+
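Editorial note: the conversion routine above walks slash-separated TensorFlow variable names onto nested PyTorch attributes with `getattr`. A minimal sketch of that name-walking, on an invented two-level toy model rather than the real LXMERT checkpoint layout:

import torch
from torch import nn

model = nn.Module()
model.encoder = nn.Module()
model.encoder.dense = nn.Linear(4, 4)

tf_name = "encoder/dense/kernel"
pointer = model
for part in tf_name.split("/"):
    # TF calls Linear weights "kernel"; the PyTorch attribute is "weight" (and the array is transposed).
    pointer = getattr(pointer, "weight" if part == "kernel" else part)

pointer.data = torch.zeros(4, 4)  # stand-in for the transposed numpy array loaded from the checkpoint
print(model.encoder.dense.weight.abs().max().item())  # 0.0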
272
+ class LxmertEmbeddings(nn.Module):
273
+ """Construct the embeddings from word, position and token_type embeddings."""
274
+
275
+ def __init__(self, config):
276
+ super().__init__()
277
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
278
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
279
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
280
+
281
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
282
+ # any TensorFlow checkpoint file
283
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
284
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
285
+
286
+ def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
287
+ if input_ids is not None:
288
+ input_shape = input_ids.size()
289
+ device = input_ids.device
290
+ else:
291
+ input_shape = inputs_embeds.size()[:-1]
292
+ device = inputs_embeds.device
293
+ seq_length = input_shape[1]
294
+
295
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
296
+ position_ids = position_ids.unsqueeze(0).expand(input_shape)
297
+
298
+ if token_type_ids is None:
299
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
300
+
301
+ if inputs_embeds is None:
302
+ inputs_embeds = self.word_embeddings(input_ids)
303
+ position_embeddings = self.position_embeddings(position_ids)
304
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
305
+
306
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
307
+ embeddings = self.LayerNorm(embeddings)
308
+ embeddings = self.dropout(embeddings)
309
+ return embeddings
310
+
311
+
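Editorial note: a quick shape check for the embedding sum above, with toy sizes instead of the real config values:

import torch
from torch import nn

vocab_size, max_positions, type_vocab, hidden = 100, 20, 2, 8
word = nn.Embedding(vocab_size, hidden, padding_idx=0)
position = nn.Embedding(max_positions, hidden, padding_idx=0)
token_type = nn.Embedding(type_vocab, hidden, padding_idx=0)

input_ids = torch.randint(1, vocab_size, (2, 5))           # (batch, seq_len)
position_ids = torch.arange(5).unsqueeze(0).expand(2, 5)   # broadcast over the batch
token_type_ids = torch.zeros(2, 5, dtype=torch.long)

embeddings = word(input_ids) + position(position_ids) + token_type(token_type_ids)
print(embeddings.shape)  # torch.Size([2, 5, 8]); LayerNorm and dropout are then applied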
312
+ class LxmertAttention(nn.Module):
313
+ def __init__(self, config, ctx_dim=None):
314
+ super().__init__()
315
+ if config.hidden_size % config.num_attention_heads != 0:
316
+ raise ValueError(
317
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
318
+ f"heads ({config.num_attention_heads})"
319
+ )
320
+ self.num_attention_heads = config.num_attention_heads
321
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
322
+ self.head_size = self.num_attention_heads * self.attention_head_size
323
+
324
+ # visual_dim = 2048
325
+ if ctx_dim is None:
326
+ ctx_dim = config.hidden_size
327
+ self.query = nn.Linear(config.hidden_size, self.head_size)
328
+ self.key = nn.Linear(ctx_dim, self.head_size)
329
+ self.value = nn.Linear(ctx_dim, self.head_size)
330
+
331
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
332
+
333
+ def transpose_for_scores(self, x):
334
+ new_x_shape = x.size()[:-1] + (
335
+ self.num_attention_heads,
336
+ self.attention_head_size,
337
+ )
338
+ x = x.view(new_x_shape)
339
+ return x.permute(0, 2, 1, 3)
340
+
341
+ def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
342
+ mixed_query_layer = self.query(hidden_states)
343
+ mixed_key_layer = self.key(context)
344
+ mixed_value_layer = self.value(context)
345
+
346
+ query_layer = self.transpose_for_scores(mixed_query_layer)
347
+ key_layer = self.transpose_for_scores(mixed_key_layer)
348
+ value_layer = self.transpose_for_scores(mixed_value_layer)
349
+
350
+ # Take the dot product between "query" and "key" to get the raw attention scores.
351
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
352
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
353
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
354
+ if attention_mask is not None:
355
+ attention_scores = attention_scores + attention_mask
356
+
357
+ # Normalize the attention scores to probabilities.
358
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
359
+
360
+ # This is actually dropping out entire tokens to attend to, which might
361
+ # seem a bit unusual, but is taken from the original Transformer paper.
362
+ attention_probs = self.dropout(attention_probs)
363
+
364
+ context_layer = torch.matmul(attention_probs, value_layer)
365
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
366
+ new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
367
+ context_layer = context_layer.view(new_context_layer_shape)
368
+
369
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
370
+ return outputs
371
+
372
+
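Editorial note: the head split/merge in `transpose_for_scores` is easiest to see on toy sizes; the sketch below runs plain self-attention (queries, keys and values from the same tensor) with 2 heads of size 4:

import torch

batch, seq_len, num_heads, head_size = 2, 5, 2, 4
hidden = num_heads * head_size

x = torch.randn(batch, seq_len, hidden)                    # output of a query/key/value projection
x = x.view(batch, seq_len, num_heads, head_size)           # split the hidden dim into heads
x = x.permute(0, 2, 1, 3)                                  # (batch, num_heads, seq_len, head_size)

scores = x @ x.transpose(-1, -2) / head_size**0.5          # (batch, num_heads, seq_len, seq_len)
probs = torch.softmax(scores, dim=-1)
context = probs @ x                                        # (batch, num_heads, seq_len, head_size)
context = context.permute(0, 2, 1, 3).reshape(batch, seq_len, hidden)
print(context.shape)  # torch.Size([2, 5, 8])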
373
+ class LxmertAttentionOutput(nn.Module):
374
+ def __init__(self, config):
375
+ super().__init__()
376
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
377
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
378
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
379
+
380
+ def forward(self, hidden_states, input_tensor):
381
+ hidden_states = self.dense(hidden_states)
382
+ hidden_states = self.dropout(hidden_states)
383
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
384
+ return hidden_states
385
+
386
+
387
+ class LxmertCrossAttentionLayer(nn.Module):
388
+ def __init__(self, config):
389
+ super().__init__()
390
+ self.att = LxmertAttention(config)
391
+ self.output = LxmertAttentionOutput(config)
392
+
393
+ def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
394
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
395
+ if output_attentions:
396
+ attention_probs = output[1]
397
+ attention_output = self.output(output[0], input_tensor)
398
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
399
+ return outputs
400
+
401
+
402
+ class LxmertSelfAttentionLayer(nn.Module):
403
+ def __init__(self, config):
404
+ super().__init__()
405
+ self.self = LxmertAttention(config)
406
+ self.output = LxmertAttentionOutput(config)
407
+
408
+ def forward(self, input_tensor, attention_mask, output_attentions=False):
409
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
410
+ output = self.self(
411
+ input_tensor,
412
+ input_tensor,
413
+ attention_mask,
414
+ output_attentions=output_attentions,
415
+ )
416
+ if output_attentions:
417
+ attention_probs = output[1]
418
+ attention_output = self.output(output[0], input_tensor)
419
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
420
+ return outputs
421
+
422
+
423
+ class LxmertIntermediate(nn.Module):
424
+ def __init__(self, config):
425
+ super().__init__()
426
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
427
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
428
+
429
+ def forward(self, hidden_states):
430
+ hidden_states = self.dense(hidden_states)
431
+ hidden_states = self.intermediate_act_fn(hidden_states)
432
+ return hidden_states
433
+
434
+
435
+ class LxmertOutput(nn.Module):
436
+ def __init__(self, config):
437
+ super().__init__()
438
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
439
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
440
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
441
+
442
+ def forward(self, hidden_states, input_tensor):
443
+ hidden_states = self.dense(hidden_states)
444
+ hidden_states = self.dropout(hidden_states)
445
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
446
+ return hidden_states
447
+
448
+
449
+ class LxmertLayer(nn.Module):
450
+ def __init__(self, config):
451
+ super().__init__()
452
+ self.attention = LxmertSelfAttentionLayer(config)
453
+ self.intermediate = LxmertIntermediate(config)
454
+ self.output = LxmertOutput(config)
455
+
456
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
457
+ outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
458
+ attention_output = outputs[0]
459
+ intermediate_output = self.intermediate(attention_output)
460
+ layer_output = self.output(intermediate_output, attention_output)
461
+ outputs = (layer_output,) + outputs[1:] # add attentions if we output them
462
+ return outputs
463
+
464
+
465
+ class LxmertXLayer(nn.Module):
466
+ def __init__(self, config):
467
+ super().__init__()
468
+ # The cross-attention Layer
469
+ self.visual_attention = LxmertCrossAttentionLayer(config)
470
+
471
+ # Self-attention Layers
472
+ self.lang_self_att = LxmertSelfAttentionLayer(config)
473
+ self.visn_self_att = LxmertSelfAttentionLayer(config)
474
+
475
+ # Intermediate and Output Layers (FFNs)
476
+ self.lang_inter = LxmertIntermediate(config)
477
+ self.lang_output = LxmertOutput(config)
478
+ self.visn_inter = LxmertIntermediate(config)
479
+ self.visn_output = LxmertOutput(config)
480
+
481
+ def cross_att(
482
+ self,
483
+ lang_input,
484
+ lang_attention_mask,
485
+ visual_input,
486
+ visual_attention_mask,
487
+ output_x_attentions=False,
488
+ ):
489
+ # Cross Attention
490
+ lang_att_output = self.visual_attention(
491
+ lang_input,
492
+ visual_input,
493
+ ctx_att_mask=visual_attention_mask,
494
+ output_attentions=output_x_attentions,
495
+ )
496
+ visual_att_output = self.visual_attention(
497
+ visual_input,
498
+ lang_input,
499
+ ctx_att_mask=lang_attention_mask,
500
+ output_attentions=False,
501
+ )
502
+ return lang_att_output, visual_att_output
503
+
504
+ def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
505
+ # Self Attention
506
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
507
+ visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
508
+ return lang_att_output[0], visual_att_output[0]
509
+
510
+ def output_fc(self, lang_input, visual_input):
511
+ # FC layers
512
+ lang_inter_output = self.lang_inter(lang_input)
513
+ visual_inter_output = self.visn_inter(visual_input)
514
+
515
+ # Layer output
516
+ lang_output = self.lang_output(lang_inter_output, lang_input)
517
+ visual_output = self.visn_output(visual_inter_output, visual_input)
518
+
519
+ return lang_output, visual_output
520
+
521
+ def forward(
522
+ self,
523
+ lang_feats,
524
+ lang_attention_mask,
525
+ visual_feats,
526
+ visual_attention_mask,
527
+ output_attentions=False,
528
+ ):
529
+ lang_att_output, visual_att_output = self.cross_att(
530
+ lang_input=lang_feats,
531
+ lang_attention_mask=lang_attention_mask,
532
+ visual_input=visual_feats,
533
+ visual_attention_mask=visual_attention_mask,
534
+ output_x_attentions=output_attentions,
535
+ )
536
+ attention_probs = lang_att_output[1:]
537
+ lang_att_output, visual_att_output = self.self_att(
538
+ lang_att_output[0],
539
+ lang_attention_mask,
540
+ visual_att_output[0],
541
+ visual_attention_mask,
542
+ )
543
+
544
+ lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
545
+ return (
546
+ (
547
+ lang_output,
548
+ visual_output,
549
+ attention_probs[0],
550
+ )
551
+ if output_attentions
552
+ else (lang_output, visual_output)
553
+ )
554
+
555
+
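Editorial note: a condensed restatement of one `LxmertXLayer` pass, assuming `layer` is an instance of the class above; both cross-attention calls consume the input features of each modality, which is why the sketch keeps the originals around:

def x_layer_pass(layer, lang, vis, lang_mask, vis_mask):
    # 1. Cross attention: each modality attends over the other modality's input features.
    lang_x = layer.visual_attention(lang, vis, ctx_att_mask=vis_mask)[0]
    vis_x = layer.visual_attention(vis, lang, ctx_att_mask=lang_mask)[0]
    # 2. Self attention within each modality.
    lang_s = layer.lang_self_att(lang_x, lang_mask)[0]
    vis_s = layer.visn_self_att(vis_x, vis_mask)[0]
    # 3. Feed-forward blocks; residual connection and LayerNorm live inside the output modules.
    return layer.lang_output(layer.lang_inter(lang_s), lang_s), layer.visn_output(layer.visn_inter(vis_s), vis_s)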
556
+ class LxmertVisualFeatureEncoder(nn.Module):
557
+ def __init__(self, config):
558
+ super().__init__()
559
+ feat_dim = config.visual_feat_dim
560
+ pos_dim = config.visual_pos_dim
561
+
562
+ # Object feature encoding
563
+ self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
564
+ self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
565
+
566
+ # Box position encoding
567
+ self.box_fc = nn.Linear(pos_dim, config.hidden_size)
568
+ self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
569
+
570
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
571
+
572
+ def forward(self, visual_feats, visual_pos):
573
+ x = self.visn_fc(visual_feats)
574
+ x = self.visn_layer_norm(x)
575
+ y = self.box_fc(visual_pos)
576
+ y = self.box_layer_norm(y)
577
+ output = (x + y) / 2
578
+
579
+ output = self.dropout(output)
580
+ return output
581
+
582
+
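Editorial note: typical shapes for the visual feature encoder above, with 36 region features of dimension 2048 plus 4-dimensional normalized boxes averaged after separate projections and LayerNorms (the sizes below mirror the usual Faster R-CNN setup and are illustrative, not read from a config):

import torch
from torch import nn

hidden = 768
visn_fc, box_fc = nn.Linear(2048, hidden), nn.Linear(4, hidden)
visn_norm, box_norm = nn.LayerNorm(hidden, eps=1e-12), nn.LayerNorm(hidden, eps=1e-12)

visual_feats = torch.randn(1, 36, 2048)  # (batch, num_boxes, visual_feat_dim)
visual_pos = torch.rand(1, 36, 4)        # normalized (x1, y1, x2, y2) boxes in [0, 1]

output = (visn_norm(visn_fc(visual_feats)) + box_norm(box_fc(visual_pos))) / 2
print(output.shape)  # torch.Size([1, 36, 768])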
583
+ class LxmertEncoder(nn.Module):
584
+ def __init__(self, config):
585
+ super().__init__()
586
+
587
+ # Obj-level image embedding layer
588
+ self.visn_fc = LxmertVisualFeatureEncoder(config)
589
+ self.config = config
590
+
591
+ # Number of layers
592
+ self.num_l_layers = config.l_layers
593
+ self.num_x_layers = config.x_layers
594
+ self.num_r_layers = config.r_layers
595
+
596
+ # Layers
597
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
598
+ self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
599
+ self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
600
+ self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])
601
+
602
+ def forward(
603
+ self,
604
+ lang_feats,
605
+ lang_attention_mask,
606
+ visual_feats,
607
+ visual_pos,
608
+ visual_attention_mask=None,
609
+ output_attentions=None,
610
+ ):
611
+ vision_hidden_states = ()
612
+ language_hidden_states = ()
613
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
614
+ language_attentions = () if output_attentions or self.config.output_attentions else None
615
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
616
+
617
+ visual_feats = self.visn_fc(visual_feats, visual_pos)
618
+
619
+ # Run language layers
620
+ for layer_module in self.layer:
621
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
622
+ lang_feats = l_outputs[0]
623
+ language_hidden_states = language_hidden_states + (lang_feats,)
624
+ if language_attentions is not None:
625
+ language_attentions = language_attentions + (l_outputs[1],)
626
+
627
+ # Run relational layers
628
+ for layer_module in self.r_layers:
629
+ v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
630
+ visual_feats = v_outputs[0]
631
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
632
+ if vision_attentions is not None:
633
+ vision_attentions = vision_attentions + (v_outputs[1],)
634
+
635
+ # Run cross-modality layers
636
+ for layer_module in self.x_layers:
637
+ x_outputs = layer_module(
638
+ lang_feats,
639
+ lang_attention_mask,
640
+ visual_feats,
641
+ visual_attention_mask,
642
+ output_attentions=output_attentions,
643
+ )
644
+ lang_feats, visual_feats = x_outputs[:2]
645
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
646
+ language_hidden_states = language_hidden_states + (lang_feats,)
647
+ if cross_encoder_attentions is not None:
648
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
649
+ visual_encoder_outputs = (
650
+ vision_hidden_states,
651
+ vision_attentions if output_attentions else None,
652
+ )
653
+ lang_encoder_outputs = (
654
+ language_hidden_states,
655
+ language_attentions if output_attentions else None,
656
+ )
657
+ return (
658
+ visual_encoder_outputs,
659
+ lang_encoder_outputs,
660
+ cross_encoder_attentions if output_attentions else None,
661
+ )
662
+
663
+
664
+ class LxmertPooler(nn.Module):
665
+ def __init__(self, config):
666
+ super(LxmertPooler, self).__init__()
667
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
668
+ self.activation = nn.Tanh()
669
+
670
+ def forward(self, hidden_states):
671
+ # We "pool" the model by simply taking the hidden state corresponding
672
+ # to the first token.
673
+ first_token_tensor = hidden_states[:, 0]
674
+ pooled_output = self.dense(first_token_tensor)
675
+ pooled_output = self.activation(pooled_output)
676
+ return pooled_output
677
+
678
+
679
+ class LxmertPredictionHeadTransform(nn.Module):
680
+ def __init__(self, config):
681
+ super(LxmertPredictionHeadTransform, self).__init__()
682
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
683
+ self.transform_act_fn = ACT2FN[config.hidden_act]
684
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
685
+
686
+ def forward(self, hidden_states):
687
+ hidden_states = self.dense(hidden_states)
688
+ hidden_states = self.transform_act_fn(hidden_states)
689
+ hidden_states = self.LayerNorm(hidden_states)
690
+ return hidden_states
691
+
692
+
693
+ class LxmertLMPredictionHead(nn.Module):
694
+ def __init__(self, config, lxmert_model_embedding_weights):
695
+ super(LxmertLMPredictionHead, self).__init__()
696
+ self.transform = LxmertPredictionHeadTransform(config)
697
+
698
+ # The output weights are the same as the input embeddings, but there is
699
+ # an output-only bias for each token.
700
+ self.decoder = nn.Linear(
701
+ lxmert_model_embedding_weights.size(1),
702
+ lxmert_model_embedding_weights.size(0),
703
+ bias=False,
704
+ )
705
+ self.decoder.weight = lxmert_model_embedding_weights
706
+ self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))
707
+
708
+ def forward(self, hidden_states):
709
+ hidden_states = self.transform(hidden_states)
710
+ hidden_states = self.decoder(hidden_states) + self.bias
711
+ return hidden_states
712
+
713
+
714
+ class LxmertVisualAnswerHead(nn.Module):
715
+ def __init__(self, config, num_labels):
716
+ super().__init__()
717
+ hid_dim = config.hidden_size
718
+ self.logit_fc = nn.Sequential(
719
+ nn.Linear(hid_dim, hid_dim * 2),
720
+ GeLU(),
721
+ nn.LayerNorm(hid_dim * 2, eps=1e-12),
722
+ nn.Linear(hid_dim * 2, num_labels),
723
+ )
724
+
725
+ def forward(self, hidden_states):
726
+ return self.logit_fc(hidden_states)
727
+
728
+
729
+ class LxmertVisualObjHead(nn.Module):
730
+ def __init__(self, config):
731
+ super().__init__()
732
+ self.transform = LxmertPredictionHeadTransform(config)
733
+ # Decide the use of visual losses
734
+ visual_losses = {}
735
+ if config.visual_obj_loss:
736
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
737
+ if config.visual_attr_loss:
738
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
739
+ if config.visual_feat_loss:
740
+ visual_losses["feat"] = {
741
+ "shape": (-1, config.visual_feat_dim),
742
+ "num": config.visual_feat_dim,
743
+ }
744
+ self.visual_losses = visual_losses
745
+
746
+ # The output weights are the same as the input embeddings, but there is
747
+ # an output-only bias for each token.
748
+ self.decoder_dict = nn.ModuleDict(
749
+ {key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
750
+ )
751
+
752
+ def forward(self, hidden_states):
753
+ hidden_states = self.transform(hidden_states)
754
+ output = {}
755
+ for key in self.visual_losses:
756
+ output[key] = self.decoder_dict[key](hidden_states)
757
+ return output
758
+
759
+
760
+ class LxmertPreTrainingHeads(nn.Module):
761
+ def __init__(self, config, lxmert_model_embedding_weights):
762
+ super(LxmertPreTrainingHeads, self).__init__()
763
+ self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
764
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
765
+
766
+ def forward(self, sequence_output, pooled_output):
767
+ prediction_scores = self.predictions(sequence_output)
768
+ seq_relationship_score = self.seq_relationship(pooled_output)
769
+ return prediction_scores, seq_relationship_score
770
+
771
+
772
+ class LxmertPreTrainedModel(PreTrainedModel):
773
+ """
774
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
775
+ models.
776
+ """
777
+
778
+ config_class = LxmertConfig
779
+ load_tf_weights = load_tf_weights_in_lxmert
780
+ base_model_prefix = "lxmert"
781
+
782
+ def _init_weights(self, module):
783
+ """Initialize the weights"""
784
+ if isinstance(module, nn.Linear):
785
+ # Slightly different from the TF version which uses truncated_normal for initialization
786
+ # cf https://github.com/pytorch/pytorch/pull/5617
787
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
788
+ if module.bias is not None:
789
+ module.bias.data.zero_()
790
+ elif isinstance(module, nn.Embedding):
791
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
792
+ if module.padding_idx is not None:
793
+ module.weight.data[module.padding_idx].zero_()
794
+ elif isinstance(module, nn.LayerNorm):
795
+ module.bias.data.zero_()
796
+ module.weight.data.fill_(1.0)
797
+
798
+
799
+ LXMERT_START_DOCSTRING = r"""
800
+
801
+ The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
802
+ Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
803
+ model, pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual
804
+ Genome, using a combination of masked language modeling, region-of-interest feature regression, and cross-entropy
805
+ losses for question answering, attribute prediction, and object tag prediction.
806
+
807
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
808
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
809
+ etc.)
810
+
811
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
812
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
813
+ and behavior.
814
+
815
+ Parameters:
816
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
817
+ Initializing with a config file does not load the weights associated with the model, only the
818
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
819
+ """
820
+
821
+ LXMERT_INPUTS_DOCSTRING = r"""
822
+
823
+ Args:
824
+ input_ids (`torch.LongTensor` of shape `({0})`):
825
+ Indices of input sequence tokens in the vocabulary.
826
+
827
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
828
+ [`PreTrainedTokenizer.__call__`] for details.
829
+
830
+ [What are input IDs?](../glossary#input-ids)
831
+ visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
832
+ This input represents visual features: ROI-pooled object features extracted from bounding boxes by a
833
+ Faster R-CNN model.
834
+
835
+ These are currently not provided by the transformers library.
836
+ visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
837
+ This input represents spatial features corresponding to their relative (via index) visual features. The
838
+ pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
839
+ 1.
840
+
841
+ These are currently not provided by the transformers library.
842
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
843
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
844
+
845
+ - 1 for tokens that are **not masked**,
846
+ - 0 for tokens that are **masked**.
847
+
848
+ [What are attention masks?](../glossary#attention-mask)
849
+ visual_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
850
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
851
+
852
+ - 1 for tokens that are **not masked**,
853
+ - 0 for tokens that are **masked**.
854
+
855
+ [What are attention masks?](../glossary#attention-mask)
856
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
857
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
858
+ 1]`:
859
+
860
+ - 0 corresponds to a *sentence A* token,
861
+ - 1 corresponds to a *sentence B* token.
862
+
863
+ [What are token type IDs?](../glossary#token-type-ids)
864
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
865
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
866
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
867
+ model's internal embedding lookup matrix.
868
+ output_attentions (`bool`, *optional*):
869
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
870
+ tensors for more detail.
871
+ output_hidden_states (`bool`, *optional*):
872
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
873
+ more detail.
874
+ return_dict (`bool`, *optional*):
875
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
876
+ """
877
+
878
+
879
+ @add_start_docstrings(
880
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
881
+ LXMERT_START_DOCSTRING,
882
+ )
883
+ class LxmertModel(LxmertPreTrainedModel):
884
+ def __init__(self, config):
885
+ super().__init__(config)
886
+ self.embeddings = LxmertEmbeddings(config)
887
+ self.encoder = LxmertEncoder(config)
888
+ self.pooler = LxmertPooler(config)
889
+ # Initialize weights and apply final processing
890
+ self.post_init()
891
+
892
+ def get_input_embeddings(self):
893
+ return self.embeddings.word_embeddings
894
+
895
+ def set_input_embeddings(self, new_embeddings):
896
+ self.embeddings.word_embeddings = new_embeddings
897
+
898
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
899
+ @add_code_sample_docstrings(
900
+ checkpoint=_CHECKPOINT_FOR_DOC,
901
+ output_type=LxmertModelOutput,
902
+ config_class=_CONFIG_FOR_DOC,
903
+ )
904
+ def forward(
905
+ self,
906
+ input_ids: Optional[torch.LongTensor] = None,
907
+ visual_feats: Optional[torch.FloatTensor] = None,
908
+ visual_pos: Optional[torch.FloatTensor] = None,
909
+ attention_mask: Optional[torch.FloatTensor] = None,
910
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
911
+ token_type_ids: Optional[torch.LongTensor] = None,
912
+ inputs_embeds: Optional[torch.FloatTensor] = None,
913
+ output_attentions: Optional[bool] = None,
914
+ output_hidden_states: Optional[bool] = None,
915
+ return_dict: Optional[bool] = None,
916
+ ) -> Union[LxmertModelOutput, Tuple[torch.FloatTensor]]:
917
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
918
+ output_hidden_states = (
919
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
920
+ )
921
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
922
+
923
+ if input_ids is not None and inputs_embeds is not None:
924
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
925
+ elif input_ids is not None:
926
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
927
+ input_shape = input_ids.size()
928
+ elif inputs_embeds is not None:
929
+ input_shape = inputs_embeds.size()[:-1]
930
+ else:
931
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
932
+
933
+ if visual_feats is None:
934
+ raise ValueError("`visual_feats` cannot be `None`")
935
+ if visual_pos is None:
936
+ raise ValueError("`visual_pos` cannot be `None`")
937
+
938
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
939
+
940
+ if attention_mask is None:
941
+ attention_mask = torch.ones(input_shape, device=device)
942
+ if token_type_ids is None:
943
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
944
+
945
+ # We create a 3D attention mask from a 2D tensor mask.
946
+ # Sizes are [batch_size, 1, 1, to_seq_length]
947
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
948
+ # this attention mask is more simple than the triangular masking of causal attention
949
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
950
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
951
+
952
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
953
+ # masked positions, this operation will create a tensor which is 0.0 for
954
+ # positions we want to attend and the dtype's smallest value for masked positions.
955
+ # Since we are adding it to the raw scores before the softmax, this is
956
+ # effectively the same as removing these entirely.
957
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
958
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
959
+
960
+ # Process the visual attention mask
961
+ if visual_attention_mask is not None:
962
+ extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
963
+ extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype)
964
+ extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * torch.finfo(self.dtype).min
965
+ else:
966
+ extended_visual_attention_mask = None
967
+
968
+ # Positional Word Embeddings
969
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds)
970
+
971
+ # Run Lxmert encoder
972
+ encoder_outputs = self.encoder(
973
+ embedding_output,
974
+ extended_attention_mask,
975
+ visual_feats=visual_feats,
976
+ visual_pos=visual_pos,
977
+ visual_attention_mask=extended_visual_attention_mask,
978
+ output_attentions=output_attentions,
979
+ )
980
+
981
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
982
+ vision_hidden_states = visual_encoder_outputs[0]
983
+ language_hidden_states = lang_encoder_outputs[0]
984
+
985
+ all_attentions = ()
986
+ if output_attentions:
987
+ language_attentions = lang_encoder_outputs[1]
988
+ vision_attentions = visual_encoder_outputs[1]
989
+ cross_encoder_attentions = encoder_outputs[2]
990
+ all_attentions = (
991
+ language_attentions,
992
+ vision_attentions,
993
+ cross_encoder_attentions,
994
+ )
995
+
996
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
997
+
998
+ visual_output = vision_hidden_states[-1]
999
+ lang_output = language_hidden_states[-1]
1000
+ pooled_output = self.pooler(lang_output)
1001
+
1002
+ if not return_dict:
1003
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
1004
+
1005
+ return LxmertModelOutput(
1006
+ pooled_output=pooled_output,
1007
+ language_output=lang_output,
1008
+ vision_output=visual_output,
1009
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
1010
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
1011
+ language_attentions=language_attentions if output_attentions else None,
1012
+ vision_attentions=vision_attentions if output_attentions else None,
1013
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
1014
+ )
1015
+
1016
+
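Editorial note: a minimal usage sketch for the model above, with random visual features standing in for real Faster R-CNN outputs (assumes the `unc-nlp/lxmert-base-uncased` checkpoint referenced in this file can be downloaded):

import torch
from transformers import AutoTokenizer, LxmertModel

tokenizer = AutoTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")

inputs = tokenizer("What is on the table?", return_tensors="pt")
visual_feats = torch.randn(1, 36, model.config.visual_feat_dim)  # (batch, num_boxes, 2048)
visual_pos = torch.rand(1, 36, model.config.visual_pos_dim)      # normalized boxes, (batch, num_boxes, 4)

outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
print(outputs.language_output.shape, outputs.vision_output.shape, outputs.pooled_output.shape)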
1017
+ @add_start_docstrings(
1018
+ """Lxmert Model with a specified pretraining head on top.""",
1019
+ LXMERT_START_DOCSTRING,
1020
+ )
1021
+ class LxmertForPreTraining(LxmertPreTrainedModel):
1022
+ _tied_weights_keys = ["cls.predictions.decoder.weight"]
1023
+
1024
+ def __init__(self, config):
1025
+ super().__init__(config)
1026
+ # Configuration
1027
+ self.config = config
1028
+ self.num_qa_labels = config.num_qa_labels
1029
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1030
+
1031
+ # Use of pretraining tasks
1032
+ self.task_mask_lm = config.task_mask_lm
1033
+ self.task_obj_predict = config.task_obj_predict
1034
+ self.task_matched = config.task_matched
1035
+ self.task_qa = config.task_qa
1036
+
1037
+ # Lxmert backbone
1038
+ self.lxmert = LxmertModel(config)
1039
+
1040
+ # Pre-training heads
1041
+ self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight)
1042
+ if self.task_obj_predict:
1043
+ self.obj_predict_head = LxmertVisualObjHead(config)
1044
+ if self.task_qa:
1045
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
1046
+
1047
+ # Weight initialization
1048
+ # Initialize weights and apply final processing
1049
+ self.post_init()
1050
+
1051
+ # Loss functions
1052
+ self.loss_fcts = {
1053
+ "l2": SmoothL1Loss(reduction="none"),
1054
+ "visual_ce": CrossEntropyLoss(reduction="none"),
1055
+ "ce": CrossEntropyLoss(),
1056
+ }
1057
+
1058
+ visual_losses = {}
1059
+ if config.visual_obj_loss:
1060
+ visual_losses["obj"] = {
1061
+ "shape": (-1,),
1062
+ "num": config.num_object_labels,
1063
+ "loss": "visual_ce",
1064
+ }
1065
+ if config.visual_attr_loss:
1066
+ visual_losses["attr"] = {
1067
+ "shape": (-1,),
1068
+ "num": config.num_attr_labels,
1069
+ "loss": "visual_ce",
1070
+ }
1071
+ if config.visual_feat_loss:
1072
+ visual_losses["feat"] = {
1073
+ "shape": (-1, config.visual_feat_dim),
1074
+ "num": config.visual_feat_dim,
1075
+ "loss": "l2",
1076
+ }
1077
+ self.visual_losses = visual_losses
1078
+
1079
+ def resize_num_qa_labels(self, num_labels):
1080
+ """
1081
+ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
1082
+ will add newly initialized weights. Reducing the size will remove weights from the end
1083
+
1084
+ Args:
1085
+ num_labels (`int`, *optional*):
1086
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
1087
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
1088
+ returns a pointer to the qa labels `torch.nn.Linear` module of the model without doing anything.
1089
+
1090
+ Return:
1091
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
1092
+ """
1093
+
1094
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1095
+ if num_labels is None or cur_qa_logit_layer is None:
1096
+ return
1097
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
1098
+ self.config.num_qa_labels = num_labels
1099
+ self.num_qa_labels = num_labels
1100
+
1101
+ return new_qa_logit_layer
1102
+
1103
+ def _resize_qa_labels(self, num_labels):
1104
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1105
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
1106
+ self._set_qa_logit_layer(new_qa_logit_layer)
1107
+ return self.get_qa_logit_layer()
1108
+
1109
+ def get_qa_logit_layer(self) -> nn.Module:
1110
+ """
1111
+ Returns the linear layer that produces question answering logits.
1112
+
1113
+ Returns:
1114
+ `nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT
1115
+ does not have a visual answering head.
1116
+ """
1117
+ if hasattr(self, "answer_head"):
1118
+ return self.answer_head.logit_fc[-1]
1119
+
1120
+ def _set_qa_logit_layer(self, qa_logit_layer):
1121
+ self.answer_head.logit_fc[-1] = qa_logit_layer
1122
+
1123
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
1124
+ if num_labels is None:
1125
+ return cur_qa_logit_layer
1126
+
1127
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
1128
+ if cur_qa_labels == num_labels:
1129
+ return cur_qa_logit_layer
1130
+
1131
+ # Build new linear output
1132
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1133
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
1134
+ else:
1135
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
1136
+
1137
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
1138
+
1139
+ # initialize all new labels
1140
+ self._init_weights(new_qa_logit_layer)
1141
+
1142
+ # Copy labels from the previous weights
1143
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
1144
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
1145
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1146
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
1147
+
1148
+ return new_qa_logit_layer
1149
+
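Editorial note: the label-resizing helper above boils down to building a fresh `nn.Linear` and copying the overlapping rows; a generic, LXMERT-free illustration with toy sizes:

import torch
from torch import nn

old_head = nn.Linear(16, 5)  # 5 answer labels
new_head = nn.Linear(16, 8)  # resized to 8 labels; rows 5..7 keep their fresh initialization

num_to_copy = min(old_head.out_features, new_head.out_features)
with torch.no_grad():
    new_head.weight[:num_to_copy] = old_head.weight[:num_to_copy]
    new_head.bias[:num_to_copy] = old_head.bias[:num_to_copy]

print(torch.equal(new_head.weight[:num_to_copy], old_head.weight))  # True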
1150
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1151
+ @replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1152
+ def forward(
1153
+ self,
1154
+ input_ids: Optional[torch.LongTensor] = None,
1155
+ visual_feats: Optional[torch.FloatTensor] = None,
1156
+ visual_pos: Optional[torch.FloatTensor] = None,
1157
+ attention_mask: Optional[torch.FloatTensor] = None,
1158
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
1159
+ token_type_ids: Optional[torch.LongTensor] = None,
1160
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1161
+ labels: Optional[torch.LongTensor] = None,
1162
+ obj_labels: Optional[Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]] = None,
1163
+ matched_label: Optional[torch.LongTensor] = None,
1164
+ ans: Optional[torch.Tensor] = None,
1165
+ output_attentions: Optional[bool] = None,
1166
+ output_hidden_states: Optional[bool] = None,
1167
+ return_dict: Optional[bool] = None,
1168
+ **kwargs,
1169
+ ) -> Union[LxmertForPreTrainingOutput, Tuple[torch.FloatTensor]]:
1170
+ r"""
1171
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1172
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1173
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1174
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1175
+ obj_labels (`Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]`, *optional*):
1176
+ Each key is named after one of the visual losses, and each element of the tuple has shape
1177
+ `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)`, holding the label ids and
1178
+ the label scores, respectively.
1179
+ matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1180
+ Labels for computing whether or not the text input matches the image (classification) loss. Input
1181
+ should be a sequence pair (see the `input_ids` docstring). Indices should be in `[0, 1]`:
1182
+
1183
+ - 0 indicates that the sentence does not match the image,
1184
+ - 1 indicates that the sentence does match the image.
1185
+ ans (`torch.Tensor` of shape `(batch_size)`, *optional*):
1186
+ A one-hot representation of the correct answer.
1187
+
1188
+ Returns:
1189
+ """
1190
+
1191
+ if "masked_lm_labels" in kwargs:
1192
+ warnings.warn(
1193
+ "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels`"
1194
+ " instead.",
1195
+ FutureWarning,
1196
+ )
1197
+ labels = kwargs.pop("masked_lm_labels")
1198
+
1199
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1200
+
1201
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1202
+ lxmert_output = self.lxmert(
1203
+ input_ids=input_ids,
1204
+ visual_feats=visual_feats,
1205
+ visual_pos=visual_pos,
1206
+ token_type_ids=token_type_ids,
1207
+ attention_mask=attention_mask,
1208
+ visual_attention_mask=visual_attention_mask,
1209
+ inputs_embeds=inputs_embeds,
1210
+ output_hidden_states=output_hidden_states,
1211
+ output_attentions=output_attentions,
1212
+ return_dict=return_dict,
1213
+ )
1214
+
1215
+ lang_output, visual_output, pooled_output = (
1216
+ lxmert_output[0],
1217
+ lxmert_output[1],
1218
+ lxmert_output[2],
1219
+ )
1220
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
1221
+ if self.task_qa:
1222
+ answer_score = self.answer_head(pooled_output)
1223
+ else:
1224
+ answer_score = pooled_output[0][0]
1225
+
1226
+ total_loss = (
1227
+ None
1228
+ if (labels is None and matched_label is None and obj_labels is None and ans is None)
1229
+ else torch.tensor(0.0, device=device)
1230
+ )
1231
+ if labels is not None and self.task_mask_lm:
1232
+ masked_lm_loss = self.loss_fcts["ce"](
1233
+ lang_prediction_scores.view(-1, self.config.vocab_size),
1234
+ labels.view(-1),
1235
+ )
1236
+ total_loss += masked_lm_loss
1237
+ if matched_label is not None and self.task_matched:
1238
+ matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
1239
+ total_loss += matched_loss
1240
+ if obj_labels is not None and self.task_obj_predict:
1241
+ total_visual_loss = torch.tensor(0.0, device=device)
1242
+ visual_prediction_scores_dict = self.obj_predict_head(visual_output)
1243
+ for key, key_info in self.visual_losses.items():
1244
+ label, mask_conf = obj_labels[key]
1245
+ output_dim = key_info["num"]
1246
+ loss_fct_name = key_info["loss"]
1247
+ label_shape = key_info["shape"]
1248
+ weight = self.visual_loss_normalizer
1249
+ visual_loss_fct = self.loss_fcts[loss_fct_name]
1250
+ visual_prediction_scores = visual_prediction_scores_dict[key]
1251
+ visual_loss = visual_loss_fct(
1252
+ visual_prediction_scores.view(-1, output_dim),
1253
+ label.view(label_shape),
1254
+ )
1255
+ if visual_loss.dim() > 1: # Regression Losses
1256
+ visual_loss = visual_loss.mean(1)
1257
+ visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
1258
+ total_visual_loss += visual_loss
1259
+ total_loss += total_visual_loss
1260
+ if ans is not None and self.task_qa:
1261
+ answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
1262
+ total_loss += answer_loss
1263
+
1264
+ if not return_dict:
1265
+ output = (
1266
+ lang_prediction_scores,
1267
+ cross_relationship_score,
1268
+ answer_score,
1269
+ ) + lxmert_output[3:]
1270
+ return ((total_loss,) + output) if total_loss is not None else output
1271
+
1272
+ return LxmertForPreTrainingOutput(
1273
+ loss=total_loss,
1274
+ prediction_logits=lang_prediction_scores,
1275
+ cross_relationship_score=cross_relationship_score,
1276
+ question_answering_score=answer_score,
1277
+ language_hidden_states=lxmert_output.language_hidden_states,
1278
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1279
+ language_attentions=lxmert_output.language_attentions,
1280
+ vision_attentions=lxmert_output.vision_attentions,
1281
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1282
+ )
1283
+
1284
+
1285
+ @add_start_docstrings(
1286
+ """Lxmert Model with a visual-answering head on top for downstream QA tasks""",
1287
+ LXMERT_START_DOCSTRING,
1288
+ )
1289
+ class LxmertForQuestionAnswering(LxmertPreTrainedModel):
1290
+ def __init__(self, config):
1291
+ super().__init__(config)
1292
+ # Configuration
1293
+ self.config = config
1294
+ self.num_qa_labels = config.num_qa_labels
1295
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1296
+
1297
+ # Lxmert backbone
1298
+ self.lxmert = LxmertModel(config)
1299
+
1300
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
1301
+
1302
+ # Weight initialization
1303
+ # Initialize weights and apply final processing
1304
+ self.post_init()
1305
+
1306
+ # Loss function
1307
+ self.loss = CrossEntropyLoss()
1308
+
1309
+ def resize_num_qa_labels(self, num_labels):
1310
+ """
1311
+ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
1312
+ will add newly initialized weights. Reducing the size will remove weights from the end
1313
+
1314
+ Args:
1315
+ num_labels (`int`, *optional*):
1316
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
1317
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
1318
+ returns a pointer to the qa labels `torch.nn.Linear` module of the model without doing anything.
1319
+
1320
+ Return:
1321
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
1322
+ """
1323
+
1324
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1325
+ if num_labels is None or cur_qa_logit_layer is None:
1326
+ return
1327
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
1328
+ self.config.num_qa_labels = num_labels
1329
+ self.num_qa_labels = num_labels
1330
+
1331
+ return new_qa_logit_layer
1332
+
1333
+ def _resize_qa_labels(self, num_labels):
1334
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1335
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
1336
+ self._set_qa_logit_layer(new_qa_logit_layer)
1337
+ return self.get_qa_logit_layer()
1338
+
1339
+ def get_qa_logit_layer(self) -> nn.Module:
1340
+ """
1341
+ Returns the linear layer that produces question answering logits
1342
+
1343
+ Returns:
1344
+ `nn.Module`: A torch module mapping the question answering prediction hidden states, or `None` if
1345
+ Lxmert does not have a visual answering head.
1346
+ """
1347
+
1348
+ if hasattr(self, "answer_head"):
1349
+ return self.answer_head.logit_fc[-1]
1350
+
1351
+ def _set_qa_logit_layer(self, qa_logit_layer):
1352
+ self.answer_head.logit_fc[-1] = qa_logit_layer
1353
+
1354
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
1355
+ if num_labels is None:
1356
+ return cur_qa_logit_layer
1357
+
1358
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
1359
+ if cur_qa_labels == num_labels:
1360
+ return cur_qa_logit_layer
1361
+
1362
+ # Build new linear output
1363
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1364
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
1365
+ else:
1366
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
1367
+
1368
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
1369
+
1370
+ # initialize all new labels
1371
+ self._init_weights(new_qa_logit_layer)
1372
+
1373
+ # Copy labels from the previous weights
1374
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
1375
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
1376
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1377
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
1378
+
1379
+ return new_qa_logit_layer
1380
+
1381
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1382
+ @add_code_sample_docstrings(
1383
+ checkpoint=_CHECKPOINT_FOR_DOC,
1384
+ output_type=LxmertForQuestionAnsweringOutput,
1385
+ config_class=_CONFIG_FOR_DOC,
1386
+ )
1387
+ def forward(
1388
+ self,
1389
+ input_ids: Optional[torch.LongTensor] = None,
1390
+ visual_feats: Optional[torch.FloatTensor] = None,
1391
+ visual_pos: Optional[torch.FloatTensor] = None,
1392
+ attention_mask: Optional[torch.FloatTensor] = None,
1393
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
1394
+ token_type_ids: Optional[torch.LongTensor] = None,
1395
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1396
+ labels: Optional[torch.Tensor] = None,
1397
+ output_attentions: Optional[bool] = None,
1398
+ output_hidden_states: Optional[bool] = None,
1399
+ return_dict: Optional[bool] = None,
1400
+ ) -> Union[LxmertForQuestionAnsweringOutput, Tuple[torch.FloatTensor]]:
1401
+ r"""
1402
+ labels (`torch.Tensor` of shape `(batch_size)`, *optional*):
1403
+ A one-hot representation of the correct answer
1404
+ """
1405
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1406
+
1407
+ lxmert_output = self.lxmert(
1408
+ input_ids=input_ids,
1409
+ visual_feats=visual_feats,
1410
+ visual_pos=visual_pos,
1411
+ token_type_ids=token_type_ids,
1412
+ attention_mask=attention_mask,
1413
+ visual_attention_mask=visual_attention_mask,
1414
+ inputs_embeds=inputs_embeds,
1415
+ output_hidden_states=output_hidden_states,
1416
+ output_attentions=output_attentions,
1417
+ return_dict=return_dict,
1418
+ )
1419
+
1420
+ pooled_output = lxmert_output[2]
1421
+ answer_score = self.answer_head(pooled_output)
1422
+ loss = None
1423
+ if labels is not None:
1424
+ loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1))
1425
+
1426
+ if not return_dict:
1427
+ output = (answer_score,) + lxmert_output[3:]
1428
+ return (loss,) + output if loss is not None else output
1429
+
1430
+ return LxmertForQuestionAnsweringOutput(
1431
+ loss=loss,
1432
+ question_answering_score=answer_score,
1433
+ language_hidden_states=lxmert_output.language_hidden_states,
1434
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1435
+ language_attentions=lxmert_output.language_attentions,
1436
+ vision_attentions=lxmert_output.vision_attentions,
1437
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1438
+ )
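
The class above exposes both a question-answering forward pass and the `resize_num_qa_labels` helper. The snippet below is a minimal usage sketch added for illustration, not part of the diff: it assumes the usual `transformers` exports (`LxmertTokenizer`, `LxmertForQuestionAnswering`), the `unc-nlp/lxmert-base-uncased` checkpoint referenced elsewhere in these files, and the default visual feature dimension of 2048; the random tensors only stand in for Faster R-CNN region features, which the library does not provide.

    import torch
    from transformers import LxmertTokenizer, LxmertForQuestionAnswering

    tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
    model = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-base-uncased")

    inputs = tokenizer("What is on the table?", return_tensors="pt")
    # Stand-ins for Faster R-CNN outputs: 10 regions, 2048-dim features, 4-dim normalized boxes in [0, 1].
    visual_feats = torch.rand(1, 10, 2048)
    visual_pos = torch.rand(1, 10, 4)

    outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
    answer_logits = outputs.question_answering_score  # shape (1, num_qa_labels)

    # resize_num_qa_labels (defined above) swaps the final nn.Linear of the answer head for a new label count.
    model.resize_num_qa_labels(100)
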
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py ADDED
@@ -0,0 +1,1657 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the
3
+ # Lxmert Authors.
4
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """ TF 2.0 LXMERT model."""
18
+
19
+
20
+ from __future__ import annotations
21
+
22
+ import warnings
23
+ from dataclasses import dataclass
24
+ from typing import Dict, Optional, Tuple, Union
25
+
26
+ import numpy as np
27
+ import tensorflow as tf
28
+
29
+ from ...activations_tf import get_tf_activation
30
+ from ...modeling_tf_utils import (
31
+ TFModelInputType,
32
+ TFPreTrainedModel,
33
+ get_initializer,
34
+ keras,
35
+ keras_serializable,
36
+ shape_list,
37
+ unpack_inputs,
38
+ )
39
+ from ...tf_utils import check_embeddings_within_bounds, stable_softmax
40
+ from ...utils import (
41
+ ModelOutput,
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ from .configuration_lxmert import LxmertConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
54
+ _CONFIG_FOR_DOC = "LxmertConfig"
55
+
56
+ TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
57
+ "unc-nlp/lxmert-base-uncased",
58
+ ]
59
+
60
+
61
+ @dataclass
62
+ class TFLxmertModelOutput(ModelOutput):
63
+ """
64
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
65
+ visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"
66
+ encoder)
67
+
68
+
69
+ Args:
70
+ language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
71
+ Sequence of hidden-states at the output of the last layer of the language encoder.
72
+ vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
73
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
74
+ pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
75
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
76
+ by a Linear layer and a Tanh activation function.
77
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
78
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
79
+ `(batch_size, sequence_length, hidden_size)`.
80
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
81
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
82
+ `(batch_size, sequence_length, hidden_size)`.
83
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
84
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
85
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
86
+ the self-attention heads.
87
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
88
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
89
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
90
+ the self-attention heads.
91
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
92
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
93
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
94
+ the self-attention heads.
95
+ """
96
+
97
+ language_output: tf.Tensor | None = None
98
+ vision_output: tf.Tensor | None = None
99
+ pooled_output: tf.Tensor | None = None
100
+ language_hidden_states: Tuple[tf.Tensor] | None = None
101
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
102
+ language_attentions: Tuple[tf.Tensor] | None = None
103
+ vision_attentions: Tuple[tf.Tensor] | None = None
104
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
105
+
106
+
107
+ @dataclass
108
+ class TFLxmertForPreTrainingOutput(ModelOutput):
109
+ """
110
+ Output type of [`LxmertForPreTraining`].
111
+
112
+ Args:
113
+ loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
114
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
115
+ (classification) loss.
116
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
117
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
118
+ cross_relationship_score (`tf.Tensor` of shape `(batch_size, 2)`):
119
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
120
+ continuation before SoftMax).
121
+ question_answering_score (`tf.Tensor` of shape `(batch_size, n_qa_answers)`):
122
+ Prediction scores of question answering objective (classification).
123
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
124
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
125
+ `(batch_size, sequence_length, hidden_size)`.
126
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
127
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
128
+ `(batch_size, sequence_length, hidden_size)`.
129
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
130
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
131
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
132
+ the self-attention heads.
133
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
134
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
135
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
136
+ the self-attention heads.
137
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
138
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
139
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
140
+ the self-attention heads.
141
+
142
+ """
143
+
144
+ loss: tf.Tensor | None = None
145
+ prediction_logits: tf.Tensor | None = None
146
+ cross_relationship_score: tf.Tensor | None = None
147
+ question_answering_score: tf.Tensor | None = None
148
+ language_hidden_states: Tuple[tf.Tensor] | None = None
149
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
150
+ language_attentions: Tuple[tf.Tensor] | None = None
151
+ vision_attentions: Tuple[tf.Tensor] | None = None
152
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
153
+
154
+
155
+ class TFLxmertVisualFeatureEncoder(keras.layers.Layer):
156
+ def __init__(self, config, **kwargs):
157
+ super().__init__(**kwargs)
158
+
159
+ # Object feature encoding
160
+ self.visn_fc = keras.layers.Dense(
161
+ config.hidden_size,
162
+ kernel_initializer=get_initializer(config.initializer_range),
163
+ name="visn_fc",
164
+ )
165
+ self.visn_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="visn_layer_norm")
166
+
167
+ # Box position encoding
168
+ self.box_fc = keras.layers.Dense(
169
+ config.hidden_size,
170
+ kernel_initializer=get_initializer(config.initializer_range),
171
+ name="box_fc",
172
+ )
173
+ self.box_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm")
174
+
175
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
176
+ self.feat_dim = config.visual_feat_dim
177
+ self.pos_dim = config.visual_pos_dim
178
+ self.config = config
179
+
180
+ def call(self, visn_input, training=False):
181
+ feats, boxes = visn_input
182
+
183
+ x = self.visn_fc(feats)
184
+ x = self.visn_layer_norm(x)
185
+ y = self.box_fc(boxes)
186
+ y = self.box_layer_norm(y)
187
+ output = (x + y) / 2
188
+
189
+ output = self.dropout(output, training=training)
190
+ return output
191
+
192
+ def build(self, input_shape=None):
193
+ if self.built:
194
+ return
195
+ self.built = True
196
+ if getattr(self, "visn_fc", None) is not None:
197
+ with tf.name_scope(self.visn_fc.name):
198
+ self.visn_fc.build([None, None, self.feat_dim])
199
+ if getattr(self, "visn_layer_norm", None) is not None:
200
+ with tf.name_scope(self.visn_layer_norm.name):
201
+ self.visn_layer_norm.build([None, None, self.config.hidden_size])
202
+ if getattr(self, "box_fc", None) is not None:
203
+ with tf.name_scope(self.box_fc.name):
204
+ self.box_fc.build([None, None, self.pos_dim])
205
+ if getattr(self, "box_layer_norm", None) is not None:
206
+ with tf.name_scope(self.box_layer_norm.name):
207
+ self.box_layer_norm.build([None, None, self.config.hidden_size])
208
+
209
+
210
+ class TFLxmertEmbeddings(keras.layers.Layer):
211
+ """Construct the embeddings from word, position and token_type embeddings."""
212
+
213
+ def __init__(self, config, **kwargs):
214
+ super().__init__(**kwargs)
215
+
216
+ self.config = config
217
+ self.hidden_size = config.hidden_size
218
+ self.max_position_embeddings = config.max_position_embeddings
219
+ self.initializer_range = config.initializer_range
220
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
221
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
222
+
223
+ def build(self, input_shape=None):
224
+ with tf.name_scope("word_embeddings"):
225
+ self.weight = self.add_weight(
226
+ name="weight",
227
+ shape=[self.config.vocab_size, self.hidden_size],
228
+ initializer=get_initializer(initializer_range=self.initializer_range),
229
+ )
230
+
231
+ with tf.name_scope("token_type_embeddings"):
232
+ self.token_type_embeddings = self.add_weight(
233
+ name="embeddings",
234
+ shape=[self.config.type_vocab_size, self.hidden_size],
235
+ initializer=get_initializer(initializer_range=self.initializer_range),
236
+ )
237
+
238
+ with tf.name_scope("position_embeddings"):
239
+ self.position_embeddings = self.add_weight(
240
+ name="embeddings",
241
+ shape=[self.max_position_embeddings, self.hidden_size],
242
+ initializer=get_initializer(initializer_range=self.initializer_range),
243
+ )
244
+
245
+ if self.built:
246
+ return
247
+ self.built = True
248
+ if getattr(self, "LayerNorm", None) is not None:
249
+ with tf.name_scope(self.LayerNorm.name):
250
+ self.LayerNorm.build([None, None, self.config.hidden_size])
251
+
252
+ def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
253
+ """
254
+ Applies embedding based on inputs tensor.
255
+
256
+ Returns:
257
+ final_embeddings (`tf.Tensor`): output embedding tensor.
258
+ """
259
+ assert not (input_ids is None and inputs_embeds is None)
260
+
261
+ if input_ids is not None:
262
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
263
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
264
+
265
+ input_shape = shape_list(inputs_embeds)[:-1]
266
+
267
+ if token_type_ids is None:
268
+ token_type_ids = tf.fill(dims=input_shape, value=0)
269
+
270
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
271
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
272
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
273
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
274
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
275
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
276
+
277
+ return final_embeddings
278
+
279
+
280
+ class TFLxmertAttention(keras.layers.Layer):
281
+ def __init__(self, config, **kwargs):
282
+ super().__init__(**kwargs)
283
+ if config.hidden_size % config.num_attention_heads != 0:
284
+ raise ValueError(
285
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
286
+ f"heads ({config.num_attention_heads})"
287
+ )
288
+
289
+ self.num_attention_heads = config.num_attention_heads
290
+ assert config.hidden_size % config.num_attention_heads == 0
291
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
292
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
293
+
294
+ self.query = keras.layers.Dense(
295
+ self.all_head_size,
296
+ kernel_initializer=get_initializer(config.initializer_range),
297
+ name="query",
298
+ )
299
+ self.key = keras.layers.Dense(
300
+ self.all_head_size,
301
+ kernel_initializer=get_initializer(config.initializer_range),
302
+ name="key",
303
+ )
304
+ self.value = keras.layers.Dense(
305
+ self.all_head_size,
306
+ kernel_initializer=get_initializer(config.initializer_range),
307
+ name="value",
308
+ )
309
+
310
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
311
+ self.ctx_dim = config.hidden_size
312
+ self.config = config
313
+
314
+ def transpose_for_scores(self, x, batch_size):
315
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
316
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
317
+ return tf.transpose(x, perm=[0, 2, 1, 3])
318
+
319
+ def call(self, hidden_states, context, attention_mask, output_attentions, training=False):
320
+ batch_size = shape_list(hidden_states)[0]
321
+ mixed_query_layer = self.query(hidden_states)
322
+ mixed_key_layer = self.key(context)
323
+ mixed_value_layer = self.value(context)
324
+
325
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
326
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
327
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
328
+
329
+ # Take the dot product between "query" and "key" to get the raw attention scores.
330
+ attention_scores = tf.matmul(
331
+ query_layer, key_layer, transpose_b=True
332
+ ) # (batch size, num_heads, seq_len_q, seq_len_k)
333
+ dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores
334
+ attention_scores = attention_scores / tf.math.sqrt(dk)
335
+
336
+ if attention_mask is not None:
337
+ # Apply the attention mask (precomputed for all layers in the TFLxmertModel call() function)
338
+ attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
339
+ attention_scores = attention_scores + attention_mask
340
+
341
+ # Normalize the attention scores to probabilities.
342
+ attention_probs = stable_softmax(attention_scores, axis=-1)
343
+
344
+ # This is actually dropping out entire tokens to attend to, which might
345
+ # seem a bit unusual, but is taken from the original Transformer paper.
346
+ attention_probs = self.dropout(attention_probs, training=training)
347
+ context_layer = tf.matmul(attention_probs, value_layer)
348
+
349
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
350
+ context_layer = tf.reshape(
351
+ context_layer, (batch_size, -1, self.all_head_size)
352
+ ) # (batch_size, seq_len_q, all_head_size)
353
+
354
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
355
+ return outputs
356
+
357
+ def build(self, input_shape=None):
358
+ if self.built:
359
+ return
360
+ self.built = True
361
+ if getattr(self, "query", None) is not None:
362
+ with tf.name_scope(self.query.name):
363
+ self.query.build([None, None, self.config.hidden_size])
364
+ if getattr(self, "key", None) is not None:
365
+ with tf.name_scope(self.key.name):
366
+ self.key.build([None, None, self.ctx_dim])
367
+ if getattr(self, "value", None) is not None:
368
+ with tf.name_scope(self.value.name):
369
+ self.value.build([None, None, self.ctx_dim])
370
+
371
+
372
+ class TFLxmertIntermediate(keras.layers.Layer):
373
+ def __init__(self, config, **kwargs):
374
+ super().__init__(**kwargs)
375
+ self.dense = keras.layers.Dense(
376
+ config.intermediate_size,
377
+ kernel_initializer=get_initializer(config.initializer_range),
378
+ name="dense",
379
+ )
380
+ if isinstance(config.hidden_act, str):
381
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
382
+ else:
383
+ self.intermediate_act_fn = config.hidden_act
384
+ self.config = config
385
+
386
+ def call(self, hidden_states):
387
+ hidden_states = self.dense(hidden_states)
388
+ hidden_states = self.intermediate_act_fn(hidden_states)
389
+ return hidden_states
390
+
391
+ def build(self, input_shape=None):
392
+ if self.built:
393
+ return
394
+ self.built = True
395
+ if getattr(self, "dense", None) is not None:
396
+ with tf.name_scope(self.dense.name):
397
+ self.dense.build([None, None, self.config.hidden_size])
398
+
399
+
400
+ class TFLxmertOutput(keras.layers.Layer):
401
+ def __init__(self, config, **kwargs):
402
+ super().__init__(**kwargs)
403
+ self.dense = keras.layers.Dense(
404
+ config.hidden_size,
405
+ kernel_initializer=get_initializer(config.initializer_range),
406
+ name="dense",
407
+ )
408
+
409
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
410
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
411
+ self.config = config
412
+
413
+ def call(self, hidden_states, input_tensor, training=False):
414
+ hidden_states = self.dense(hidden_states)
415
+ hidden_states = self.dropout(hidden_states, training)
416
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
417
+ return hidden_states
418
+
419
+ def build(self, input_shape=None):
420
+ if self.built:
421
+ return
422
+ self.built = True
423
+ if getattr(self, "dense", None) is not None:
424
+ with tf.name_scope(self.dense.name):
425
+ self.dense.build([None, None, self.config.intermediate_size])
426
+ if getattr(self, "LayerNorm", None) is not None:
427
+ with tf.name_scope(self.LayerNorm.name):
428
+ self.LayerNorm.build([None, None, self.config.hidden_size])
429
+
430
+
431
+ class TFLxmertAttentionOutput(keras.layers.Layer):
432
+ def __init__(self, config, **kwargs):
433
+ super().__init__(**kwargs)
434
+ self.dense = keras.layers.Dense(
435
+ config.hidden_size,
436
+ kernel_initializer=get_initializer(config.initializer_range),
437
+ name="dense",
438
+ )
439
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
440
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
441
+ self.config = config
442
+
443
+ def call(self, hidden_states, input_tensor, training=False):
444
+ hidden_states = self.dense(hidden_states)
445
+ hidden_states = self.dropout(hidden_states, training=training)
446
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
447
+ return hidden_states
448
+
449
+ def build(self, input_shape=None):
450
+ if self.built:
451
+ return
452
+ self.built = True
453
+ if getattr(self, "dense", None) is not None:
454
+ with tf.name_scope(self.dense.name):
455
+ self.dense.build([None, None, self.config.hidden_size])
456
+ if getattr(self, "LayerNorm", None) is not None:
457
+ with tf.name_scope(self.LayerNorm.name):
458
+ self.LayerNorm.build([None, None, self.config.hidden_size])
459
+
460
+
461
+ class TFLxmertSelfAttentionLayer(keras.layers.Layer):
462
+ def __init__(self, config, **kwargs):
463
+ super().__init__(**kwargs)
464
+ self.self = TFLxmertAttention(config, name="self")
465
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
466
+
467
+ def call(self, input_tensor, attention_mask, output_attentions, training=False):
468
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
469
+ self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions)
470
+ if output_attentions:
471
+ attention_probs = self_output[1]
472
+ attention_output = self.attention_output(self_output[0], input_tensor)
473
+ return (attention_output, attention_probs) if output_attentions else (attention_output,)
474
+
475
+ def build(self, input_shape=None):
476
+ if self.built:
477
+ return
478
+ self.built = True
479
+ if getattr(self, "self", None) is not None:
480
+ with tf.name_scope(self.self.name):
481
+ self.self.build(None)
482
+ if getattr(self, "attention_output", None) is not None:
483
+ with tf.name_scope(self.attention_output.name):
484
+ self.attention_output.build(None)
485
+
486
+
487
+ class TFLxmertCrossAttentionLayer(keras.layers.Layer):
488
+ def __init__(self, config, **kwargs):
489
+ super().__init__(**kwargs)
490
+ self.att = TFLxmertAttention(config, name="att")
491
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
492
+
493
+ def call(
494
+ self,
495
+ input_tensor,
496
+ ctx_tensor,
497
+ ctx_att_mask,
498
+ output_attentions=False,
499
+ training=False,
500
+ ):
501
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training)
502
+ if output_attentions:
503
+ attention_probs = output[1]
504
+ attention_output = self.attention_output(output[0], input_tensor, training=training)
505
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
506
+ return outputs
507
+
508
+ def build(self, input_shape=None):
509
+ if self.built:
510
+ return
511
+ self.built = True
512
+ if getattr(self, "att", None) is not None:
513
+ with tf.name_scope(self.att.name):
514
+ self.att.build(None)
515
+ if getattr(self, "attention_output", None) is not None:
516
+ with tf.name_scope(self.attention_output.name):
517
+ self.attention_output.build(None)
518
+
519
+
520
+ class TFLxmertLayer(keras.layers.Layer):
521
+ def __init__(self, config, **kwargs):
522
+ super().__init__(**kwargs)
523
+ self.attention = TFLxmertSelfAttentionLayer(config, name="attention")
524
+ self.intermediate = TFLxmertIntermediate(config, name="intermediate")
525
+ self.transformer_output = TFLxmertOutput(config, name="output")
526
+
527
+ def call(self, hidden_states, attention_mask, output_attentions, training=False):
528
+ attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training)
529
+ attention_output = attention_outputs[0]
530
+ intermediate_output = self.intermediate(attention_output)
531
+ layer_output = self.transformer_output(intermediate_output, attention_output, training=training)
532
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
533
+ return outputs
534
+
535
+ def build(self, input_shape=None):
536
+ if self.built:
537
+ return
538
+ self.built = True
539
+ if getattr(self, "attention", None) is not None:
540
+ with tf.name_scope(self.attention.name):
541
+ self.attention.build(None)
542
+ if getattr(self, "intermediate", None) is not None:
543
+ with tf.name_scope(self.intermediate.name):
544
+ self.intermediate.build(None)
545
+ if getattr(self, "transformer_output", None) is not None:
546
+ with tf.name_scope(self.transformer_output.name):
547
+ self.transformer_output.build(None)
548
+
549
+
550
+ class TFLxmertXLayer(keras.layers.Layer):
551
+ def __init__(self, config, **kwargs):
552
+ super().__init__(**kwargs)
553
+ self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention")
554
+
555
+ # Self-attention Layers
556
+ self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att")
557
+ self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att")
558
+
559
+ # Intermediate and Output Layers (FFNs)
560
+ self.lang_inter = TFLxmertIntermediate(config, name="lang_inter")
561
+ self.lang_output = TFLxmertOutput(config, name="lang_output")
562
+ self.visn_inter = TFLxmertIntermediate(config, name="visn_inter")
563
+ self.visn_output = TFLxmertOutput(config, name="visn_output")
564
+
565
+ def cross_att(
566
+ self,
567
+ lang_input,
568
+ lang_attention_mask,
569
+ visn_input,
570
+ visn_attention_mask,
571
+ output_attentions,
572
+ training=False,
573
+ ):
574
+ # Cross Attention
575
+
576
+ # Keras saving and loading model *does not work* with the same inputs for two layers.
577
+ lang_attention_lang_input = tf.identity(lang_input)
578
+ visn_attention_lang_input = tf.identity(lang_input)
579
+ lang_attention_visn_input = tf.identity(visn_input)
580
+ visn_attention_visn_input = tf.identity(visn_input)
581
+
582
+ lang_att_output = self.visual_attention(
583
+ lang_attention_lang_input,
584
+ lang_attention_visn_input,
585
+ visn_attention_mask,
586
+ output_attentions=output_attentions,
587
+ training=training,
588
+ )
589
+ visn_att_output = self.visual_attention(
590
+ visn_attention_visn_input,
591
+ visn_attention_lang_input,
592
+ lang_attention_mask,
593
+ output_attentions=output_attentions,
594
+ training=training,
595
+ )
596
+ return lang_att_output, visn_att_output
597
+
598
+ def self_att(
599
+ self,
600
+ lang_input,
601
+ lang_attention_mask,
602
+ visn_input,
603
+ visn_attention_mask,
604
+ training=False,
605
+ ):
606
+ # Self Attention
607
+ output_attentions = False
608
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training)
609
+ visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training)
610
+ return lang_att_output[0], visn_att_output[0]
611
+
612
+ def output_fc(self, lang_input, visn_input, training=False):
613
+ # FC layers
614
+ lang_inter_output = self.lang_inter(lang_input)
615
+ visn_inter_output = self.visn_inter(visn_input)
616
+
617
+ # Layer output
618
+ lang_output = self.lang_output(lang_inter_output, lang_input, training)
619
+ visn_output = self.visn_output(visn_inter_output, visn_input, training)
620
+ return lang_output, visn_output
621
+
622
+ def call(
623
+ self,
624
+ lang_feats,
625
+ lang_attention_mask,
626
+ visn_feats,
627
+ visn_attention_mask,
628
+ output_attentions,
629
+ training=False,
630
+ ):
631
+ lang_att_output = lang_feats
632
+ visn_att_output = visn_feats
633
+
634
+ lang_att_output, visn_att_output = self.cross_att(
635
+ lang_att_output,
636
+ lang_attention_mask,
637
+ visn_att_output,
638
+ visn_attention_mask,
639
+ output_attentions,
640
+ training=training,
641
+ )
642
+ attention_probs = lang_att_output[1:]
643
+ lang_att_output, visn_att_output = self.self_att(
644
+ lang_att_output[0],
645
+ lang_attention_mask,
646
+ visn_att_output[0],
647
+ visn_attention_mask,
648
+ training=training,
649
+ )
650
+ lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training)
651
+
652
+ return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output)
653
+
654
+ def build(self, input_shape=None):
655
+ if self.built:
656
+ return
657
+ self.built = True
658
+ if getattr(self, "visual_attention", None) is not None:
659
+ with tf.name_scope(self.visual_attention.name):
660
+ self.visual_attention.build(None)
661
+ if getattr(self, "lang_self_att", None) is not None:
662
+ with tf.name_scope(self.lang_self_att.name):
663
+ self.lang_self_att.build(None)
664
+ if getattr(self, "visn_self_att", None) is not None:
665
+ with tf.name_scope(self.visn_self_att.name):
666
+ self.visn_self_att.build(None)
667
+ if getattr(self, "lang_inter", None) is not None:
668
+ with tf.name_scope(self.lang_inter.name):
669
+ self.lang_inter.build(None)
670
+ if getattr(self, "lang_output", None) is not None:
671
+ with tf.name_scope(self.lang_output.name):
672
+ self.lang_output.build(None)
673
+ if getattr(self, "visn_inter", None) is not None:
674
+ with tf.name_scope(self.visn_inter.name):
675
+ self.visn_inter.build(None)
676
+ if getattr(self, "visn_output", None) is not None:
677
+ with tf.name_scope(self.visn_output.name):
678
+ self.visn_output.build(None)
679
+
680
+
681
+ class TFLxmertEncoder(keras.layers.Layer):
682
+ def __init__(self, config, **kwargs):
683
+ super().__init__(**kwargs)
684
+
685
+ self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc")
686
+
687
+ # Number of layers
688
+ self.num_l_layers = config.l_layers
689
+ self.num_x_layers = config.x_layers
690
+ self.num_r_layers = config.r_layers
691
+
692
+ # Layers
693
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
694
+ self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)]
695
+ self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)]
696
+ self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)]
697
+ self.config = config
698
+
699
+ def call(
700
+ self,
701
+ lang_feats=None,
702
+ lang_attention_mask=None,
703
+ visual_feats=None,
704
+ visual_pos=None,
705
+ visual_attention_mask=None,
706
+ output_attentions=None,
707
+ training=False,
708
+ ):
709
+ vision_hidden_states = ()
710
+ language_hidden_states = ()
711
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
712
+ language_attentions = () if output_attentions or self.config.output_attentions else None
713
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
714
+
715
+ visual_feats = self.visn_fc([visual_feats, visual_pos], training=training)
716
+
717
+ # Run language layers
718
+ for layer_module in self.layer:
719
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training)
720
+ lang_feats = l_outputs[0]
721
+ language_hidden_states = language_hidden_states + (lang_feats,)
722
+ if language_attentions is not None:
723
+ language_attentions = language_attentions + (l_outputs[1],)
724
+
725
+ # Run relational layers
726
+ for layer_module in self.r_layers:
727
+ v_outputs = layer_module(
728
+ visual_feats,
729
+ visual_attention_mask,
730
+ output_attentions,
731
+ training=training,
732
+ )
733
+ visual_feats = v_outputs[0]
734
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
735
+ if vision_attentions is not None:
736
+ vision_attentions = vision_attentions + (v_outputs[1],)
737
+
738
+ # Run cross-modality layers
739
+ for layer_module in self.x_layers:
740
+ x_outputs = layer_module(
741
+ lang_feats,
742
+ lang_attention_mask,
743
+ visual_feats,
744
+ visual_attention_mask,
745
+ output_attentions,
746
+ training=training,
747
+ )
748
+ lang_feats, visual_feats = x_outputs[:2]
749
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
750
+ language_hidden_states = language_hidden_states + (lang_feats,)
751
+ if cross_encoder_attentions is not None:
752
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
753
+
754
+ visual_encoder_outputs = (
755
+ vision_hidden_states,
756
+ vision_attentions if output_attentions else None,
757
+ )
758
+ lang_encoder_outputs = (
759
+ language_hidden_states,
760
+ language_attentions if output_attentions else None,
761
+ )
762
+
763
+ return (
764
+ visual_encoder_outputs,
765
+ lang_encoder_outputs,
766
+ cross_encoder_attentions if output_attentions else None,
767
+ )
768
+
769
+ def build(self, input_shape=None):
770
+ if self.built:
771
+ return
772
+ self.built = True
773
+ if getattr(self, "visn_fc", None) is not None:
774
+ with tf.name_scope(self.visn_fc.name):
775
+ self.visn_fc.build(None)
776
+ if getattr(self, "layer", None) is not None:
777
+ for layer in self.layer:
778
+ with tf.name_scope(layer.name):
779
+ layer.build(None)
780
+ if getattr(self, "x_layers", None) is not None:
781
+ for layer in self.x_layers:
782
+ with tf.name_scope(layer.name):
783
+ layer.build(None)
784
+ if getattr(self, "r_layers", None) is not None:
785
+ for layer in self.r_layers:
786
+ with tf.name_scope(layer.name):
787
+ layer.build(None)
788
+
789
+
790
+ @keras_serializable
791
+ class TFLxmertMainLayer(keras.layers.Layer):
792
+ config_class = LxmertConfig
793
+
794
+ def __init__(self, config, **kwargs):
795
+ super().__init__(**kwargs)
796
+
797
+ self.config = config
798
+ self.num_l_layers = config.l_layers
799
+ self.num_x_layers = config.x_layers
800
+ self.num_r_layers = config.r_layers
801
+ self.initializer_range = config.initializer_range
802
+ self.output_attentions = config.output_attentions
803
+ self.output_hidden_states = config.output_hidden_states
804
+ self.return_dict = config.use_return_dict
805
+ self.embeddings = TFLxmertEmbeddings(config, name="embeddings")
806
+ self.encoder = TFLxmertEncoder(config, name="encoder")
807
+ self.pooler = TFLxmertPooler(config, name="pooler")
808
+ self.config = config
809
+
810
+ def get_input_embeddings(self):
811
+ return self.embeddings
812
+
813
+ def set_input_embeddings(self, value):
814
+ self.embeddings.weight = value
815
+ self.embeddings.vocab_size = shape_list(value)[0]
816
+
817
+ def _prune_heads(self, heads_to_prune):
818
+ raise NotImplementedError
819
+
820
+ @unpack_inputs
821
+ def call(
822
+ self,
823
+ input_ids=None,
824
+ visual_feats=None,
825
+ visual_pos=None,
826
+ attention_mask=None,
827
+ visual_attention_mask=None,
828
+ token_type_ids=None,
829
+ inputs_embeds=None,
830
+ output_attentions=None,
831
+ output_hidden_states=None,
832
+ return_dict=None,
833
+ training=False,
834
+ ):
835
+ if input_ids is not None and inputs_embeds is not None:
836
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
837
+ elif input_ids is not None:
838
+ input_shape = shape_list(input_ids)
839
+ elif inputs_embeds is not None:
840
+ input_shape = shape_list(inputs_embeds)[:-1]
841
+ else:
842
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
843
+ if visual_pos is None or visual_feats is None:
844
+ raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.")
845
+
846
+ if attention_mask is None:
847
+ attention_mask = tf.fill(input_shape, 1)
848
+
849
+ if token_type_ids is None:
850
+ token_type_ids = tf.fill(input_shape, 0)
851
+
852
+ # Positional Word Embeddings
853
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training)
854
+
855
+ # We create a 3D attention mask from a 2D tensor mask.
856
+ # Sizes are [batch_size, 1, 1, to_seq_length]
857
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
858
+ # this attention mask is simpler than the triangular masking of causal attention
859
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
860
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
861
+
862
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
863
+ # masked positions, this operation will create a tensor which is 0.0 for
864
+ # positions we want to attend and -10000.0 for masked positions.
865
+ # Since we are adding it to the raw scores before the softmax, this is
866
+ # effectively the same as removing these entirely.
867
+
868
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
869
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
870
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
871
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
872
+
873
+ if visual_attention_mask is not None:
874
+ # Broadcast the 2D visual attention mask to [batch_size, 1, 1, num_visual_features]
875
+ extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1)
876
+
877
+ extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype)
878
+ extended_visual_attention_mask = tf.multiply(
879
+ tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst
880
+ )
881
+ else:
882
+ extended_visual_attention_mask = None
883
+
884
+ # Run Lxmert encoder
885
+ encoder_outputs = self.encoder(
886
+ embedding_output,
887
+ extended_attention_mask,
888
+ visual_feats,
889
+ visual_pos,
890
+ extended_visual_attention_mask,
891
+ output_attentions,
892
+ training,
893
+ )
894
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
895
+ vision_hidden_states = visual_encoder_outputs[0]
896
+ language_hidden_states = lang_encoder_outputs[0]
897
+
898
+ all_attentions = ()
899
+ if output_attentions:
900
+ language_attentions = lang_encoder_outputs[1]
901
+ vision_attentions = visual_encoder_outputs[1]
902
+ cross_encoder_attentions = encoder_outputs[2]
903
+ all_attentions = (
904
+ language_attentions,
905
+ vision_attentions,
906
+ cross_encoder_attentions,
907
+ )
908
+
909
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
910
+
911
+ visual_output = vision_hidden_states[-1]
912
+ lang_output = language_hidden_states[-1]
913
+ pooled_output = self.pooler(lang_output)
914
+
915
+ if not return_dict:
916
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
917
+
918
+ return TFLxmertModelOutput(
919
+ pooled_output=pooled_output,
920
+ language_output=lang_output,
921
+ vision_output=visual_output,
922
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
923
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
924
+ language_attentions=language_attentions if output_attentions else None,
925
+ vision_attentions=vision_attentions if output_attentions else None,
926
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
927
+ )
928
+
929
+ def build(self, input_shape=None):
930
+ if self.built:
931
+ return
932
+ self.built = True
933
+ if getattr(self, "embeddings", None) is not None:
934
+ with tf.name_scope(self.embeddings.name):
935
+ self.embeddings.build(None)
936
+ if getattr(self, "encoder", None) is not None:
937
+ with tf.name_scope(self.encoder.name):
938
+ self.encoder.build(None)
939
+ if getattr(self, "pooler", None) is not None:
940
+ with tf.name_scope(self.pooler.name):
941
+ self.pooler.build(None)
942
+
943
+
944
+ class TFLxmertPreTrainedModel(TFPreTrainedModel):
945
+ """
946
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
947
+ models.
948
+ """
949
+
950
+ config_class = LxmertConfig
951
+ base_model_prefix = "lxmert"
952
+
953
+ @property
954
+ def dummy_inputs(self):
955
+ """
956
+ Dummy inputs to build the network.
957
+
958
+ Returns:
959
+ tf.Tensor with dummy inputs
960
+ """
961
+ batch_size = 2
962
+ num_visual_features = 10
963
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
964
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
965
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
966
+
967
+ return {
968
+ "input_ids": input_ids,
969
+ "visual_feats": visual_feats,
970
+ "visual_pos": visual_pos,
971
+ }
972
+
973
+ @property
974
+ def input_signature(self):
975
+ return {
976
+ "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
977
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
978
+ "visual_feats": tf.TensorSpec((None, None, self.config.visual_feat_dim), tf.float32, name="visual_feats"),
979
+ "visual_pos": tf.TensorSpec((None, None, 4), tf.float32, name="visual_pos"),
980
+ "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"),
981
+ "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
982
+ }
983
+
984
+
985
+ LXMERT_START_DOCSTRING = r"""
986
+
987
+ The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
988
+ Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
989
+ model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual
990
+ genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss
991
+ for question answering attribute prediction, and object tag prediction.
992
+
993
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
994
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
995
+ behavior.
996
+
997
+ <Tip>
998
+
999
+ TensorFlow models and layers in `transformers` accept two formats as input:
1000
+
1001
+ - having all inputs as keyword arguments (like PyTorch models), or
1002
+ - having all inputs as a list, tuple or dict in the first positional argument.
1003
+
1004
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1005
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1006
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1007
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1008
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1009
+ positional argument:
1010
+
1011
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1012
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1013
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1014
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1015
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1016
+
1017
+ Note that when creating models and layers with
1018
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1019
+ about any of this, as you can just pass inputs like you would to any other Python function!
1020
+
1021
+ </Tip>
1022
+
1023
+ Parameters:
1024
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
1025
+ Initializing with a config file does not load the weights associated with the model, only the
1026
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1027
+ """
1028
+
1029
+ LXMERT_INPUTS_DOCSTRING = r"""
1030
+ Args:
1031
+ input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
1032
+ Indices of input sequence tokens in the vocabulary.
1033
+
1034
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1035
+ [`PreTrainedTokenizer.encode`] for details.
1036
+
1037
+ [What are input IDs?](../glossary#input-ids)
1038
+ visual_feats (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
1039
+ This input represents visual features. These are ROI-pooled object features extracted from bounding boxes
1040
+ using a faster R-CNN model.
1041
+
1042
+ These are currently not provided by the transformers library.
1043
+ visual_pos (`tf.Tensor` of shape `(batch_size, num_visual_features, 4)`):
1044
+ This input represents spatial features corresponding (by index) to the visual features. The
1045
+ pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1046
+ 1.
1047
+
1048
+ These are currently not provided by the transformers library.
1049
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1050
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1051
+
1052
+ - 1 for tokens that are **not masked**,
1053
+ - 0 for tokens that are **masked**.
1054
+
1055
+ [What are attention masks?](../glossary#attention-mask)
1056
+ visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1057
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1058
+
1059
+ - 1 for tokens that are **not masked**,
1060
+ - 0 for tokens that are **masked**.
1061
+
1062
+ [What are attention masks?](../glossary#attention-mask)
1063
+ token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1064
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1065
+ 1]`:
1066
+
1067
+ - 0 corresponds to a *sentence A* token,
1068
+ - 1 corresponds to a *sentence B* token.
1069
+
1070
+ [What are token type IDs?](../glossary#token-type-ids)
1071
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1072
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1073
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1074
+ model's internal embedding lookup matrix.
1075
+ output_attentions (`bool`, *optional*):
1076
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1077
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1078
+ config will be used instead.
1079
+ output_hidden_states (`bool`, *optional*):
1080
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1081
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1082
+ used instead.
1083
+ return_dict (`bool`, *optional*):
1084
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1085
+ eager mode, in graph mode the value will always be set to True.
1086
+ training (`bool`, *optional*, defaults to `False`):
1087
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1088
+ behaviors between training and evaluation).
1089
+ """
1090
+
1091
+
1092
+ @add_start_docstrings(
1093
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
1094
+ LXMERT_START_DOCSTRING,
1095
+ )
1096
+ class TFLxmertModel(TFLxmertPreTrainedModel):
1097
+ def __init__(self, config, *inputs, **kwargs):
1098
+ super().__init__(config, *inputs, **kwargs)
1099
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
1100
+
1101
+ @unpack_inputs
1102
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
1103
+ @add_code_sample_docstrings(
1104
+ checkpoint=_CHECKPOINT_FOR_DOC,
1105
+ output_type=TFLxmertModelOutput,
1106
+ config_class=_CONFIG_FOR_DOC,
1107
+ )
1108
+ def call(
1109
+ self,
1110
+ input_ids: TFModelInputType | None = None,
1111
+ visual_feats: tf.Tensor | None = None,
1112
+ visual_pos: tf.Tensor | None = None,
1113
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1114
+ visual_attention_mask: np.ndarray | tf.Tensor | None = None,
1115
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1116
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1117
+ output_attentions: Optional[bool] = None,
1118
+ output_hidden_states: Optional[bool] = None,
1119
+ return_dict: Optional[bool] = None,
1120
+ training: bool = False,
1121
+ ) -> Union[Tuple, TFLxmertModelOutput]:
1122
+ outputs = self.lxmert(
1123
+ input_ids,
1124
+ visual_feats,
1125
+ visual_pos,
1126
+ attention_mask,
1127
+ visual_attention_mask,
1128
+ token_type_ids,
1129
+ inputs_embeds,
1130
+ output_attentions,
1131
+ output_hidden_states,
1132
+ return_dict,
1133
+ training,
1134
+ )
1135
+
1136
+ return outputs
1137
+
1138
+ def build(self, input_shape=None):
1139
+ if self.built:
1140
+ return
1141
+ self.built = True
1142
+ if getattr(self, "lxmert", None) is not None:
1143
+ with tf.name_scope(self.lxmert.name):
1144
+ self.lxmert.build(None)
1145
+
1146
+
1147
+ class TFLxmertPooler(keras.layers.Layer):
1148
+ def __init__(self, config, **kwargs):
1149
+ super().__init__(**kwargs)
1150
+ self.dense = keras.layers.Dense(
1151
+ config.hidden_size,
1152
+ kernel_initializer=get_initializer(config.initializer_range),
1153
+ activation="tanh",
1154
+ name="dense",
1155
+ )
1156
+ self.config = config
1157
+
1158
+ def call(self, hidden_states):
1159
+ # We "pool" the model by simply taking the hidden state corresponding
1160
+ # to the first token.
1161
+ first_token_tensor = hidden_states[:, 0]
1162
+ pooled_output = self.dense(first_token_tensor)
1163
+ return pooled_output
1164
+
1165
+ def build(self, input_shape=None):
1166
+ if self.built:
1167
+ return
1168
+ self.built = True
1169
+ if getattr(self, "dense", None) is not None:
1170
+ with tf.name_scope(self.dense.name):
1171
+ self.dense.build([None, None, self.config.hidden_size])
1172
+
1173
+
1174
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert
1175
+ class TFLxmertPredictionHeadTransform(keras.layers.Layer):
1176
+ def __init__(self, config: LxmertConfig, **kwargs):
1177
+ super().__init__(**kwargs)
1178
+
1179
+ self.dense = keras.layers.Dense(
1180
+ units=config.hidden_size,
1181
+ kernel_initializer=get_initializer(config.initializer_range),
1182
+ name="dense",
1183
+ )
1184
+
1185
+ if isinstance(config.hidden_act, str):
1186
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
1187
+ else:
1188
+ self.transform_act_fn = config.hidden_act
1189
+
1190
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
1191
+ self.config = config
1192
+
1193
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1194
+ hidden_states = self.dense(inputs=hidden_states)
1195
+ hidden_states = self.transform_act_fn(hidden_states)
1196
+ hidden_states = self.LayerNorm(inputs=hidden_states)
1197
+
1198
+ return hidden_states
1199
+
1200
+ def build(self, input_shape=None):
1201
+ if self.built:
1202
+ return
1203
+ self.built = True
1204
+ if getattr(self, "dense", None) is not None:
1205
+ with tf.name_scope(self.dense.name):
1206
+ self.dense.build([None, None, self.config.hidden_size])
1207
+ if getattr(self, "LayerNorm", None) is not None:
1208
+ with tf.name_scope(self.LayerNorm.name):
1209
+ self.LayerNorm.build([None, None, self.config.hidden_size])
1210
+
1211
+
1212
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert
1213
+ class TFLxmertLMPredictionHead(keras.layers.Layer):
1214
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
1215
+ super().__init__(**kwargs)
1216
+
1217
+ self.config = config
1218
+ self.hidden_size = config.hidden_size
1219
+
1220
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
1221
+
1222
+ # The output weights are the same as the input embeddings, but there is
1223
+ # an output-only bias for each token.
1224
+ self.input_embeddings = input_embeddings
1225
+
1226
+ def build(self, input_shape=None):
1227
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1228
+
1229
+ if self.built:
1230
+ return
1231
+ self.built = True
1232
+ if getattr(self, "transform", None) is not None:
1233
+ with tf.name_scope(self.transform.name):
1234
+ self.transform.build(None)
1235
+
1236
+ def get_output_embeddings(self) -> keras.layers.Layer:
1237
+ return self.input_embeddings
1238
+
1239
+ def set_output_embeddings(self, value: tf.Variable):
1240
+ self.input_embeddings.weight = value
1241
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1242
+
1243
+ def get_bias(self) -> Dict[str, tf.Variable]:
1244
+ return {"bias": self.bias}
1245
+
1246
+ def set_bias(self, value: tf.Variable):
1247
+ self.bias = value["bias"]
1248
+ self.config.vocab_size = shape_list(value["bias"])[0]
1249
+
1250
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1251
+ hidden_states = self.transform(hidden_states=hidden_states)
1252
+ seq_length = shape_list(hidden_states)[1]
1253
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
1254
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1255
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1256
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1257
+
1258
+ return hidden_states
1259
+
1260
+
1261
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert
1262
+ class TFLxmertMLMHead(keras.layers.Layer):
1263
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
1264
+ super().__init__(**kwargs)
1265
+
1266
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
1267
+
1268
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
1269
+ prediction_scores = self.predictions(hidden_states=sequence_output)
1270
+
1271
+ return prediction_scores
1272
+
1273
+ def build(self, input_shape=None):
1274
+ if self.built:
1275
+ return
1276
+ self.built = True
1277
+ if getattr(self, "predictions", None) is not None:
1278
+ with tf.name_scope(self.predictions.name):
1279
+ self.predictions.build(None)
1280
+
1281
+
1282
+ class TFLxmertPreTrainingHeads(keras.layers.Layer):
1283
+ def __init__(self, config, input_embeddings, **kwargs):
1284
+ super().__init__(**kwargs)
1285
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
1286
+
1287
+ self.seq_relationship = keras.layers.Dense(
1288
+ 2,
1289
+ kernel_initializer=get_initializer(config.initializer_range),
1290
+ name="seq_relationship",
1291
+ )
1292
+ self.config = config
1293
+
1294
+ def call(self, sequence_output, pooled_output):
1295
+ prediction_scores = self.predictions(sequence_output)
1296
+ seq_relationship_score = self.seq_relationship(pooled_output)
1297
+ return prediction_scores, seq_relationship_score
1298
+
1299
+ def build(self, input_shape=None):
1300
+ if self.built:
1301
+ return
1302
+ self.built = True
1303
+ if getattr(self, "predictions", None) is not None:
1304
+ with tf.name_scope(self.predictions.name):
1305
+ self.predictions.build(None)
1306
+ if getattr(self, "seq_relationship", None) is not None:
1307
+ with tf.name_scope(self.seq_relationship.name):
1308
+ self.seq_relationship.build([None, None, self.config.hidden_size])
1309
+
1310
+
1311
+ class TFLxmertVisualAnswerHead(keras.layers.Layer):
1312
+ def __init__(self, config, num_labels, **kwargs):
1313
+ super().__init__(**kwargs)
1314
+ hid_dim = config.hidden_size
1315
+ self.dense = keras.layers.Dense(
1316
+ hid_dim * 2,
1317
+ kernel_initializer=get_initializer(config.initializer_range),
1318
+ name="logit_fc_._0",
1319
+ )
1320
+ self.activation = get_tf_activation("gelu")
1321
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2")
1322
+ self.dense_1 = keras.layers.Dense(
1323
+ num_labels,
1324
+ kernel_initializer=get_initializer(config.initializer_range),
1325
+ name="logit_fc_._3",
1326
+ )
1327
+ self.hid_dim = hid_dim
1328
+
1329
+ def call(self, hidden_states):
1330
+ hidden_states = self.dense(hidden_states)
1331
+ hidden_states = self.activation(hidden_states)
1332
+ hidden_states = self.layer_norm(hidden_states)
1333
+ hidden_states = self.dense_1(hidden_states)
1334
+
1335
+ return hidden_states
1336
+
1337
+ def build(self, input_shape=None):
1338
+ if self.built:
1339
+ return
1340
+ self.built = True
1341
+ if getattr(self, "dense", None) is not None:
1342
+ with tf.name_scope(self.dense.name):
1343
+ self.dense.build([None, None, self.hid_dim])
1344
+ if getattr(self, "layer_norm", None) is not None:
1345
+ with tf.name_scope(self.layer_norm.name):
1346
+ self.layer_norm.build([None, self.hid_dim * 2])
1347
+ if getattr(self, "dense_1", None) is not None:
1348
+ with tf.name_scope(self.dense_1.name):
1349
+ self.dense_1.build([None, None, self.hid_dim * 2])
1350
+
1351
+
1352
+ class TFLxmertVisualObjHead(keras.layers.Layer):
1353
+ def __init__(self, config, **kwargs):
1354
+ super().__init__(**kwargs)
1355
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
1356
+
1357
+ # Decide the use of visual losses
1358
+ visual_losses = {}
1359
+ if config.visual_obj_loss:
1360
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
1361
+ if config.visual_attr_loss:
1362
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
1363
+ if config.visual_feat_loss:
1364
+ visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim}
1365
+ self.visual_losses = visual_losses
1366
+
1367
+ # The output weights are the same as the input embeddings, but there is
1368
+ # an output-only bias for each token.
1369
+ self.decoder_dict = {
1370
+ key: keras.layers.Dense(
1371
+ self.visual_losses[key]["num"],
1372
+ kernel_initializer=get_initializer(config.initializer_range),
1373
+ name=f"decoder_dict.{key}",
1374
+ )
1375
+ for key in self.visual_losses
1376
+ }
1377
+ self.config = config
1378
+
1379
+ def call(self, hidden_states):
1380
+ hidden_states = self.transform(hidden_states)
1381
+ output = {}
1382
+ for key in self.visual_losses:
1383
+ output[key] = self.decoder_dict[key](hidden_states)
1384
+ return output
1385
+
1386
+ def build(self, input_shape=None):
1387
+ if self.built:
1388
+ return
1389
+ self.built = True
1390
+ if getattr(self, "transform", None) is not None:
1391
+ with tf.name_scope(self.transform.name):
1392
+ self.transform.build(None)
1393
+ if getattr(self, "decoder_dict", None) is not None:
1394
+ for layer in self.decoder_dict.values():
1395
+ with tf.name_scope(layer.name):
1396
+ layer.build([None, None, self.config.hidden_size])
1397
+
1398
+
1399
+ @add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING)
1400
+ class TFLxmertForPreTraining(TFLxmertPreTrainedModel):
1401
+ def __init__(self, config, *inputs, **kwargs):
1402
+ super().__init__(config, *inputs, **kwargs)
1403
+
1404
+ self.config = config
1405
+ self.num_qa_labels = config.num_qa_labels
1406
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1407
+
1408
+ # Use of pretraining tasks
1409
+ self.task_mask_lm = config.task_mask_lm
1410
+ self.task_obj_predict = config.task_obj_predict
1411
+ self.task_matched = config.task_matched
1412
+ self.task_qa = config.task_qa
1413
+
1414
+ # Lxmert backbone
1415
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
1416
+
1417
+ # Pre-training heads
1418
+ self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls")
1419
+ if self.task_obj_predict:
1420
+ self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head")
1421
+ if self.task_qa:
1422
+ self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head")
1423
+
1424
+ # Loss functions
1425
+ self.loss_fcts = {
1426
+ "l2": keras.losses.Huber(delta=1.0, name="huber_loss"),
1427
+ "visn_ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
1428
+ "ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
1429
+ }
1430
+
1431
+ visual_losses = {}
1432
+ if config.visual_obj_loss:
1433
+ visual_losses["obj"] = {
1434
+ "shape": (-1,),
1435
+ "num": config.num_object_labels,
1436
+ "loss": "visn_ce",
1437
+ }
1438
+ if config.visual_attr_loss:
1439
+ visual_losses["attr"] = {
1440
+ "shape": (-1,),
1441
+ "num": config.num_attr_labels,
1442
+ "loss": "visn_ce",
1443
+ }
1444
+ if config.visual_feat_loss:
1445
+ visual_losses["feat"] = {
1446
+ "shape": (-1, config.visual_feat_dim),
1447
+ "num": config.visual_feat_dim,
1448
+ "loss": "l2",
1449
+ }
1450
+ self.visual_losses = visual_losses
1451
+
1452
+ @property
1453
+ def dummy_inputs(self):
1454
+ """
1455
+ Dummy inputs to build the network.
1456
+
1457
+ Returns:
1458
+ tf.Tensor with dummy inputs
1459
+ """
1460
+ batch_size = 2
1461
+ num_visual_features = 10
1462
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
1463
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
1464
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
1465
+
1466
+ if self.config.task_obj_predict:
1467
+ obj_labels = {}
1468
+ if self.config.visual_attr_loss and self.config.task_obj_predict:
1469
+ obj_labels["attr"] = (
1470
+ tf.ones([batch_size, num_visual_features]),
1471
+ tf.ones([batch_size, num_visual_features]),
1472
+ )
1473
+ if self.config.visual_feat_loss and self.config.task_obj_predict:
1474
+ obj_labels["feat"] = (
1475
+ tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]),
1476
+ tf.ones([batch_size, num_visual_features]),
1477
+ )
1478
+ if self.config.visual_obj_loss and self.config.task_obj_predict:
1479
+ obj_labels["obj"] = (
1480
+ tf.ones([batch_size, num_visual_features]),
1481
+ tf.ones([batch_size, num_visual_features]),
1482
+ )
1483
+
1484
+ return {
1485
+ **{
1486
+ "input_ids": input_ids,
1487
+ "visual_feats": visual_feats,
1488
+ "visual_pos": visual_pos,
1489
+ },
1490
+ **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}),
1491
+ }
1492
+
1493
+ def get_lm_head(self):
1494
+ return self.cls.predictions
1495
+
1496
+ def get_prefix_bias_name(self):
1497
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1498
+ return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name
1499
+
1500
+ @unpack_inputs
1501
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
1502
+ @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1503
+ def call(
1504
+ self,
1505
+ input_ids: TFModelInputType | None = None,
1506
+ visual_feats: tf.Tensor | None = None,
1507
+ visual_pos: tf.Tensor | None = None,
1508
+ attention_mask: tf.Tensor | None = None,
1509
+ visual_attention_mask: tf.Tensor | None = None,
1510
+ token_type_ids: tf.Tensor | None = None,
1511
+ inputs_embeds: tf.Tensor | None = None,
1512
+ masked_lm_labels: tf.Tensor | None = None,
1513
+ obj_labels: Dict[str, Tuple[tf.Tensor, tf.Tensor]] | None = None,
1514
+ matched_label: tf.Tensor | None = None,
1515
+ ans: tf.Tensor | None = None,
1516
+ output_attentions: bool | None = None,
1517
+ output_hidden_states: bool | None = None,
1518
+ return_dict: bool | None = None,
1519
+ training: bool = False,
1520
+ ) -> Tuple[tf.Tensor] | TFLxmertForPreTrainingOutput:
1521
+ r"""
1522
+ masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1523
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1524
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1525
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1526
+ obj_labels (`Dict[str, Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`):
1527
+ Each key is named after one of the visual losses, and each element of the tuple is of shape
1528
+ `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label ids and
1529
+ the label scores respectively.
1530
+ matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1531
+ Labels for computing whether or not the text input matches the image (classification loss). Input
1532
+ should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
1533
+
1534
+ - 0 indicates that the sentence does not match the image,
1535
+ - 1 indicates that the sentence does match the image.
1536
+ ans (`tf.Tensor` of shape `(batch_size,)`, *optional*, defaults to `None`):
1537
+ A one-hot representation of the correct answer.
1538
+
1539
+ Returns:
1540
+ """
1541
+
1542
+ lxmert_output = self.lxmert(
1543
+ input_ids,
1544
+ visual_feats,
1545
+ visual_pos,
1546
+ attention_mask,
1547
+ visual_attention_mask,
1548
+ token_type_ids,
1549
+ inputs_embeds,
1550
+ output_attentions,
1551
+ output_hidden_states,
1552
+ return_dict,
1553
+ training,
1554
+ )
1555
+
1556
+ lang_output, visual_output, pooled_output = (
1557
+ lxmert_output[0],
1558
+ lxmert_output[1],
1559
+ lxmert_output[2],
1560
+ )
1561
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
1562
+ if self.task_qa:
1563
+ answer_score = self.answer_head(pooled_output)
1564
+ else:
1565
+ answer_score = pooled_output[0][0]
1566
+
1567
+ total_loss = (
1568
+ None
1569
+ if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None)
1570
+ else tf.constant(0.0)
1571
+ )
1572
+ losses = ()
1573
+ if masked_lm_labels is not None and self.task_mask_lm:
1574
+ masked_lm_loss = self.loss_fcts["ce"](
1575
+ tf.reshape(masked_lm_labels, [-1]),
1576
+ tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]),
1577
+ )
1578
+ total_loss += masked_lm_loss
1579
+ losses += (masked_lm_loss,)
1580
+ if matched_label is not None and self.task_matched:
1581
+ matched_loss = self.loss_fcts["ce"](
1582
+ tf.reshape(matched_label, [-1]),
1583
+ tf.reshape(cross_relationship_score, [-1, 2]),
1584
+ )
1585
+ total_loss += matched_loss
1586
+ losses += (matched_loss,)
1587
+ if obj_labels is not None and self.task_obj_predict:
1588
+ total_visn_loss = 0.0
1589
+ visn_prediction_scores_dict = self.obj_predict_head(visual_output)
1590
+ for key, key_info in self.visual_losses.items():
1591
+ label, mask_conf = obj_labels[key]
1592
+ output_dim = key_info["num"]
1593
+ loss_fct_name = key_info["loss"]
1594
+ label_shape = key_info["shape"]
1595
+ weight = self.visual_loss_normalizer
1596
+ visn_loss_fct = self.loss_fcts[loss_fct_name]
1597
+ visn_prediction_scores = visn_prediction_scores_dict[key]
1598
+ visn_loss = visn_loss_fct(
1599
+ tf.reshape(label, label_shape),
1600
+ tf.reshape(visn_prediction_scores, [-1, output_dim]),
1601
+ )
1602
+
1603
+ if visn_loss.ndim > 1: # Regression Losses
1604
+ visn_loss = tf.reduce_mean(visn_loss)
1605
+ visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight
1606
+ total_visn_loss += visn_loss
1607
+ losses += (visn_loss,)
1608
+ total_loss += total_visn_loss
1609
+ if ans is not None and self.task_qa:
1610
+ answer_loss = self.loss_fcts["ce"](
1611
+ tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels])
1612
+ )
1613
+ # exclude "*2" here to match the effect of QA losses.
1614
+ # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. (Used 10 instead of 6 in EMNLP paper)
1615
+ # Now : (loss *1) for 12 epochs
1616
+ #
1617
+ # * 2 # Multiply by 2 because > half of the data will not have label
1618
+ total_loss += answer_loss
1619
+ losses += (answer_loss,)
1620
+ # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach()
1621
+
1622
+ if not return_dict:
1623
+ output = (
1624
+ lang_prediction_scores,
1625
+ cross_relationship_score,
1626
+ answer_score,
1627
+ ) + lxmert_output[3:]
1628
+ return ((total_loss,) + output) if total_loss is not None else output
1629
+
1630
+ return TFLxmertForPreTrainingOutput(
1631
+ loss=total_loss,
1632
+ prediction_logits=lang_prediction_scores,
1633
+ cross_relationship_score=cross_relationship_score,
1634
+ question_answering_score=answer_score,
1635
+ language_hidden_states=lxmert_output.language_hidden_states,
1636
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1637
+ language_attentions=lxmert_output.language_attentions,
1638
+ vision_attentions=lxmert_output.vision_attentions,
1639
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1640
+ )
1641
+
1642
+ def build(self, input_shape=None):
1643
+ if self.built:
1644
+ return
1645
+ self.built = True
1646
+ if getattr(self, "lxmert", None) is not None:
1647
+ with tf.name_scope(self.lxmert.name):
1648
+ self.lxmert.build(None)
1649
+ if getattr(self, "cls", None) is not None:
1650
+ with tf.name_scope(self.cls.name):
1651
+ self.cls.build(None)
1652
+ if getattr(self, "obj_predict_head", None) is not None:
1653
+ with tf.name_scope(self.obj_predict_head.name):
1654
+ self.obj_predict_head.build(None)
1655
+ if getattr(self, "answer_head", None) is not None:
1656
+ with tf.name_scope(self.answer_head.name):
1657
+ self.answer_head.build(None)
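That completes the TF LXMERT modeling code in this diff. As a quick orientation, here is a minimal usage sketch for the `TFLxmertModel` defined above. It is not part of the diff: the checkpoint name `unc-nlp/lxmert-base-uncased` is the standard public one, and the random `visual_feats`/`visual_pos` tensors are stand-ins for real faster R-CNN ROI features, which (as the inputs docstring notes) the library does not provide.

```python
# Minimal sketch: driving TFLxmertModel with tokenized text plus placeholder
# visual features shaped like the input_signature above (feature dim from the
# config, 4 normalized box coordinates per region).
import tensorflow as tf
from transformers import AutoTokenizer, TFLxmertModel

tokenizer = AutoTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = TFLxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")

text_inputs = tokenizer("Who is eating the apple?", return_tensors="tf")
batch_size, num_boxes = 1, 10
visual_feats = tf.random.uniform((batch_size, num_boxes, model.config.visual_feat_dim))
visual_pos = tf.random.uniform((batch_size, num_boxes, 4))  # normalized (0-1) box coordinates

outputs = model(
    input_ids=text_inputs["input_ids"],
    attention_mask=text_inputs["attention_mask"],
    visual_feats=visual_feats,
    visual_pos=visual_pos,
)
print(outputs.language_output.shape, outputs.vision_output.shape, outputs.pooled_output.shape)
```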
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py ADDED
@@ -0,0 +1,520 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import collections
17
+ import os
18
+ import unicodedata
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
28
+
29
+ PRETRAINED_VOCAB_FILES_MAP = {
30
+ "vocab_file": {
31
+ "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
32
+ }
33
+ }
34
+
35
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
36
+ "unc-nlp/lxmert-base-uncased": 512,
37
+ }
38
+
39
+ PRETRAINED_INIT_CONFIGURATION = {
40
+ "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
41
+ }
42
+
43
+
44
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
45
+ def load_vocab(vocab_file):
46
+ """Loads a vocabulary file into a dictionary."""
47
+ vocab = collections.OrderedDict()
48
+ with open(vocab_file, "r", encoding="utf-8") as reader:
49
+ tokens = reader.readlines()
50
+ for index, token in enumerate(tokens):
51
+ token = token.rstrip("\n")
52
+ vocab[token] = index
53
+ return vocab
54
+
55
+
56
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
57
+ def whitespace_tokenize(text):
58
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
59
+ text = text.strip()
60
+ if not text:
61
+ return []
62
+ tokens = text.split()
63
+ return tokens
64
+
65
+
66
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, BertTokenizer->LxmertTokenizer
67
+ class LxmertTokenizer(PreTrainedTokenizer):
68
+ r"""
69
+ Construct a Lxmert tokenizer. Based on WordPiece.
70
+
71
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
72
+ this superclass for more information regarding those methods.
73
+
74
+ Args:
75
+ vocab_file (`str`):
76
+ File containing the vocabulary.
77
+ do_lower_case (`bool`, *optional*, defaults to `True`):
78
+ Whether or not to lowercase the input when tokenizing.
79
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
80
+ Whether or not to do basic tokenization before WordPiece.
81
+ never_split (`Iterable`, *optional*):
82
+ Collection of tokens which will never be split during tokenization. Only has an effect when
83
+ `do_basic_tokenize=True`
84
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
85
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
86
+ token instead.
87
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
88
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
89
+ sequence classification or for a text and a question for question answering. It is also used as the last
90
+ token of a sequence built with special tokens.
91
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
92
+ The token used for padding, for example when batching sequences of different lengths.
93
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
94
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
95
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
96
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
97
+ The token used for masking values. This is the token used when training this model with masked language
98
+ modeling. This is the token which the model will try to predict.
99
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
100
+ Whether or not to tokenize Chinese characters.
101
+
102
+ This should likely be deactivated for Japanese (see this
103
+ [issue](https://github.com/huggingface/transformers/issues/328)).
104
+ strip_accents (`bool`, *optional*):
105
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
106
+ value for `lowercase` (as in the original Lxmert).
107
+ """
108
+
109
+ vocab_files_names = VOCAB_FILES_NAMES
110
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
111
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
112
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
113
+
114
+ def __init__(
115
+ self,
116
+ vocab_file,
117
+ do_lower_case=True,
118
+ do_basic_tokenize=True,
119
+ never_split=None,
120
+ unk_token="[UNK]",
121
+ sep_token="[SEP]",
122
+ pad_token="[PAD]",
123
+ cls_token="[CLS]",
124
+ mask_token="[MASK]",
125
+ tokenize_chinese_chars=True,
126
+ strip_accents=None,
127
+ **kwargs,
128
+ ):
129
+ if not os.path.isfile(vocab_file):
130
+ raise ValueError(
131
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
132
+ " model use `tokenizer = LxmertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
133
+ )
134
+ self.vocab = load_vocab(vocab_file)
135
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
136
+ self.do_basic_tokenize = do_basic_tokenize
137
+ if do_basic_tokenize:
138
+ self.basic_tokenizer = BasicTokenizer(
139
+ do_lower_case=do_lower_case,
140
+ never_split=never_split,
141
+ tokenize_chinese_chars=tokenize_chinese_chars,
142
+ strip_accents=strip_accents,
143
+ )
144
+
145
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
146
+
147
+ super().__init__(
148
+ do_lower_case=do_lower_case,
149
+ do_basic_tokenize=do_basic_tokenize,
150
+ never_split=never_split,
151
+ unk_token=unk_token,
152
+ sep_token=sep_token,
153
+ pad_token=pad_token,
154
+ cls_token=cls_token,
155
+ mask_token=mask_token,
156
+ tokenize_chinese_chars=tokenize_chinese_chars,
157
+ strip_accents=strip_accents,
158
+ **kwargs,
159
+ )
160
+
161
+ @property
162
+ def do_lower_case(self):
163
+ return self.basic_tokenizer.do_lower_case
164
+
165
+ @property
166
+ def vocab_size(self):
167
+ return len(self.vocab)
168
+
169
+ def get_vocab(self):
170
+ return dict(self.vocab, **self.added_tokens_encoder)
171
+
172
+ def _tokenize(self, text, split_special_tokens=False):
173
+ split_tokens = []
174
+ if self.do_basic_tokenize:
175
+ for token in self.basic_tokenizer.tokenize(
176
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
177
+ ):
178
+ # If the token is part of the never_split set
179
+ if token in self.basic_tokenizer.never_split:
180
+ split_tokens.append(token)
181
+ else:
182
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
183
+ else:
184
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
185
+ return split_tokens
186
+
187
+ def _convert_token_to_id(self, token):
188
+ """Converts a token (str) in an id using the vocab."""
189
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
190
+
191
+ def _convert_id_to_token(self, index):
192
+ """Converts an index (integer) in a token (str) using the vocab."""
193
+ return self.ids_to_tokens.get(index, self.unk_token)
194
+
195
+ def convert_tokens_to_string(self, tokens):
196
+ """Converts a sequence of tokens (string) in a single string."""
197
+ out_string = " ".join(tokens).replace(" ##", "").strip()
198
+ return out_string
199
+
200
+ def build_inputs_with_special_tokens(
201
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
202
+ ) -> List[int]:
203
+ """
204
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
205
+ adding special tokens. A Lxmert sequence has the following format:
206
+
207
+ - single sequence: `[CLS] X [SEP]`
208
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
209
+
210
+ Args:
211
+ token_ids_0 (`List[int]`):
212
+ List of IDs to which the special tokens will be added.
213
+ token_ids_1 (`List[int]`, *optional*):
214
+ Optional second list of IDs for sequence pairs.
215
+
216
+ Returns:
217
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
218
+ """
219
+ if token_ids_1 is None:
220
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
221
+ cls = [self.cls_token_id]
222
+ sep = [self.sep_token_id]
223
+ return cls + token_ids_0 + sep + token_ids_1 + sep
224
+
225
+ def get_special_tokens_mask(
226
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
227
+ ) -> List[int]:
228
+ """
229
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
230
+ special tokens using the tokenizer `prepare_for_model` method.
231
+
232
+ Args:
233
+ token_ids_0 (`List[int]`):
234
+ List of IDs.
235
+ token_ids_1 (`List[int]`, *optional*):
236
+ Optional second list of IDs for sequence pairs.
237
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
238
+ Whether or not the token list is already formatted with special tokens for the model.
239
+
240
+ Returns:
241
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
242
+ """
243
+
244
+ if already_has_special_tokens:
245
+ return super().get_special_tokens_mask(
246
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
247
+ )
248
+
249
+ if token_ids_1 is not None:
250
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
251
+ return [1] + ([0] * len(token_ids_0)) + [1]
252
+
253
+ def create_token_type_ids_from_sequences(
254
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
255
+ ) -> List[int]:
256
+ """
257
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
258
+ pair mask has the following format:
259
+
260
+ ```
261
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
262
+ | first sequence | second sequence |
263
+ ```
264
+
265
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
266
+
267
+ Args:
268
+ token_ids_0 (`List[int]`):
269
+ List of IDs.
270
+ token_ids_1 (`List[int]`, *optional*):
271
+ Optional second list of IDs for sequence pairs.
272
+
273
+ Returns:
274
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
275
+ """
276
+ sep = [self.sep_token_id]
277
+ cls = [self.cls_token_id]
278
+ if token_ids_1 is None:
279
+ return len(cls + token_ids_0 + sep) * [0]
280
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
281
+
282
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
283
+ index = 0
284
+ if os.path.isdir(save_directory):
285
+ vocab_file = os.path.join(
286
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
287
+ )
288
+ else:
289
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
290
+ with open(vocab_file, "w", encoding="utf-8") as writer:
291
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
292
+ if index != token_index:
293
+ logger.warning(
294
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
295
+ " Please check that the vocabulary is not corrupted!"
296
+ )
297
+ index = token_index
298
+ writer.write(token + "\n")
299
+ index += 1
300
+ return (vocab_file,)
301
+
302
+
303
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
304
+ class BasicTokenizer(object):
305
+ """
306
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
307
+
308
+ Args:
309
+ do_lower_case (`bool`, *optional*, defaults to `True`):
310
+ Whether or not to lowercase the input when tokenizing.
311
+ never_split (`Iterable`, *optional*):
312
+ Collection of tokens which will never be split during tokenization. Only has an effect when
313
+ `do_basic_tokenize=True`
314
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
315
+ Whether or not to tokenize Chinese characters.
316
+
317
+ This should likely be deactivated for Japanese (see this
318
+ [issue](https://github.com/huggingface/transformers/issues/328)).
319
+ strip_accents (`bool`, *optional*):
320
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
321
+ value for `lowercase` (as in the original BERT).
322
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
323
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
324
+ the full context of the words, such as contractions.
325
+ """
326
+
327
+ def __init__(
328
+ self,
329
+ do_lower_case=True,
330
+ never_split=None,
331
+ tokenize_chinese_chars=True,
332
+ strip_accents=None,
333
+ do_split_on_punc=True,
334
+ ):
335
+ if never_split is None:
336
+ never_split = []
337
+ self.do_lower_case = do_lower_case
338
+ self.never_split = set(never_split)
339
+ self.tokenize_chinese_chars = tokenize_chinese_chars
340
+ self.strip_accents = strip_accents
341
+ self.do_split_on_punc = do_split_on_punc
342
+
343
+ def tokenize(self, text, never_split=None):
344
+ """
345
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
346
+
347
+ Args:
348
+ never_split (`List[str]`, *optional*):
349
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
350
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
351
+ """
352
+ # union() returns a new set by concatenating the two sets.
353
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
354
+ text = self._clean_text(text)
355
+
356
+ # This was added on November 1st, 2018 for the multilingual and Chinese
357
+ # models. This is also applied to the English models now, but it doesn't
358
+ # matter since the English models were not trained on any Chinese data
359
+ # and generally don't have any Chinese data in them (there are Chinese
360
+ # characters in the vocabulary because Wikipedia does have some Chinese
361
+ # words in the English Wikipedia.).
362
+ if self.tokenize_chinese_chars:
363
+ text = self._tokenize_chinese_chars(text)
364
+ # prevents treating the same character with different unicode codepoints as different characters
365
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
366
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
367
+ split_tokens = []
368
+ for token in orig_tokens:
369
+ if token not in never_split:
370
+ if self.do_lower_case:
371
+ token = token.lower()
372
+ if self.strip_accents is not False:
373
+ token = self._run_strip_accents(token)
374
+ elif self.strip_accents:
375
+ token = self._run_strip_accents(token)
376
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
377
+
378
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
379
+ return output_tokens
380
+
381
+ def _run_strip_accents(self, text):
382
+ """Strips accents from a piece of text."""
383
+ text = unicodedata.normalize("NFD", text)
384
+ output = []
385
+ for char in text:
386
+ cat = unicodedata.category(char)
387
+ if cat == "Mn":
388
+ continue
389
+ output.append(char)
390
+ return "".join(output)
391
+
392
+ def _run_split_on_punc(self, text, never_split=None):
393
+ """Splits punctuation on a piece of text."""
394
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
395
+ return [text]
396
+ chars = list(text)
397
+ i = 0
398
+ start_new_word = True
399
+ output = []
400
+ while i < len(chars):
401
+ char = chars[i]
402
+ if _is_punctuation(char):
403
+ output.append([char])
404
+ start_new_word = True
405
+ else:
406
+ if start_new_word:
407
+ output.append([])
408
+ start_new_word = False
409
+ output[-1].append(char)
410
+ i += 1
411
+
412
+ return ["".join(x) for x in output]
413
+
414
+ def _tokenize_chinese_chars(self, text):
415
+ """Adds whitespace around any CJK character."""
416
+ output = []
417
+ for char in text:
418
+ cp = ord(char)
419
+ if self._is_chinese_char(cp):
420
+ output.append(" ")
421
+ output.append(char)
422
+ output.append(" ")
423
+ else:
424
+ output.append(char)
425
+ return "".join(output)
426
+
427
+ def _is_chinese_char(self, cp):
428
+ """Checks whether CP is the codepoint of a CJK character."""
429
+ # This defines a "chinese character" as anything in the CJK Unicode block:
430
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
431
+ #
432
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
433
+ # despite its name. The modern Korean Hangul alphabet is a different block,
434
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
435
+ # space-separated words, so they are not treated specially and handled
436
+ # like all of the other languages.
437
+ if (
438
+ (cp >= 0x4E00 and cp <= 0x9FFF)
439
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
440
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
441
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
442
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
443
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
444
+ or (cp >= 0xF900 and cp <= 0xFAFF)
445
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
446
+ ): #
447
+ return True
448
+
449
+ return False
450
+
451
+ def _clean_text(self, text):
452
+ """Performs invalid character removal and whitespace cleanup on text."""
453
+ output = []
454
+ for char in text:
455
+ cp = ord(char)
456
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
457
+ continue
458
+ if _is_whitespace(char):
459
+ output.append(" ")
460
+ else:
461
+ output.append(char)
462
+ return "".join(output)
463
+
464
+
465
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
466
+ class WordpieceTokenizer(object):
467
+ """Runs WordPiece tokenization."""
468
+
469
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
470
+ self.vocab = vocab
471
+ self.unk_token = unk_token
472
+ self.max_input_chars_per_word = max_input_chars_per_word
473
+
474
+ def tokenize(self, text):
475
+ """
476
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
477
+ tokenization using the given vocabulary.
478
+
479
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
480
+
481
+ Args:
482
+ text: A single token or whitespace separated tokens. This should have
483
+ already been passed through *BasicTokenizer*.
484
+
485
+ Returns:
486
+ A list of wordpiece tokens.
487
+ """
488
+
489
+ output_tokens = []
490
+ for token in whitespace_tokenize(text):
491
+ chars = list(token)
492
+ if len(chars) > self.max_input_chars_per_word:
493
+ output_tokens.append(self.unk_token)
494
+ continue
495
+
496
+ is_bad = False
497
+ start = 0
498
+ sub_tokens = []
499
+ while start < len(chars):
500
+ end = len(chars)
501
+ cur_substr = None
502
+ while start < end:
503
+ substr = "".join(chars[start:end])
504
+ if start > 0:
505
+ substr = "##" + substr
506
+ if substr in self.vocab:
507
+ cur_substr = substr
508
+ break
509
+ end -= 1
510
+ if cur_substr is None:
511
+ is_bad = True
512
+ break
513
+ sub_tokens.append(cur_substr)
514
+ start = end
515
+
516
+ if is_bad:
517
+ output_tokens.append(self.unk_token)
518
+ else:
519
+ output_tokens.extend(sub_tokens)
520
+ return output_tokens
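End of the LXMERT tokenizer file. Below is a short sketch of how the pieces above behave in practice, assuming the `unc-nlp/lxmert-base-uncased` vocabulary is downloadable: `BasicTokenizer` handles whitespace/punctuation splitting, `WordpieceTokenizer` applies the greedy longest-match-first split, and `build_inputs_with_special_tokens` produces the `[CLS] A [SEP] B [SEP]` layout.

```python
# Sketch of the tokenization pipeline documented above; the exact word pieces
# depend on the vocabulary shipped with the checkpoint.
from transformers import LxmertTokenizer

tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")

print(tokenizer.tokenize("unaffable"))  # greedy longest-match-first WordPiece split

encoded = tokenizer("is the cat on the mat?", "yes it is")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
# -> ['[CLS]', ..., '[SEP]', ..., '[SEP]']   (pair format: [CLS] A [SEP] B [SEP])
print(encoded["token_type_ids"])  # 0 over the sentence-A span, 1 over the sentence-B span
```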
env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__init__.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_rag": ["RagConfig"],
22
+ "retrieval_rag": ["RagRetriever"],
23
+ "tokenization_rag": ["RagTokenizer"],
24
+ }
25
+
26
+ try:
27
+ if not is_torch_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["modeling_rag"] = [
33
+ "RagModel",
34
+ "RagPreTrainedModel",
35
+ "RagSequenceForGeneration",
36
+ "RagTokenForGeneration",
37
+ ]
38
+
39
+ try:
40
+ if not is_tf_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_tf_rag"] = [
46
+ "TFRagModel",
47
+ "TFRagPreTrainedModel",
48
+ "TFRagSequenceForGeneration",
49
+ "TFRagTokenForGeneration",
50
+ ]
51
+
52
+
53
+ if TYPE_CHECKING:
54
+ from .configuration_rag import RagConfig
55
+ from .retrieval_rag import RagRetriever
56
+ from .tokenization_rag import RagTokenizer
57
+
58
+ try:
59
+ if not is_torch_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
65
+
66
+ try:
67
+ if not is_tf_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ pass
71
+ else:
72
+ from .modeling_tf_rag import (
73
+ TFRagModel,
74
+ TFRagPreTrainedModel,
75
+ TFRagSequenceForGeneration,
76
+ TFRagTokenForGeneration,
77
+ )
78
+
79
+ else:
80
+ import sys
81
+
82
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
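The `_LazyModule` registration above keeps `import transformers.models.rag` cheap: only `_import_structure` is recorded at import time, and each submodule is imported the first time one of its names is accessed. A small sketch of that behavior:

```python
# Attribute access on the lazy module triggers the import of the submodule
# that _import_structure maps it to (configuration_rag, retrieval_rag, ...).
import transformers.models.rag as rag

config_cls = rag.RagConfig        # imports configuration_rag on first access
retriever_cls = rag.RagRetriever  # imports retrieval_rag on first access
print(config_cls.model_type)      # "rag"
```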
env-llmeval/lib/python3.10/site-packages/transformers/models/rag/configuration_rag.py ADDED
@@ -0,0 +1,182 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ RAG model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import add_start_docstrings
20
+
21
+
22
+ RAG_CONFIG_DOC = r"""
23
+ [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
24
+ can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
25
+
26
+ Args:
27
+ title_sep (`str`, *optional*, defaults to `" / "`):
28
+ Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
29
+ doc_sep (`str`, *optional*, defaults to `" // "`):
30
+ Separator inserted between the text of the retrieved document and the original input when calling
31
+ [`RagRetriever`].
32
+ n_docs (`int`, *optional*, defaults to 5):
33
+ Number of documents to retrieve.
34
+ max_combined_length (`int`, *optional*, defaults to 300):
35
+ Max length of contextualized input returned by [`~RagRetriever.__call__`].
36
+ retrieval_vector_size (`int`, *optional*, defaults to 768):
37
+ Dimensionality of the document embeddings indexed by [`RagRetriever`].
38
+ retrieval_batch_size (`int`, *optional*, defaults to 8):
39
+ Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
40
+ [`RagRetriever`].
41
+ dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
42
+ A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
43
+ using `datasets.list_datasets()`).
44
+ dataset_split (`str`, *optional*, defaults to `"train"`)
45
+ Which split of the `dataset` to load.
46
+ index_name (`str`, *optional*, defaults to `"compressed"`)
47
+ The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
48
+ `"compressed"`.
49
+ index_path (`str`, *optional*)
50
+ The path to the serialized faiss index on disk.
51
+ passages_path (`str`, *optional*):
52
+ A path to text passages compatible with the faiss index. Required if using
53
+ [`~models.rag.retrieval_rag.LegacyIndex`]
54
+ use_dummy_dataset (`bool`, *optional*, defaults to `False`)
55
+ Whether to load a "dummy" variant of the dataset specified by `dataset`.
56
+ label_smoothing (`float`, *optional*, defaults to 0.0):
57
+ Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
58
+ in the loss calculation. If set to 0, no label smoothing is performed.
59
+ do_marginalize (`bool`, *optional*, defaults to `False`):
60
+ If `True`, the logits are marginalized over all documents by making use of
61
+ `torch.nn.functional.log_softmax`.
62
+ reduce_loss (`bool`, *optional*, defaults to `False`):
63
+ Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
64
+ do_deduplication (`bool`, *optional*, defaults to `True`):
65
+ Whether or not to deduplicate the generations from different context documents for a given input. Has to be
66
+ set to `False` if used while training with a distributed backend.
67
+ exclude_bos_score (`bool`, *optional*, defaults to `False`):
68
+ Whether or not to disregard the BOS token when computing the loss.
69
+ output_retrieved(`bool`, *optional*, defaults to `False`):
70
+ If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
71
+ `context_attention_mask` are returned. See returned tensors for more detail.
72
+ use_cache (`bool`, *optional*, defaults to `True`):
73
+ Whether or not the model should return the last key/values attentions (not used by all models).
74
+ forced_eos_token_id (`int`, *optional*):
75
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
76
+ `eos_token_id`.
77
+ """
78
+
79
+
80
+ @add_start_docstrings(RAG_CONFIG_DOC)
81
+ class RagConfig(PretrainedConfig):
82
+ model_type = "rag"
83
+ is_composition = True
84
+
85
+ def __init__(
86
+ self,
87
+ vocab_size=None,
88
+ is_encoder_decoder=True,
89
+ prefix=None,
90
+ bos_token_id=None,
91
+ pad_token_id=None,
92
+ eos_token_id=None,
93
+ decoder_start_token_id=None,
94
+ title_sep=" / ",
95
+ doc_sep=" // ",
96
+ n_docs=5,
97
+ max_combined_length=300,
98
+ retrieval_vector_size=768,
99
+ retrieval_batch_size=8,
100
+ dataset="wiki_dpr",
101
+ dataset_split="train",
102
+ index_name="compressed",
103
+ index_path=None,
104
+ passages_path=None,
105
+ use_dummy_dataset=False,
106
+ reduce_loss=False,
107
+ label_smoothing=0.0,
108
+ do_deduplication=True,
109
+ exclude_bos_score=False,
110
+ do_marginalize=False,
111
+ output_retrieved=False,
112
+ use_cache=True,
113
+ forced_eos_token_id=None,
114
+ dataset_revision=None,
115
+ **kwargs,
116
+ ):
117
+ super().__init__(
118
+ bos_token_id=bos_token_id,
119
+ pad_token_id=pad_token_id,
120
+ eos_token_id=eos_token_id,
121
+ decoder_start_token_id=decoder_start_token_id,
122
+ forced_eos_token_id=forced_eos_token_id,
123
+ is_encoder_decoder=is_encoder_decoder,
124
+ prefix=prefix,
125
+ vocab_size=vocab_size,
126
+ **kwargs,
127
+ )
128
+ assert (
129
+ "question_encoder" in kwargs and "generator" in kwargs
130
+ ), "Config has to be initialized with question_encoder and generator config"
131
+ question_encoder_config = kwargs.pop("question_encoder")
132
+ question_encoder_model_type = question_encoder_config.pop("model_type")
133
+ decoder_config = kwargs.pop("generator")
134
+ decoder_model_type = decoder_config.pop("model_type")
135
+
136
+ from ..auto.configuration_auto import AutoConfig
137
+
138
+ self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
139
+ self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
140
+
141
+ self.reduce_loss = reduce_loss
142
+ self.label_smoothing = label_smoothing
143
+ self.exclude_bos_score = exclude_bos_score
144
+ self.do_marginalize = do_marginalize
145
+
146
+ self.title_sep = title_sep
147
+ self.doc_sep = doc_sep
148
+ self.n_docs = n_docs
149
+ self.max_combined_length = max_combined_length
150
+
151
+ self.dataset = dataset
152
+ self.dataset_split = dataset_split
153
+ self.index_name = index_name
154
+
155
+ self.retrieval_vector_size = retrieval_vector_size
156
+ self.retrieval_batch_size = retrieval_batch_size
157
+ self.passages_path = passages_path
158
+ self.index_path = index_path
159
+ self.use_dummy_dataset = use_dummy_dataset
160
+ self.dataset_revision = dataset_revision
161
+
162
+ self.output_retrieved = output_retrieved
163
+
164
+ self.do_deduplication = do_deduplication
165
+
166
+ self.use_cache = use_cache
167
+
168
+ if self.forced_eos_token_id is None:
169
+ self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
170
+
171
+ @classmethod
172
+ def from_question_encoder_generator_configs(
173
+ cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
174
+ ) -> PretrainedConfig:
175
+ r"""
176
+ Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
177
+ decoder model configuration.
178
+
179
+ Returns:
180
+ [`EncoderDecoderConfig`]: An instance of a configuration object
181
+ """
182
+ return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
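The arguments documented above map one-to-one onto `RagConfig.__init__`. A minimal, illustrative sketch (an editorial addition, not part of this diff) of composing the config from two sub-model configs, assuming the standard `DPRConfig` and `BartConfig` classes:

```python
from transformers import BartConfig, DPRConfig, RagConfig

# Any autoencoding config can act as the question-encoder config and any
# seq2seq config as the generator config.
question_encoder_config = DPRConfig()
generator_config = BartConfig()

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config,
    generator_config,
    n_docs=5,                 # documents retrieved per query
    max_combined_length=300,  # max length of the contextualized input
)
print(rag_config.generator.model_type)  # -> "bart"
```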
env-llmeval/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py ADDED
@@ -0,0 +1,1628 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """RAG model implementation."""
16
+
17
+ import copy
18
+ from dataclasses import dataclass
19
+ from typing import Callable, List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ from torch import nn
23
+
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...generation import BeamSearchScorer, GenerationConfig, LogitsProcessorList, StoppingCriteriaList
26
+ from ...modeling_outputs import ModelOutput
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...utils import add_start_docstrings_to_model_forward, logging, replace_return_docstrings
29
+ from .configuration_rag import RagConfig
30
+ from .retrieval_rag import RagRetriever
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ _CONFIG_FOR_DOC = "RagConfig"
36
+
37
+
38
+ @dataclass
39
+ class RetrievAugLMMarginOutput(ModelOutput):
40
+ """
41
+ Base class for retriever augmented marginalized models outputs.
42
+
43
+ Args:
44
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
45
+ Language modeling loss.
46
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
47
+ Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
48
+ each vocabulary token.
49
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
50
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
51
+ `question_encoder_last_hidden_state`.
52
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
53
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
54
+ num_heads, sequence_length, embed_size_per_head)`.
55
+
56
+ Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
57
+ (see `past_key_values` input) to speed up sequential decoding.
58
+ retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
59
+ Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
60
+ the `doc_scores`.
61
+ retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
62
+ The indexes of the embedded documents retrieved by the retriever.
63
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
64
+ Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
65
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
66
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
67
+ retriever.
68
+ question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
69
+ Sequence of hidden states at the output of the last layer of the question encoder, corresponding to the pooled output of the
70
+ model.
71
+ question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
72
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
73
+ shape `(batch_size, sequence_length, hidden_size)`.
74
+
75
+ Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
76
+ question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
77
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
78
+ sequence_length)`.
79
+
80
+ Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
81
+ average in the self-attention heads.
82
+ generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
83
+ Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
84
+ generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
85
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
86
+ shape `(batch_size, sequence_length, hidden_size)`.
87
+
88
+ Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
89
+ generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
90
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
91
+ sequence_length)`.
92
+
93
+ Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
94
+ average in the self-attention heads.
95
+ generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
96
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
97
+ shape `(batch_size, sequence_length, hidden_size)`.
98
+
99
+ Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
100
+ generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
101
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
102
+ sequence_length)`.
103
+
104
+ Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
105
+ average in the self-attention heads.
106
+ generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
107
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
108
+ sequence_length)`.
109
+
110
+ Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
111
+ weighted average in the cross-attention heads.
112
+ """
113
+
114
+ loss: Optional[torch.FloatTensor] = None
115
+ logits: torch.FloatTensor = None
116
+ doc_scores: torch.FloatTensor = None
117
+ past_key_values: Optional[List[torch.FloatTensor]] = None
118
+ retrieved_doc_embeds: Optional[torch.FloatTensor] = None
119
+ retrieved_doc_ids: Optional[torch.LongTensor] = None
120
+ context_input_ids: Optional[torch.LongTensor] = None
121
+ context_attention_mask: Optional[torch.LongTensor] = None
122
+ question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
123
+ question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
124
+ question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
125
+ generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
126
+ generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
127
+ generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
128
+ generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
129
+ generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
130
+ generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
131
+
132
+
133
+ @dataclass
134
+ class RetrievAugLMOutput(ModelOutput):
135
+ """
136
+ Args:
137
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
138
+ Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
139
+ each vocabulary token.
140
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
141
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
142
+ `question_encoder_last_hidden_state`.
143
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
144
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
145
+ num_heads, sequence_length, embed_size_per_head)`.
146
+
147
+ Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
148
+ (see `past_key_values` input) to speed up sequential decoding.
149
+ retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
150
+ Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
151
+ the `doc_scores`.
152
+ retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
153
+ The indexes of the embedded documents retrieved by the retriever.
154
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
155
+ Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
156
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
157
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
158
+ retriever.
159
+ question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
160
+ Sequence of hidden states at the output of the last layer of the question encoder, corresponding to the pooled output of the
161
+ model.
162
+ question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
163
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
164
+ shape `(batch_size, sequence_length, hidden_size)`.
165
+
166
+ Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
167
+ question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
168
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
169
+ sequence_length)`.
170
+
171
+ Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
172
+ average in the self-attention heads.
173
+ generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
174
+ Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
175
+ generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
176
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
177
+ shape `(batch_size, sequence_length, hidden_size)`.
178
+
179
+ Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
180
+ generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
181
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
182
+ sequence_length)`.
183
+
184
+ Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
185
+ average in the self-attention heads.
186
+ generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
187
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
188
+ shape `(batch_size, sequence_length, hidden_size)`.
189
+
190
+ Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
191
+ generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
192
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
193
+ sequence_length)`.
194
+
195
+ Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
196
+ average in the self-attention heads.
197
+ generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
198
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
199
+ sequence_length)`.
200
+
201
+ Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
202
+ weighted average in the cross-attention heads.
203
+ """
204
+
205
+ logits: torch.FloatTensor = None
206
+ doc_scores: torch.FloatTensor = None
207
+ past_key_values: Optional[List[torch.FloatTensor]] = None
208
+ retrieved_doc_embeds: Optional[torch.FloatTensor] = None
209
+ retrieved_doc_ids: Optional[torch.LongTensor] = None
210
+ context_input_ids: Optional[torch.LongTensor] = None
211
+ context_attention_mask: Optional[torch.LongTensor] = None
212
+ question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
213
+ question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
214
+ question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
215
+ generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
216
+ generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
217
+ generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
218
+ generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
219
+ generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
220
+ generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
221
+
222
+
223
+ class RagPreTrainedModel(PreTrainedModel):
224
+ r"""
225
+ RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
226
+ Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
227
+
228
+ RAG is a retriever-augmented model that encapsulates three components: a question encoder, a dataset retriever and a
229
+ generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.
230
+
231
+ """
232
+
233
+ config_class = RagConfig
234
+ base_model_prefix = "rag"
235
+
236
+ @classmethod
237
+ def from_pretrained(cls, *args, **kwargs):
238
+ # At the moment fast initialization is not supported
239
+ # for composite models
240
+ kwargs["_fast_init"] = False
241
+ return super().from_pretrained(*args, **kwargs)
242
+
243
+ @classmethod
244
+ def from_pretrained_question_encoder_generator(
245
+ cls,
246
+ question_encoder_pretrained_model_name_or_path: str = None,
247
+ generator_pretrained_model_name_or_path: str = None,
248
+ retriever: RagRetriever = None,
249
+ **kwargs,
250
+ ) -> PreTrainedModel:
251
+ r"""
252
+ Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
253
+ model checkpoints.
254
+
255
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
256
+ the model, you need to first set it back in training mode with `model.train()`.
257
+
258
+ Params:
259
+ question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
260
+ Information necessary to initiate the question encoder. Can be either:
261
+
262
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
263
+ - A path to a *directory* containing model weights saved using
264
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
265
+ - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
266
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
267
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
268
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
269
+
270
+ generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
271
+ Information necessary to initiate the generator. Can be either:
272
+
273
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
274
+ - A path to a *directory* containing model weights saved using
275
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
276
+ - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
277
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
278
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
279
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
280
+
281
+ model_args (remaining positional arguments, *optional*):
282
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
283
+ retriever ([`RagRetriever`], *optional*):
284
+ The retriever to use.
285
+ kwargs (remaining dictionary of keyword arguments, *optional*):
286
+ Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
287
+ `output_attentions=True`).
288
+
289
+ - To update the question_encoder configuration, use the prefix *question_encoder_* for each
290
+ configuration parameter.
291
+ - To update the generator configuration, use the prefix *generator_* for each configuration parameter.
292
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
293
+
294
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
295
+
296
+ Example:
297
+
298
+ ```python
299
+ >>> from transformers import RagModel
300
+
301
+ >>> # initialize a RAG from two pretrained models.
302
+ >>> model = RagModel.from_pretrained_question_encoder_generator(
303
+ ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
304
+ ... )
305
+ >>> # saving model after fine-tuning
306
+ >>> model.save_pretrained("./rag")
307
+ >>> # load fine-tuned model
308
+ >>> model = RagModel.from_pretrained("./rag")
309
+ ```"""
310
+
311
+ kwargs_question_encoder = {
312
+ argument[len("question_encoder_") :]: value
313
+ for argument, value in kwargs.items()
314
+ if argument.startswith("question_encoder_")
315
+ }
316
+
317
+ kwargs_generator = {
318
+ argument[len("generator_") :]: value
319
+ for argument, value in kwargs.items()
320
+ if argument.startswith("generator_")
321
+ }
322
+
323
+ # remove question_encoder, generator kwargs from kwargs
324
+ for key in kwargs_question_encoder.keys():
325
+ del kwargs["question_encoder_" + key]
326
+ for key in kwargs_generator.keys():
327
+ del kwargs["generator_" + key]
328
+
329
+ # Load and initialize the question_encoder and generator
330
+ # The distinction between question_encoder and generator at the model level is made
331
+ # by the value of the flag `is_generator` that we need to set correctly.
332
+ question_encoder = kwargs_question_encoder.pop("model", None)
333
+ if question_encoder is None:
334
+ assert question_encoder_pretrained_model_name_or_path is not None, (
335
+ "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to"
336
+ " be defined"
337
+ )
338
+ from ..auto.modeling_auto import AutoModel
339
+
340
+ if "config" not in kwargs_question_encoder:
341
+ from ..auto.configuration_auto import AutoConfig
342
+
343
+ question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained(
344
+ question_encoder_pretrained_model_name_or_path,
345
+ **kwargs_question_encoder,
346
+ return_unused_kwargs=True,
347
+ )
348
+ kwargs_question_encoder["config"] = question_encoder_config
349
+
350
+ question_encoder = AutoModel.from_pretrained(
351
+ question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder
352
+ )
353
+
354
+ generator = kwargs_generator.pop("model", None)
355
+ if generator is None:
356
+ assert generator_pretrained_model_name_or_path is not None, (
357
+ "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has"
358
+ " to be defined"
359
+ )
360
+ from ..auto.modeling_auto import AutoModelForSeq2SeqLM
361
+
362
+ if "config" not in kwargs_generator:
363
+ from ..auto.configuration_auto import AutoConfig
364
+
365
+ generator_config, kwargs_generator = AutoConfig.from_pretrained(
366
+ generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True
367
+ )
368
+
369
+ kwargs_generator["config"] = generator_config
370
+
371
+ generator = AutoModelForSeq2SeqLM.from_pretrained(
372
+ generator_pretrained_model_name_or_path, **kwargs_generator
373
+ )
374
+
375
+ # instantiate config with corresponding kwargs
376
+ config = kwargs.get("config", None)
377
+ if config is None:
378
+ config = RagConfig.from_question_encoder_generator_configs(
379
+ question_encoder.config, generator.config, **kwargs
380
+ )
381
+
382
+ return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
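As the docstring above explains, keyword arguments prefixed with `question_encoder_` or `generator_` are routed to the respective sub-model configuration, while unprefixed ones update the parent `RagConfig`. A hedged usage sketch (the checkpoint names are illustrative choices, not mandated by this code):

```python
from transformers import RagModel

model = RagModel.from_pretrained_question_encoder_generator(
    "facebook/dpr-question_encoder-single-nq-base",
    "facebook/bart-large",
    question_encoder_output_attentions=True,  # forwarded to the question-encoder config
    generator_output_attentions=True,         # forwarded to the generator config
    n_docs=5,                                 # forwarded to the parent RagConfig
)
```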
383
+
384
+
385
+ RAG_START_DOCSTRING = r"""
386
+
387
+ RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward
388
+ pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context
389
+ documents. The documents are then prepended to the input, and the resulting contextualized input is passed to the generator.
390
+
391
+ The question encoder can be any *autoencoding* model, preferably [`DPRQuestionEncoder`], and the generator can be
392
+ any *seq2seq* model, preferably [`BartForConditionalGeneration`].
393
+
394
+ The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the
395
+ outputs of a retriever in multiple steps---see examples for more details. The model is compatible with any
396
+ *autoencoding* model as the `question_encoder` and any *seq2seq* model with language model head as the `generator`.
397
+ It has been tested with [`DPRQuestionEncoder`] as the `question_encoder` and [`BartForConditionalGeneration`] or
398
+ [`T5ForConditionalGeneration`] as the `generator`.
399
+
400
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
401
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
402
+ etc.)
403
+
404
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
405
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
406
+ and behavior.
407
+
408
+
409
+ Args:
410
+ config ([`RagConfig`]):
411
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
412
+ load the weights associated with the model, only the configuration. Check out the
413
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
414
+ question_encoder ([`PreTrainedModel`]):
415
+ An encoder model compatible with the faiss index encapsulated by the `retriever`.
416
+ generator ([`PreTrainedModel`]):
417
+ A seq2seq model used as the generator in the RAG architecture.
418
+ retriever ([`RagRetriever`]):
419
+ A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
420
+ """
421
+
422
+
423
+ RAG_FORWARD_INPUTS_DOCSTRING = r"""
424
+ Args:
425
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
426
+ Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
427
+ which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
428
+ obtain the indices.
429
+
430
+ [What are input IDs?](../glossary#input-ids)
431
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
432
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
433
+
434
+ - 1 for tokens that are **not masked**,
435
+ - 0 for tokens that are **masked**.
436
+
437
+ [What are attention masks?](../glossary#attention-mask)
438
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
439
+ Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
440
+ *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
441
+ sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
442
+ generator's encoder.
443
+
444
+ Used by the ([`RagModel`]) model during decoding.
445
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
446
+ Provide for generation tasks. `None` by default, construct as per instructions for the generator model
447
+ you're using with your RAG instance.
448
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
449
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
450
+ be used by default.
451
+ past_key_values (`tuple(tuple(torch.FloatTensor))`):
452
+ Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
453
+ `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used
454
+ in the ([`RagTokenForGeneration`]) model during decoding.
455
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
456
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
457
+ `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
458
+ has to be provided to the forward pass. `doc_scores` can be computed via
459
+ `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
460
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
461
+ Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
462
+ retriever. If the model was not initialized with a `retriever`, `context_input_ids` has to be provided to
463
+ the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
464
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*):
465
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
466
+ retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be
467
+ provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
468
+ use_cache (`bool`, *optional*, defaults to `True`):
469
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
470
+ `past_key_values`).
471
+ output_attentions (`bool`, *optional*):
472
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
473
+ tensors for more detail.
474
+ output_hidden_states (`bool`, *optional*):
475
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
476
+ more detail.
477
+ output_retrieved (`bool`, *optional*):
478
+ Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
479
+ `context_attention_mask`. See returned tensors for more detail.
480
+ n_docs (`int`, *optional*, defaults to `config.n_docs`):
481
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
482
+ """
483
+
484
+
485
+ @add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
486
+ class RagModel(RagPreTrainedModel):
487
+ def __init__(
488
+ self,
489
+ config: Optional[PretrainedConfig] = None,
490
+ question_encoder: Optional[PreTrainedModel] = None,
491
+ generator: Optional[PreTrainedModel] = None,
492
+ retriever: Optional[RagRetriever] = None, # or maybe just use a `set_retriever(...)` method
493
+ **kwargs,
494
+ ):
495
+ assert config is not None or (
496
+ question_encoder is not None and generator is not None
497
+ ), "Either a configuration or an question_encoder and a generator has to be provided."
498
+
499
+ if config is None:
500
+ config = RagConfig.from_question_encoder_generator_configs(
501
+ question_encoder.config, generator.config, **kwargs
502
+ )
503
+ else:
504
+ assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}"
505
+ super().__init__(config)
506
+ if question_encoder is None:
507
+ from ..auto.modeling_auto import AutoModel
508
+
509
+ question_encoder = AutoModel.from_config(config.question_encoder)
510
+
511
+ if generator is None:
512
+ from ..auto.modeling_auto import AutoModelForSeq2SeqLM
513
+
514
+ generator = AutoModelForSeq2SeqLM.from_config(config.generator)
515
+
516
+ self.retriever = retriever
517
+ if self.retriever is not None:
518
+ assert isinstance(
519
+ retriever, RagRetriever
520
+ ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`"
521
+ self.retriever = retriever
522
+
523
+ self.question_encoder = question_encoder
524
+ self.generator = generator
525
+
526
+ self.ctx_encoder = None
527
+ self.context_encoder_training = False
528
+
529
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
530
+ @replace_return_docstrings(output_type=RetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)
531
+ def forward(
532
+ self,
533
+ input_ids: Optional[torch.LongTensor] = None,
534
+ attention_mask: Optional[torch.Tensor] = None,
535
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
536
+ decoder_input_ids: Optional[torch.LongTensor] = None,
537
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
538
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
539
+ doc_scores: Optional[torch.FloatTensor] = None,
540
+ context_input_ids: Optional[torch.LongTensor] = None,
541
+ context_attention_mask: Optional[torch.LongTensor] = None,
542
+ use_cache: Optional[bool] = None,
543
+ output_attentions: Optional[bool] = None,
544
+ output_hidden_states: Optional[bool] = None,
545
+ output_retrieved: Optional[bool] = None,
546
+ n_docs: Optional[int] = None,
547
+ ) -> Union[Tuple[torch.Tensor], RetrievAugLMOutput]:
548
+ r"""
549
+ Returns:
550
+
551
+ Example:
552
+
553
+ ```python
554
+ >>> from transformers import AutoTokenizer, RagRetriever, RagModel
555
+ >>> import torch
556
+
557
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
558
+ >>> retriever = RagRetriever.from_pretrained(
559
+ ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
560
+ ... )
561
+ >>> # initialize with RagRetriever to do everything in one forward call
562
+ >>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever)
563
+
564
+ >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
565
+ >>> outputs = model(input_ids=inputs["input_ids"])
566
+ ```"""
567
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
568
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
569
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
570
+ output_hidden_states = (
571
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
572
+ )
573
+ output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved
574
+
575
+ # whether retriever has to be used
576
+ has_to_retrieve = (
577
+ self.retriever is not None
578
+ and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
579
+ and encoder_outputs is None
580
+ )
581
+ # encoder_outputs are pre-computed during RAG-token generation
582
+ if encoder_outputs is None:
583
+ if has_to_retrieve:
584
+ question_enc_outputs = self.question_encoder(
585
+ input_ids, attention_mask=attention_mask, return_dict=True
586
+ )
587
+ question_encoder_last_hidden_state = question_enc_outputs[0] # hidden states of question encoder
588
+
589
+ retriever_outputs = self.retriever(
590
+ input_ids,
591
+ question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(),
592
+ prefix=self.generator.config.prefix,
593
+ n_docs=n_docs,
594
+ return_tensors="pt",
595
+ )
596
+ if self.context_encoder_training:
597
+ (
598
+ context_input_ids,
599
+ context_attention_mask,
600
+ retrieved_doc_embeds,
601
+ retrived_doc_input_ids,
602
+ retrived_doc_attention_mask,
603
+ retrieved_doc_ids,
604
+ ) = (
605
+ retriever_outputs["context_input_ids"],
606
+ retriever_outputs["context_attention_mask"],
607
+ retriever_outputs["retrieved_doc_embeds"],
608
+ retriever_outputs["tokenized_doc_ids"],
609
+ retriever_outputs["tokenized_doc_attention_mask"],
610
+ retriever_outputs["doc_ids"],
611
+ )
612
+
613
+ context_input_ids = context_input_ids.to(input_ids)
614
+ context_attention_mask = context_attention_mask.to(input_ids)
615
+
616
+ retrived_doc_input_ids = retrived_doc_input_ids.to(input_ids)
617
+ retrived_doc_attention_mask = retrived_doc_attention_mask.to(input_ids)
618
+ retrieved_doc_embeds = self.ctx_encoder(
619
+ retrived_doc_input_ids, attention_mask=retrived_doc_attention_mask, return_dict=True
620
+ ).pooler_output
621
+ retrieved_doc_embeds = retrieved_doc_embeds.view(
622
+ -1, n_docs, question_encoder_last_hidden_state.shape[1]
623
+ ) # reshaping
624
+
625
+ # compute doc_scores involving ctx_encoder
626
+ doc_scores = torch.bmm(
627
+ question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
628
+ ).squeeze(1)
629
+
630
+ else:
631
+ context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (
632
+ retriever_outputs["context_input_ids"],
633
+ retriever_outputs["context_attention_mask"],
634
+ retriever_outputs["retrieved_doc_embeds"],
635
+ retriever_outputs["doc_ids"],
636
+ )
637
+
638
+ # set to correct device
639
+ retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state)
640
+ context_input_ids = context_input_ids.to(input_ids)
641
+ context_attention_mask = context_attention_mask.to(input_ids)
642
+
643
+ # compute doc_scores
644
+ doc_scores = torch.bmm(
645
+ question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
646
+ ).squeeze(1)
647
+ else:
648
+ assert context_input_ids is not None, (
649
+ "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can"
650
+ " set a retriever using the `set_retriever(...)` function."
651
+ )
652
+ assert context_attention_mask is not None, (
653
+ "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you"
654
+ " can set a retriever using the `set_retriever(...)` function."
655
+ )
656
+ assert doc_scores is not None, (
657
+ "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a"
658
+ " retriever using the `set_retriever(...)` function."
659
+ )
660
+
661
+ assert (
662
+ doc_scores is not None
663
+ ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."
664
+
665
+ assert (doc_scores.shape[1] % n_docs) == 0, (
666
+ f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is"
667
+ f" {context_input_ids.shape[0]}."
668
+ )
669
+
670
+ # Decoder input without context documents
671
+ if decoder_input_ids is not None:
672
+ decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)
673
+
674
+ if decoder_attention_mask is not None:
675
+ decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)
676
+
677
+ gen_outputs = self.generator(
678
+ input_ids=context_input_ids,
679
+ attention_mask=context_attention_mask,
680
+ encoder_outputs=encoder_outputs,
681
+ decoder_input_ids=decoder_input_ids,
682
+ decoder_attention_mask=decoder_attention_mask,
683
+ past_key_values=past_key_values,
684
+ use_cache=use_cache,
685
+ output_attentions=output_attentions,
686
+ return_dict=True,
687
+ )
688
+
689
+ if not has_to_retrieve:
690
+ question_encoder_last_hidden_state = None
691
+ question_enc_hidden_states = None
692
+ question_enc_attentions = None
693
+ retrieved_doc_embeds = None
694
+ retrieved_doc_ids = None
695
+ else:
696
+ question_enc_hidden_states = question_enc_outputs.hidden_states
697
+ question_enc_attentions = question_enc_outputs.attentions
698
+
699
+ if not has_to_retrieve or not output_retrieved:
700
+ # don't output retrieved docs
701
+ context_input_ids = None
702
+ context_attention_mask = None
703
+ retrieved_doc_embeds = None
704
+ retrieved_doc_ids = None
705
+
706
+ return RetrievAugLMOutput(
707
+ logits=gen_outputs.logits,
708
+ doc_scores=doc_scores,
709
+ past_key_values=gen_outputs.past_key_values,
710
+ context_input_ids=context_input_ids,
711
+ context_attention_mask=context_attention_mask,
712
+ retrieved_doc_embeds=retrieved_doc_embeds,
713
+ retrieved_doc_ids=retrieved_doc_ids,
714
+ question_encoder_last_hidden_state=question_encoder_last_hidden_state,
715
+ question_enc_hidden_states=question_enc_hidden_states,
716
+ question_enc_attentions=question_enc_attentions,
717
+ generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
718
+ generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
719
+ generator_enc_attentions=gen_outputs.encoder_attentions,
720
+ generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
721
+ generator_dec_attentions=gen_outputs.decoder_attentions,
722
+ generator_cross_attentions=gen_outputs.cross_attentions,
723
+ )
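Because the generator operates on the `batch_size * n_docs` contextualized sequences, the document dimension is folded into the batch axis of the generator outputs. A brief follow-up to the docstring example above (reusing its `model` and `inputs`; shapes shown as a sketch):

```python
outputs = model(input_ids=inputs["input_ids"])

# logits:     (batch_size * n_docs, target_length, vocab_size)
# doc_scores: (batch_size, n_docs)
print(outputs.logits.shape, outputs.doc_scores.shape)
```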
724
+
725
+
726
+ @add_start_docstrings_to_model_forward(
727
+ """
728
+ A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
729
+ """,
730
+ RAG_START_DOCSTRING,
731
+ )
732
+ class RagSequenceForGeneration(RagPreTrainedModel):
733
+ def __init__(
734
+ self,
735
+ config: Optional[PretrainedConfig] = None,
736
+ question_encoder: Optional[PreTrainedModel] = None,
737
+ generator: Optional[PreTrainedModel] = None,
738
+ retriever: Optional[RagRetriever] = None,
739
+ **kwargs,
740
+ ):
741
+ assert config is not None or (
742
+ question_encoder is not None and generator is not None
743
+ ), "Either a configuration or an encoder and a generator has to be provided."
744
+
745
+ if config is None:
746
+ config = RagConfig.from_question_encoder_generator_configs(
747
+ question_encoder.config, generator.config, **kwargs
748
+ )
749
+ super().__init__(config)
750
+
751
+ # instantiate model
752
+ self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
753
+
754
+ def set_retriever(self, retriever: RagRetriever):
755
+ self.rag.retriever = retriever
756
+
757
+ def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
758
+ self.rag.context_encoder_training = True
759
+ self.rag.ctx_encoder = ctx_encoder
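`set_context_encoder_for_training` enables the `ctx_encoder` branch of `RagModel.forward` above, so retrieved passages are re-embedded with a trainable document encoder. A hedged sketch, assuming a standard DPR context-encoder checkpoint and a `model` set up as in the docstrings:

```python
from transformers import DPRContextEncoder

ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
model.set_context_encoder_for_training(ctx_encoder)
# Subsequent forward passes re-embed the retrieved passages with `ctx_encoder`,
# letting gradients flow into the document encoder as well.
```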
760
+
761
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
762
+ @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
763
+ def forward(
764
+ self,
765
+ input_ids: Optional[torch.LongTensor] = None,
766
+ attention_mask: Optional[torch.Tensor] = None,
767
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
768
+ decoder_input_ids: Optional[torch.LongTensor] = None,
769
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
770
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
771
+ context_input_ids: Optional[torch.LongTensor] = None,
772
+ context_attention_mask: Optional[torch.LongTensor] = None,
773
+ doc_scores: Optional[torch.FloatTensor] = None,
774
+ use_cache: Optional[bool] = None,
775
+ output_attentions: Optional[bool] = None,
776
+ output_hidden_states: Optional[bool] = None,
777
+ output_retrieved: Optional[bool] = None,
778
+ exclude_bos_score: Optional[bool] = None,
779
+ reduce_loss: Optional[bool] = None,
780
+ labels: Optional[torch.LongTensor] = None,
781
+ n_docs: Optional[int] = None,
782
+ **kwargs, # needs kwargs for generation
783
+ ) -> RetrievAugLMMarginOutput:
784
+ r"""
785
+ exclude_bos_score (`bool`, *optional*):
786
+ Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
787
+ the loss.
788
+ reduce_loss (`bool`, *optional*):
789
+ Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
790
+ operation.
791
+ kwargs (`Dict[str, Any]`, *optional*, defaults to `{}`):
792
+ Legacy dictionary, which is required so that model can use *generate()* function.
793
+
794
+ Returns:
795
+
796
+ Example:
797
+
798
+ ```python
799
+ >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration
800
+ >>> import torch
801
+
802
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
803
+ >>> retriever = RagRetriever.from_pretrained(
804
+ ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
805
+ ... )
806
+ >>> # initialize with RagRetriever to do everything in one forward call
807
+ >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
808
+
809
+ >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
810
+ >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
811
+ >>> input_ids = inputs["input_ids"]
812
+ >>> labels = targets["input_ids"]
813
+ >>> outputs = model(input_ids=input_ids, labels=labels)
814
+
815
+ >>> # or use retriever separately
816
+ >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True)
817
+ >>> # 1. Encode
818
+ >>> question_hidden_states = model.question_encoder(input_ids)[0]
819
+ >>> # 2. Retrieve
820
+ >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
821
+ >>> doc_scores = torch.bmm(
822
+ ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
823
+ ... ).squeeze(1)
824
+ >>> # 3. Forward to generator
825
+ >>> outputs = model(
826
+ ... context_input_ids=docs_dict["context_input_ids"],
827
+ ... context_attention_mask=docs_dict["context_attention_mask"],
828
+ ... doc_scores=doc_scores,
829
+ ... decoder_input_ids=labels,
830
+ ... )
831
+ ```"""
832
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
833
+ exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
834
+ reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
835
+
836
+ if labels is not None:
837
+ if decoder_input_ids is None:
838
+ decoder_input_ids = labels
839
+ use_cache = False
840
+
841
+ outputs = self.rag(
842
+ input_ids=input_ids,
843
+ attention_mask=attention_mask,
844
+ encoder_outputs=encoder_outputs,
845
+ decoder_input_ids=decoder_input_ids,
846
+ decoder_attention_mask=decoder_attention_mask,
847
+ context_input_ids=context_input_ids,
848
+ context_attention_mask=context_attention_mask,
849
+ doc_scores=doc_scores,
850
+ past_key_values=past_key_values,
851
+ use_cache=use_cache,
852
+ output_attentions=output_attentions,
853
+ output_hidden_states=output_hidden_states,
854
+ output_retrieved=output_retrieved,
855
+ n_docs=n_docs,
856
+ )
857
+
858
+ loss = None
859
+ if labels is not None:
860
+ loss = self.get_nll(
861
+ outputs.logits,
862
+ outputs.doc_scores,
863
+ decoder_input_ids,
864
+ reduce_loss=reduce_loss,
865
+ epsilon=self.config.label_smoothing,
866
+ exclude_bos_score=exclude_bos_score,
867
+ n_docs=n_docs,
868
+ )
869
+
870
+ return RetrievAugLMMarginOutput(
871
+ loss=loss,
872
+ logits=outputs.logits,
873
+ doc_scores=outputs.doc_scores,
874
+ past_key_values=outputs.past_key_values,
875
+ context_input_ids=outputs.context_input_ids,
876
+ context_attention_mask=outputs.context_attention_mask,
877
+ retrieved_doc_embeds=outputs.retrieved_doc_embeds,
878
+ retrieved_doc_ids=outputs.retrieved_doc_ids,
879
+ question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
880
+ question_enc_hidden_states=outputs.question_enc_hidden_states,
881
+ question_enc_attentions=outputs.question_enc_attentions,
882
+ generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
883
+ generator_enc_hidden_states=outputs.generator_enc_hidden_states,
884
+ generator_enc_attentions=outputs.generator_enc_attentions,
885
+ generator_dec_hidden_states=outputs.generator_dec_hidden_states,
886
+ generator_dec_attentions=outputs.generator_dec_attentions,
887
+ generator_cross_attentions=outputs.generator_cross_attentions,
888
+ )
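For orientation, here is a conceptual sketch of the RAG-sequence marginalization mentioned in the class docstring. It is not the `get_nll` implementation used above (which also handles label smoothing, padding and the BOS score); it only shows how per-document sequence log-likelihoods and document scores combine into the marginal likelihood:

```python
import torch


def rag_sequence_nll(per_doc_seq_logprobs: torch.Tensor, doc_scores: torch.Tensor) -> torch.Tensor:
    """per_doc_seq_logprobs: (batch, n_docs) target log-likelihoods, summed over target tokens.
    doc_scores: (batch, n_docs) raw retrieval scores."""
    doc_logprobs = torch.log_softmax(doc_scores, dim=-1)                     # log p(z | x)
    marginal = torch.logsumexp(per_doc_seq_logprobs + doc_logprobs, dim=-1)  # log p(y | x)
    return -marginal.sum()


# Toy call with random scores, just to show the expected shapes.
loss = rag_sequence_nll(torch.randn(2, 5) - 5.0, torch.randn(2, 5))
```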
889
+
890
+ @property
891
+ def retriever(self):
892
+ return self.rag.retriever
893
+
894
+ @property
895
+ def generator(self):
896
+ return self.rag.generator
897
+
898
+ @property
899
+ def question_encoder(self):
900
+ return self.rag.question_encoder
901
+
902
+ @torch.no_grad()
903
+ def generate(
904
+ self,
905
+ input_ids: Optional[torch.LongTensor] = None,
906
+ attention_mask: Optional[torch.LongTensor] = None,
907
+ context_input_ids: Optional[torch.LongTensor] = None,
908
+ context_attention_mask: Optional[torch.LongTensor] = None,
909
+ doc_scores: Optional[torch.FloatTensor] = None,
910
+ do_deduplication: Optional[bool] = None, # defaults to True
911
+ num_return_sequences: Optional[int] = None, # defaults to 1
912
+ num_beams: Optional[int] = None, # defaults to 1
913
+ n_docs: Optional[int] = None,
914
+ **model_kwargs,
915
+ ) -> torch.LongTensor:
916
+ """
917
+ Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`] documentation
918
+ for more information on how to set other generate input parameters.
919
+
920
+ Args:
921
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
922
+ The sequence used as a prompt for the generation. If `input_ids` is not passed, then
923
+ `context_input_ids` has to be provided.
924
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
925
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
926
+
927
+ - 1 for tokens that are **not masked**,
928
+ - 0 for tokens that are **masked**.
929
+
930
+ [What are attention masks?](../glossary#attention-mask)
931
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
932
+ Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
933
+ retriever.
934
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
935
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
936
+ retriever.
937
+
938
+ If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and
939
+ `context_attention_mask` have to be provided to the forward pass. They are returned by
940
+ [`~RagRetriever.__call__`].
941
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
942
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
943
+ `question_encoder_last_hidden_state`.
944
+
945
+ If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be
946
+ provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
947
+ do_deduplication (`bool`, *optional*):
948
+ Whether or not to deduplicate the generations from different context documents for a given input. Has
949
+ to be set to `False` if used while training with a distributed backend.
950
+ num_return_sequences (`int`, *optional*, defaults to 1):
951
+ The number of independently computed returned sequences for each element in the batch. Note that this
952
+ is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function,
953
+ where we set `num_return_sequences` to `num_beams`.
954
+ num_beams (`int`, *optional*, defaults to 1):
955
+ Number of beams for beam search. 1 means no beam search.
956
+ n_docs (`int`, *optional*, defaults to `config.n_docs`):
957
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
958
+ kwargs (`Dict[str, Any]`, *optional*):
959
+ Additional kwargs will be passed to [`~generation.GenerationMixin.generate`].
960
+
961
+ Return:
962
+ `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
963
+ sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches
964
+ finished early due to the `eos_token_id`.
965
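+
+ Example (an illustrative sketch, assuming the `facebook/rag-sequence-nq` checkpoint and a dummy-dataset
+ retriever):
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
+ >>> retriever = RagRetriever.from_pretrained(
+ ...     "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
+
+ >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
+ >>> generated = model.generate(input_ids=inputs["input_ids"], num_beams=2)
+ >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
+ ```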
+ """
966
+
967
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
968
+ do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
969
+ num_doc_return_sequences = (
970
+ num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
971
+ )
972
+ num_beams = num_beams if num_beams is not None else self.config.num_beams
973
+
974
+ assert (
975
+ input_ids is not None or context_input_ids is not None
976
+ ), " At least one of input_ids or context_input_ids must be given"
977
+
978
+ if self.retriever is not None and context_input_ids is None:
979
+ question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
980
+ context_input_ids = self.retriever(
981
+ input_ids,
982
+ question_hidden_states.cpu().detach().to(torch.float32).numpy(),
983
+ prefix=self.generator.config.prefix,
984
+ n_docs=n_docs,
985
+ return_tensors="pt",
986
+ )["context_input_ids"]
987
+
988
+ # set to correct device
989
+ context_input_ids = context_input_ids.to(input_ids)
990
+
991
+ hypos = []
992
+ model_kwargs["num_beams"] = num_beams
993
+ model_kwargs["num_return_sequences"] = num_beams
994
+ model_kwargs["attention_mask"] = None
995
+
996
+ batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
997
+
998
+ for index in range(batch_size):
999
+ # first, generate beams from documents:
1000
+ generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len)
1001
+
1002
+ output_sequences = self.generator.generate(
1003
+ generator_input_ids,
1004
+ **model_kwargs,
1005
+ ) # n_docs * n_beam, tgt_len
1006
+ if do_deduplication:
1007
+ # deduplicate hypotheses that are identical across the retrieved documents
1008
+ output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values()))
1009
+
1010
+ num_candidates = output_sequences.shape[
1011
+ 0
1012
+ ] # after deduplication, this number can be less than n_docs*n_beam
1013
+
1014
+ # then, run model forwards to get nll scores:
1015
+ if input_ids is not None:
1016
+ new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1)
1017
+ outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
1018
+ else: # input_ids is None, need context_input_ids/mask and doc_scores
1019
+ assert context_attention_mask is not None, (
1020
+ "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you"
1021
+ " can set a retriever using the `set_retriever(...)` function."
1022
+ )
1023
+ assert doc_scores is not None, (
1024
+ "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a"
1025
+ " retriever using the `set_retriever(...)` function."
1026
+ )
1027
+
1028
+ individual_input_ids = generator_input_ids.repeat(
1029
+ num_candidates, 1
1030
+ ) # (num_candidates*n_docs, max_len)
1031
+
1032
+ individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
1033
+ individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)
1034
+
1035
+ individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs]
1036
+ individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1) # [num_candidates, n_docs]
1037
+
1038
+ outputs = self(
1039
+ context_input_ids=individual_input_ids,
1040
+ context_attention_mask=individual_attention_mask,
1041
+ doc_scores=individual_doc_scores,
1042
+ labels=output_sequences,
1043
+ exclude_bos_score=True,
1044
+ )
1045
+
1046
+ top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1]
1047
+
1048
+ # add hypothesis
1049
+ hypos.append(output_sequences[top_cand_inds])
1050
+
1051
+ return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)
1052
+
1053
+ def get_nll(
1054
+ self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
1055
+ ):
1056
+ # shift tokens left
1057
+ target = torch.cat(
1058
+ [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
1059
+ )
1060
+
1061
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
1062
+
1063
+ # bos_token_id is None for T5
1064
+ bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
1065
+ use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all()
1066
+
1067
+ def _mask_pads(ll, smooth_obj):
1068
+ pad_mask = target.eq(self.config.generator.pad_token_id)
1069
+ if pad_mask.any():
1070
+ ll.masked_fill_(pad_mask, 0.0)
1071
+ smooth_obj.masked_fill_(pad_mask, 0.0)
1072
+ return ll.squeeze(-1), smooth_obj.squeeze(-1)
1073
+
1074
+ # seq_logits dim = (batch*n_docs, tgt_len , #vocabs)
1075
+ seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(
1076
+ seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
1077
+ ) # batch_size x n_docs x tgt_len x #vocab_size
1078
+ doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1)
1079
+
1080
+ # RAG-sequence marginalization
1081
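+ # The document prior log p(z|x) (`doc_logprobs`) is a per-sequence constant, so it only needs to be
+ # counted once in the token-wise sum; it is added to the second token's scores rather than the first
+ # so that it survives when the BOS score is excluded via `exclude_bos_score` below.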
+ first_token_scores = seq_logprobs[:, :, :1, :]
1082
+ second_token_scores = seq_logprobs[:, :, 1:2, :]
1083
+ remainder = seq_logprobs[:, :, 2:, :]
1084
+ rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2)
1085
+
1086
+ # calculate loss
1087
+ target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)
1088
+ assert target.dim() == rag_logprobs.dim()
1089
+
1090
+ ll = rag_logprobs.gather(dim=-1, index=target)
1091
+ smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits
1092
+
1093
+ ll, smooth_obj = _mask_pads(ll, smooth_obj)
1094
+
1095
+ # sum over tokens, exclude bos while scoring
1096
+ ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)
1097
+ smooth_obj = smooth_obj.sum(2)
1098
+ ll = ll.logsumexp(1) # logsumexp over docs
1099
+ smooth_obj = smooth_obj.logsumexp(1)
1100
+
1101
+ nll_loss = -ll
1102
+ smooth_loss = -smooth_obj
1103
+
1104
+ if reduce_loss:
1105
+ nll_loss = nll_loss.sum()
1106
+ smooth_loss = smooth_loss.sum()
1107
+
1108
+ eps_i = epsilon / rag_logprobs.size(-1)
1109
+ loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
1110
+ return loss
1111
+
1112
+ @staticmethod
1113
+ def _cat_and_pad(tensors, pad_token_id):
1114
+ output = (
1115
+ tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id)
1116
+ )
1117
+ ind = 0
1118
+ for t in tensors:
1119
+ output[ind : ind + t.shape[0], : t.shape[1]] = t
1120
+ ind += t.shape[0]
1121
+ return output
1122
+
1123
+
1124
+ @add_start_docstrings_to_model_forward(
1125
+ """
1126
+ A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
1127
+ """,
1128
+ RAG_START_DOCSTRING,
1129
+ )
1130
+ class RagTokenForGeneration(RagPreTrainedModel):
1131
+ def __init__(
1132
+ self,
1133
+ config: Optional[PretrainedConfig] = None,
1134
+ question_encoder: Optional[PreTrainedModel] = None,
1135
+ generator: Optional[PreTrainedModel] = None,
1136
+ retriever: Optional[RagRetriever] = None,
1137
+ **kwargs,
1138
+ ):
1139
+ assert config is not None or (
1140
+ question_encoder is not None and generator is not None
1141
+ ), "Either a configuration or an encoder and a generator has to be provided."
1142
+
1143
+ if config is None:
1144
+ config = RagConfig.from_question_encoder_generator_configs(
1145
+ question_encoder.config, generator.config, **kwargs
1146
+ )
1147
+
1148
+ super().__init__(config)
1149
+
1150
+ # instantiate model
1151
+ self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
1152
+
1153
+ def set_retriever(self, retriever: RagRetriever):
1154
+ self.rag.retriever = retriever
1155
+
1156
+ def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
1157
+ self.rag.context_encoder_training = True
1158
+ self.rag.ctx_encoder = ctx_encoder
1159
+
1160
+ def prepare_inputs_for_generation(
1161
+ self,
1162
+ decoder_input_ids,
1163
+ past_key_values=None,
1164
+ attention_mask=None,
1165
+ use_cache=None,
1166
+ encoder_outputs=None,
1167
+ doc_scores=None,
1168
+ n_docs=None,
1169
+ **kwargs,
1170
+ ):
1171
+ if past_key_values is not None:
1172
+ # if past is defined use only last decoder_input_ids
1173
+ decoder_input_ids = decoder_input_ids[:, -1:]
1174
+
1175
+ return {
1176
+ "input_ids": None,
1177
+ "encoder_outputs": encoder_outputs,
1178
+ "doc_scores": doc_scores,
1179
+ "context_attention_mask": attention_mask,
1180
+ "decoder_input_ids": decoder_input_ids,
1181
+ "past_key_values": past_key_values,
1182
+ "use_cache": use_cache,
1183
+ "do_marginalize": True,
1184
+ "n_docs": n_docs,
1185
+ }
1186
+
1187
+ @property
1188
+ def retriever(self):
1189
+ return self.rag.retriever
1190
+
1191
+ @property
1192
+ def generator(self):
1193
+ return self.rag.generator
1194
+
1195
+ @property
1196
+ def question_encoder(self):
1197
+ return self.rag.question_encoder
1198
+
1199
+ @staticmethod
1200
+ def _reorder_cache(past_key_values, beam_idx):
1201
+ """Reorders cache for generation. BART-inspired but we need to take care of the extra dimension for docs"""
1202
+
1203
+ def _reorder_stacked(hidden_states, new_order):
1204
+ n_docs = hidden_states.shape[0] // new_order.shape[0]
1205
+ hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:])
1206
+ hidden_states = hidden_states.index_select(0, new_order)
1207
+ result = hidden_states.view(-1, *hidden_states.shape[2:])
1208
+ return result
1209
+
1210
+ reordered_past = ()
1211
+ for layer_past in past_key_values:
1212
+ # get the correct batch idx from decoder layer's batch dim for cross and self-attn
1213
+ reordered_past += (
1214
+ tuple(_reorder_stacked(past_state, beam_idx.to(past_state.device)) for past_state in layer_past),
1215
+ )
1216
+
1217
+ return reordered_past
1218
+
1219
+ def marginalize(self, seq_logits, doc_scores, n_docs=None):
1220
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
1221
+
1222
+ # RAG-token marginalization
1223
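+ # Each target position is marginalized over the retrieved documents independently:
+ #     log p(y_t | x, y_<t) = logsumexp_z [ log p(z | x) + log p(y_t | x, z, y_<t) ]
+ # `seq_logprobs` is reshaped to (batch_size, n_docs, tgt_len, vocab_size) so that the logsumexp
+ # below reduces the documents dimension (dim=1).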
+ seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(
1224
+ seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
1225
+ )
1226
+ doc_logprobs = torch.log_softmax(doc_scores, dim=1)
1227
+ log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1)
1228
+ return torch.logsumexp(log_prob_sum, dim=1)
1229
+
1230
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
1231
+ @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
1232
+ def forward(
1233
+ self,
1234
+ input_ids: Optional[torch.LongTensor] = None,
1235
+ attention_mask: Optional[torch.FloatTensor] = None,
1236
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1237
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1238
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
1239
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1240
+ context_input_ids: Optional[torch.LongTensor] = None,
1241
+ context_attention_mask: Optional[torch.LongTensor] = None,
1242
+ doc_scores: Optional[torch.FloatTensor] = None,
1243
+ use_cache: Optional[bool] = None,
1244
+ output_attentions: Optional[bool] = None,
1245
+ output_hidden_states: Optional[bool] = None,
1246
+ output_retrieved: Optional[bool] = None,
1247
+ do_marginalize: Optional[bool] = None,
1248
+ reduce_loss: Optional[bool] = None,
1249
+ labels: Optional[torch.LongTensor] = None,
1250
+ n_docs: Optional[int] = None,
1251
+ **kwargs, # needs kwargs for generation
1252
+ ) -> RetrievAugLMMarginOutput:
1253
+ r"""
1254
+ do_marginalize (`bool`, *optional*):
1255
+ If `True`, the logits are marginalized over all documents by making use of
1256
+ `torch.nn.functional.log_softmax`.
1257
+ reduce_loss (`bool`, *optional*):
1258
+ Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
1259
+ operation.
1260
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1261
+ Legacy dictionary, which is required so that the model can use the *generate()* function.
1262
+
1263
+ Returns:
1264
+
1265
+ Example:
1266
+
1267
+ ```python
1268
+ >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration
1269
+ >>> import torch
1270
+
1271
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
1272
+ >>> retriever = RagRetriever.from_pretrained(
1273
+ ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
1274
+ ... )
1275
+ >>> # initialize with RagRetriever to do everything in one forward call
1276
+ >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
1277
+
1278
+ >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
1279
+ >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
1280
+ >>> input_ids = inputs["input_ids"]
1281
+ >>> labels = targets["input_ids"]
1282
+ >>> outputs = model(input_ids=input_ids, labels=labels)
1283
+
1284
+ >>> # or use retriever separately
1285
+ >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True)
1286
+ >>> # 1. Encode
1287
+ >>> question_hidden_states = model.question_encoder(input_ids)[0]
1288
+ >>> # 2. Retrieve
1289
+ >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
1290
+ >>> doc_scores = torch.bmm(
1291
+ ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
1292
+ ... ).squeeze(1)
1293
+ >>> # 3. Forward to generator
1294
+ >>> outputs = model(
1295
+ ... context_input_ids=docs_dict["context_input_ids"],
1296
+ ... context_attention_mask=docs_dict["context_attention_mask"],
1297
+ ... doc_scores=doc_scores,
1298
+ ... decoder_input_ids=labels,
1299
+ ... )
1300
+
1301
+ >>> # or directly generate
1302
+ >>> generated = model.generate(
1303
+ ... context_input_ids=docs_dict["context_input_ids"],
1304
+ ... context_attention_mask=docs_dict["context_attention_mask"],
1305
+ ... doc_scores=doc_scores,
1306
+ ... )
1307
+ >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
1308
+ ```"""
1309
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
1310
+ do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize
1311
+ reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
1312
+
1313
+ if labels is not None:
1314
+ if decoder_input_ids is None:
1315
+ decoder_input_ids = labels
1316
+ use_cache = False
1317
+
1318
+ outputs = self.rag(
1319
+ input_ids=input_ids,
1320
+ attention_mask=attention_mask,
1321
+ encoder_outputs=encoder_outputs,
1322
+ decoder_input_ids=decoder_input_ids,
1323
+ decoder_attention_mask=decoder_attention_mask,
1324
+ context_input_ids=context_input_ids,
1325
+ context_attention_mask=context_attention_mask,
1326
+ doc_scores=doc_scores,
1327
+ past_key_values=past_key_values,
1328
+ use_cache=use_cache,
1329
+ output_attentions=output_attentions,
1330
+ output_hidden_states=output_hidden_states,
1331
+ output_retrieved=output_retrieved,
1332
+ n_docs=n_docs,
1333
+ )
1334
+
1335
+ loss = None
1336
+ logits = outputs.logits
1337
+ if labels is not None:
1338
+ assert decoder_input_ids is not None
1339
+ loss = self.get_nll(
1340
+ outputs.logits,
1341
+ outputs.doc_scores,
1342
+ labels,
1343
+ reduce_loss=reduce_loss,
1344
+ epsilon=self.config.label_smoothing,
1345
+ n_docs=n_docs,
1346
+ )
1347
+
1348
+ if do_marginalize:
1349
+ logits = self.marginalize(logits, outputs.doc_scores, n_docs)
1350
+
1351
+ return RetrievAugLMMarginOutput(
1352
+ loss=loss,
1353
+ logits=logits,
1354
+ doc_scores=outputs.doc_scores,
1355
+ past_key_values=outputs.past_key_values,
1356
+ context_input_ids=outputs.context_input_ids,
1357
+ context_attention_mask=outputs.context_attention_mask,
1358
+ retrieved_doc_embeds=outputs.retrieved_doc_embeds,
1359
+ retrieved_doc_ids=outputs.retrieved_doc_ids,
1360
+ question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
1361
+ question_enc_hidden_states=outputs.question_enc_hidden_states,
1362
+ question_enc_attentions=outputs.question_enc_attentions,
1363
+ generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
1364
+ generator_enc_hidden_states=outputs.generator_enc_hidden_states,
1365
+ generator_enc_attentions=outputs.generator_enc_attentions,
1366
+ generator_dec_hidden_states=outputs.generator_dec_hidden_states,
1367
+ generator_dec_attentions=outputs.generator_dec_attentions,
1368
+ generator_cross_attentions=outputs.generator_cross_attentions,
1369
+ )
1370
+
1371
+ @torch.no_grad()
1372
+ def generate(
1373
+ self,
1374
+ input_ids: Optional[torch.LongTensor] = None,
1375
+ attention_mask: Optional[torch.LongTensor] = None,
1376
+ context_input_ids: Optional[torch.LongTensor] = None,
1377
+ context_attention_mask: Optional[torch.LongTensor] = None,
1378
+ doc_scores: Optional[torch.FloatTensor] = None,
1379
+ n_docs: Optional[int] = None,
1380
+ generation_config: Optional[GenerationConfig] = None,
1381
+ prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]] = None,
1382
+ logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(),
1383
+ stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(),
1384
+ **kwargs,
1385
+ ) -> torch.LongTensor:
1386
+ """
1387
+ Implements RAG token decoding.
1388
+
1389
+ Args:
1390
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1391
+ The sequence used as a prompt for the generation. If `input_ids` is not passed, then
1392
+ `context_input_ids` has to be provided.
1393
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1394
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1395
+
1396
+ - 1 for tokens that are **not masked**,
1397
+ - 0 for tokens that are **masked**.
1398
+
1399
+ [What are attention masks?](../glossary#attention-mask)
1400
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
1401
+ Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
1402
+ retriever.
1403
+
1404
+ If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
1405
+ forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
1406
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
1407
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
1408
+ retriever.
1409
+
1410
+ If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
1411
+ forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
1412
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
1413
+ Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
1414
+ `question_encoder_last_hidden_state`.
1415
+
1416
+ If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
1417
+ forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
1418
+ n_docs (`int`, *optional*, defaults to `config.n_docs`):
1419
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
1420
+ generation_config (`~generation.GenerationConfig`, *optional*):
1421
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
1422
+ passed to generate matching the attributes of `generation_config` will override them. If
1423
+ `generation_config` is not provided, the default will be used, which has the following loading
1424
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
1425
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
1426
+ default values, whose documentation should be checked to parameterize generation.
1427
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
1428
+ If provided, this function constrains the beam search to allowed tokens only at each step. If not
1429
+ provided no constraint is applied. This function takes 2 arguments `inputs_ids` and the batch ID
1430
+ `batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on
1431
+ the previously generated tokens `inputs_ids` and the batch ID `batch_id`. This argument is useful for
1432
+ constrained generation conditioned on the prefix, as described in [Autoregressive Entity
1433
+ Retrieval](https://arxiv.org/abs/2010.00904).
1434
+ logits_processor (`LogitsProcessorList`, *optional*):
1435
+ Custom logits processors that complement the default logits processors built from arguments and a
1436
+ model's config. If a logit processor is passed that is already created with the arguments or a model's
1437
+ config, an error is thrown.
1438
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
1439
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
1440
+ model's config. If a stopping criterion is passed that is already created with the arguments or a
1441
+ model's config, an error is thrown.
1442
+ kwargs (`Dict[str, Any]`, *optional*):
1443
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
1444
+ forwarded to the `forward` function of the model.
1445
+
1446
+ Return:
1447
+ `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
1448
+ sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches
1449
+ finished early due to the `eos_token_id`.
1450
+ """
1451
+ # Handle `generation_config` and kwargs that might update it
1452
+ if generation_config is None:
1453
+ generation_config = self.generation_config
1454
+ generation_config = copy.deepcopy(generation_config)
1455
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
1456
+
1457
+ # set default parameters
1458
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
1459
+
1460
+ # retrieve docs
1461
+ if self.retriever is not None and context_input_ids is None:
1462
+ question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
1463
+ out = self.retriever(
1464
+ input_ids,
1465
+ question_hidden_states.cpu().detach().to(torch.float32).numpy(),
1466
+ prefix=self.generator.config.prefix,
1467
+ n_docs=n_docs,
1468
+ return_tensors="pt",
1469
+ )
1470
+ context_input_ids, context_attention_mask, retrieved_doc_embeds = (
1471
+ out["context_input_ids"],
1472
+ out["context_attention_mask"],
1473
+ out["retrieved_doc_embeds"],
1474
+ )
1475
+
1476
+ # set to correct device
1477
+ retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
1478
+ context_input_ids = context_input_ids.to(input_ids)
1479
+ context_attention_mask = context_attention_mask.to(input_ids)
1480
+
1481
+ # compute doc_scores
1482
+ doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
1483
+ 1
1484
+ )
1485
+
1486
+ assert (context_input_ids.shape[0] % n_docs) == 0, (
1487
+ f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is"
1488
+ f" {context_input_ids.shape[0]}."
1489
+ )
1490
+
1491
+ # batch_size
1492
+ batch_size = context_input_ids.shape[0] // n_docs
1493
+
1494
+ encoder = self.rag.generator.get_encoder()
1495
+ encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True)
1496
+
1497
+ input_ids = torch.full(
1498
+ (batch_size * generation_config.num_beams, 1),
1499
+ generation_config.decoder_start_token_id,
1500
+ dtype=torch.long,
1501
+ device=next(self.parameters()).device,
1502
+ )
1503
+ input_ids_seq_length = input_ids.shape[-1]
1504
+ last_hidden_state = encoder_outputs["last_hidden_state"]
1505
+
1506
+ def extend_enc_output(tensor, num_beams=None):
1507
+ # split into `batch_size`, `num_beams`, `num_docs`
1508
+ tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:])
1509
+ # repeat same last hidden states over `num_beams` dimension
1510
+ tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:])
1511
+ # merge `batch_size`, `num_beams`, `num_docs` dims again
1512
+ return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:])
1513
+
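+ # `extend_enc_output` lays the encoder states out as (batch_size * num_beams * n_docs, ...), which is
+ # consistent with `doc_scores.repeat_interleave(num_beams, dim=0)` below: beams of the same example
+ # stay adjacent and each beam keeps its own block of `n_docs` documents.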
1514
+ # correctly extend last_hidden_state and attention mask
1515
+ context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams)
1516
+ encoder_outputs["last_hidden_state"] = extend_enc_output(
1517
+ last_hidden_state, num_beams=generation_config.num_beams
1518
+ )
1519
+
1520
+ doc_scores = doc_scores.repeat_interleave(generation_config.num_beams, dim=0)
1521
+
1522
+ # define start_len & additional parameters
1523
+ model_kwargs["doc_scores"] = doc_scores
1524
+ model_kwargs["encoder_outputs"] = encoder_outputs
1525
+ model_kwargs["attention_mask"] = context_attention_mask
1526
+ model_kwargs["n_docs"] = n_docs
1527
+
1528
+ pre_processor = self._get_logits_processor(
1529
+ generation_config=generation_config,
1530
+ input_ids_seq_length=input_ids_seq_length,
1531
+ encoder_input_ids=context_input_ids,
1532
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1533
+ logits_processor=logits_processor,
1534
+ )
1535
+
1536
+ if generation_config.num_beams == 1:
1537
+ if generation_config.num_return_sequences > 1:
1538
+ raise ValueError(
1539
+ f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
1540
+ " greedy search."
1541
+ )
1542
+ return self._greedy_search(
1543
+ input_ids,
1544
+ logits_processor=pre_processor,
1545
+ max_length=generation_config.max_length,
1546
+ pad_token_id=generation_config.pad_token_id,
1547
+ eos_token_id=generation_config.eos_token_id,
1548
+ **model_kwargs,
1549
+ )
1550
+ elif generation_config.num_beams > 1:
1551
+ if generation_config.num_return_sequences > generation_config.num_beams:
1552
+ raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
1553
+ beam_scorer = BeamSearchScorer(
1554
+ batch_size=batch_size,
1555
+ num_beams=generation_config.num_beams,
1556
+ device=self.device,
1557
+ length_penalty=generation_config.length_penalty,
1558
+ do_early_stopping=generation_config.early_stopping,
1559
+ num_beam_hyps_to_keep=generation_config.num_return_sequences,
1560
+ max_length=generation_config.max_length,
1561
+ )
1562
+ return self._beam_search(
1563
+ input_ids,
1564
+ beam_scorer,
1565
+ logits_processor=pre_processor,
1566
+ max_length=generation_config.max_length,
1567
+ pad_token_id=generation_config.pad_token_id,
1568
+ eos_token_id=generation_config.eos_token_id,
1569
+ **model_kwargs,
1570
+ )
1571
+ else:
1572
+ raise ValueError(
1573
+ f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {generation_config.num_beams}"
1574
+ )
1575
+
1576
+ def get_input_embeddings(self):
1577
+ return self.rag.generator.get_input_embeddings()
1578
+
1579
+ def get_output_embeddings(self):
1580
+ return self.rag.generator.get_output_embeddings()
1581
+
1582
+ def set_output_embeddings(self, new_embeddings):
1583
+ return self.rag.generator.set_output_embeddings(new_embeddings)
1584
+
1585
+ def shift_tokens_right(self, input_ids, start_token_id=None):
1586
+ """Shift input ids one token to the right, and pad with start_token_id"""
1587
+ if start_token_id is None:
1588
+ start_token_id = self.config.decoder_start_token_id
1589
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
1590
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
1591
+ shifted_input_ids[:, 0] = start_token_id
1592
+ return shifted_input_ids
1593
+
1594
+ def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
1595
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
1596
+ # shift tokens left
1597
+ target = torch.cat(
1598
+ [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
1599
+ )
1600
+
1601
+ def _mask_pads(ll, smooth_obj):
1602
+ pad_mask = target.eq(self.config.generator.pad_token_id)
1603
+ if pad_mask.any():
1604
+ ll.masked_fill_(pad_mask, 0.0)
1605
+ smooth_obj.masked_fill_(pad_mask, 0.0)
1606
+ return ll.squeeze(-1), smooth_obj.squeeze(-1)
1607
+
1608
+ rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
1609
+
1610
+ target = target.unsqueeze(-1)
1611
+ assert target.dim() == rag_logprobs.dim()
1612
+
1613
+ ll = rag_logprobs.gather(dim=-1, index=target)
1614
+ smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits
1615
+ ll, smooth_obj = _mask_pads(ll, smooth_obj)
1616
+ ll = ll.sum(1) # sum over tokens
1617
+ smooth_obj = smooth_obj.sum(1)
1618
+
1619
+ nll_loss = -ll
1620
+ smooth_loss = -smooth_obj
1621
+
1622
+ if reduce_loss:
1623
+ nll_loss = nll_loss.sum()
1624
+ smooth_loss = smooth_loss.sum()
1625
+
1626
+ eps_i = epsilon / rag_logprobs.size(-1)
1627
+ loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
1628
+ return loss
env-llmeval/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py ADDED
@@ -0,0 +1,674 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """RAG Retriever model implementation."""
16
+
17
+ import os
18
+ import pickle
19
+ import time
20
+ from typing import Iterable, List, Optional, Tuple
21
+
22
+ import numpy as np
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...tokenization_utils_base import BatchEncoding
26
+ from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
27
+ from .configuration_rag import RagConfig
28
+ from .tokenization_rag import RagTokenizer
29
+
30
+
31
+ if is_datasets_available():
32
+ from datasets import Dataset, load_dataset, load_from_disk
33
+
34
+ if is_faiss_available():
35
+ import faiss
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/"
42
+
43
+
44
+ class Index:
45
+ """
46
+ A base class for the Indices encapsulated by the [`RagRetriever`].
47
+ """
48
+
49
+ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
50
+ """
51
+ Returns a list of dictionaries, containing titles and text of the retrieved documents.
52
+
53
+ Args:
54
+ doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`):
55
+ A tensor of document indices.
56
+ """
57
+ raise NotImplementedError
58
+
59
+ def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
60
+ """
61
+ For each query in the batch, retrieves `n_docs` documents.
62
+
63
+ Args:
64
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
65
+ An array of query vectors.
66
+ n_docs (`int`):
67
+ The number of docs retrieved per query.
68
+
69
+ Returns:
70
+ `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of
71
+ shape `(batch_size, n_docs, vector_size)`: A tensor of vector representations of retrieved documents.
72
+ """
73
+ raise NotImplementedError
74
+
75
+ def is_initialized(self):
76
+ """
77
+ Returns `True` if index is already initialized.
78
+ """
79
+ raise NotImplementedError
80
+
81
+ def init_index(self):
82
+ """
83
+ A function responsible for loading the index into memory. Should be called only once per training run of a RAG
84
+ model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load
85
+ the index.
86
+ """
87
+ raise NotImplementedError
88
+
89
+
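+
+ # Illustrative sketch of a concrete `Index` (example only, under the interface documented above): a
+ # minimal in-memory index backed by a NumPy matrix that scores documents by inner product. The
+ # attribute names (`titles`, `texts`, `embeddings`) are assumptions made for this example.
+ class InMemoryIndex(Index):
+     def __init__(self, titles: List[str], texts: List[str], embeddings: np.ndarray):
+         self.titles = titles  # one title string per document
+         self.texts = texts  # one passage text per document
+         self.embeddings = embeddings  # shape (num_docs, vector_size)
+         self._initialized = False
+
+     def is_initialized(self):
+         return self._initialized
+
+     def init_index(self):
+         # nothing to load for an in-memory index
+         self._initialized = True
+
+     def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
+         return [
+             {"title": [self.titles[i] for i in row], "text": [self.texts[i] for i in row]}
+             for row in doc_ids
+         ]
+
+     def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
+         scores = question_hidden_states @ self.embeddings.T  # (batch_size, num_docs)
+         ids = np.argsort(-scores, axis=1)[:, :n_docs]  # (batch_size, n_docs), best docs first
+         vectors = self.embeddings[ids]  # (batch_size, n_docs, vector_size)
+         return ids, vectors
+
+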
90
+ class LegacyIndex(Index):
91
+ """
92
+ An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use
93
+ default faiss index parameters as specified in that repository.
94
+
95
+ Args:
96
+ vector_size (`int`):
97
+ The dimension of indexed vectors.
98
+ index_path (`str`):
99
+ A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`]
100
+ """
101
+
102
+ INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index"
103
+ PASSAGE_FILENAME = "psgs_w100.tsv.pkl"
104
+
105
+ def __init__(self, vector_size, index_path):
106
+ self.index_id_to_db_id = []
107
+ self.index_path = index_path
108
+ self.passages = self._load_passages()
109
+ self.vector_size = vector_size
110
+ self.index = None
111
+ self._index_initialized = False
112
+
113
+ def _resolve_path(self, index_path, filename):
114
+ is_local = os.path.isdir(index_path)
115
+ try:
116
+ # Load from URL or cache if already cached
117
+ resolved_archive_file = cached_file(index_path, filename)
118
+ except EnvironmentError:
119
+ msg = (
120
+ f"Can't load '{filename}'. Make sure that:\n\n"
121
+ f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n"
122
+ f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n"
123
+ )
124
+ raise EnvironmentError(msg)
125
+ if is_local:
126
+ logger.info(f"loading file {resolved_archive_file}")
127
+ else:
128
+ logger.info(f"loading file {filename} from cache at {resolved_archive_file}")
129
+ return resolved_archive_file
130
+
131
+ def _load_passages(self):
132
+ logger.info(f"Loading passages from {self.index_path}")
133
+ passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME)
134
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
135
+ raise ValueError(
136
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
137
+ "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
138
+ "that could have been tampered with. If you already verified the pickle data and decided to use it, "
139
+ "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
140
+ )
141
+ with open(passages_path, "rb") as passages_file:
142
+ passages = pickle.load(passages_file)
143
+ return passages
144
+
145
+ def _deserialize_index(self):
146
+ logger.info(f"Loading index from {self.index_path}")
147
+ resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr")
148
+ self.index = faiss.read_index(resolved_index_path)
149
+ resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr")
150
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
151
+ raise ValueError(
152
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
153
+ "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
154
+ "that could have been tampered with. If you already verified the pickle data and decided to use it, "
155
+ "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
156
+ )
157
+ with open(resolved_meta_path, "rb") as metadata_file:
158
+ self.index_id_to_db_id = pickle.load(metadata_file)
159
+ assert (
160
+ len(self.index_id_to_db_id) == self.index.ntotal
161
+ ), "Deserialized index_id_to_db_id should match faiss index size"
162
+
163
+ def is_initialized(self):
164
+ return self._index_initialized
165
+
166
+ def init_index(self):
167
+ index = faiss.IndexHNSWFlat(self.vector_size + 1, 512)
168
+ index.hnsw.efSearch = 128
169
+ index.hnsw.efConstruction = 200
170
+ self.index = index
171
+ self._deserialize_index()
172
+ self._index_initialized = True
173
+
174
+ def get_doc_dicts(self, doc_ids: np.array):
175
+ doc_list = []
176
+ for doc_ids_i in doc_ids:
177
+ ids = [str(int(doc_id)) for doc_id in doc_ids_i]
178
+ docs = [self.passages[doc_id] for doc_id in ids]
179
+ doc_list.append(docs)
180
+ doc_dicts = []
181
+ for docs in doc_list:
182
+ doc_dict = {}
183
+ doc_dict["title"] = [doc[1] for doc in docs]
184
+ doc_dict["text"] = [doc[0] for doc in docs]
185
+ doc_dicts.append(doc_dict)
186
+ return doc_dicts
187
+
188
+ def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
189
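+ # The legacy DPR index stores vectors with one extra auxiliary dimension (hence `vector_size + 1`
+ # in `init_index`), following DPR's trick of converting maximum inner product search into an L2
+ # search; queries are therefore padded with a zero in that auxiliary dimension before searching.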
+ aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1)
190
+ query_nhsw_vectors = np.hstack((question_hidden_states, aux_dim))
191
+ _, docs_ids = self.index.search(query_nhsw_vectors, n_docs)
192
+ vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids]
193
+ ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]
194
+ return np.array(ids), np.array(vectors)
195
+
196
+
197
+ class HFIndexBase(Index):
198
+ def __init__(self, vector_size, dataset, index_initialized=False):
199
+ self.vector_size = vector_size
200
+ self.dataset = dataset
201
+ self._index_initialized = index_initialized
202
+ self._check_dataset_format(with_index=index_initialized)
203
+ dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32")
204
+
205
+ def _check_dataset_format(self, with_index: bool):
206
+ if not isinstance(self.dataset, Dataset):
207
+ raise ValueError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}")
208
+ if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0:
209
+ raise ValueError(
210
+ "Dataset should be a dataset with the following columns: "
211
+ "title (str), text (str) and embeddings (arrays of dimension vector_size), "
212
+ f"but got columns {self.dataset.column_names}"
213
+ )
214
+ if with_index and "embeddings" not in self.dataset.list_indexes():
215
+ raise ValueError(
216
+ "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it "
217
+ "or `dataset.load_faiss_index` to load one from the disk."
218
+ )
219
+
220
+ def init_index(self):
221
+ raise NotImplementedError()
222
+
223
+ def is_initialized(self):
224
+ return self._index_initialized
225
+
226
+ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
227
+ return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]
228
+
229
+ def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
230
+ _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs)
231
+ docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
232
+ vectors = [doc["embeddings"] for doc in docs]
233
+ for i in range(len(vectors)):
234
+ if len(vectors[i]) < n_docs:
235
+ vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))])
236
+ return np.array(ids), np.array(vectors) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
237
+
238
+
239
+ class CanonicalHFIndex(HFIndexBase):
240
+ """
241
+ A wrapper around an instance of [`~datasets.Dataset`]. If `index_path` is set to `None`, we load the pre-computed
242
+ index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path
243
+ on disk.
244
+
245
+ Args:
246
+ vector_size (`int`): the dimension of the passage embeddings used by the index
247
+ dataset_name (`str`, optional, defaults to `wiki_dpr`):
248
+ A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids
249
+ with `datasets.list_datasets()`).
250
+ dataset_split (`str`, optional, defaults to `train`):
251
+ Which split of the `dataset` to load.
252
+ index_name (`str`, optional):
253
+ The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved
254
+ under this name.
255
+ index_path (`str`, optional, defaults to `None`):
256
+ The path to the serialized faiss index on disk.
257
+ use_dummy_dataset (`bool`, optional, defaults to `False`):
258
+ If True, use the dummy configuration of the dataset for tests.
259
+ """
260
+
261
+ def __init__(
262
+ self,
263
+ vector_size: int,
264
+ dataset_name: str = "wiki_dpr",
265
+ dataset_split: str = "train",
266
+ index_name: Optional[str] = None,
267
+ index_path: Optional[str] = None,
268
+ use_dummy_dataset=False,
269
+ dataset_revision=None,
270
+ ):
271
+ if int(index_path is None) + int(index_name is None) != 1:
272
+ raise ValueError("Please provide `index_name` or `index_path`.")
273
+ self.dataset_name = dataset_name
274
+ self.dataset_split = dataset_split
275
+ self.index_name = index_name
276
+ self.index_path = index_path
277
+ self.use_dummy_dataset = use_dummy_dataset
278
+ self.dataset_revision = dataset_revision
279
+ logger.info(f"Loading passages from {self.dataset_name}")
280
+ dataset = load_dataset(
281
+ self.dataset_name,
282
+ with_index=False,
283
+ split=self.dataset_split,
284
+ dummy=self.use_dummy_dataset,
285
+ revision=dataset_revision,
286
+ )
287
+ super().__init__(vector_size, dataset, index_initialized=False)
288
+
289
+ def init_index(self):
290
+ if self.index_path is not None:
291
+ logger.info(f"Loading index from {self.index_path}")
292
+ self.dataset.load_faiss_index("embeddings", file=self.index_path)
293
+ else:
294
+ logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}")
295
+ self.dataset = load_dataset(
296
+ self.dataset_name,
297
+ with_embeddings=True,
298
+ with_index=True,
299
+ split=self.dataset_split,
300
+ index_name=self.index_name,
301
+ dummy=self.use_dummy_dataset,
302
+ revision=self.dataset_revision,
303
+ )
304
+ self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True)
305
+ self._index_initialized = True
306
+
307
+
308
+ class CustomHFIndex(HFIndexBase):
309
+ """
310
+ A wrapper around an instance of [`~datasets.Dataset`]. The dataset and the index are both loaded from the
311
+ indicated paths on disk.
312
+
313
+ Args:
314
+ vector_size (`int`): the dimension of the passage embeddings used by the index
315
+ dataset_path (`str`):
316
+ The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and
317
+ embeddings (arrays of dimension vector_size)
318
+ index_path (`str`):
319
+ The path to the serialized faiss index on disk.
320
+ """
321
+
322
+ def __init__(self, vector_size: int, dataset, index_path=None):
323
+ super().__init__(vector_size, dataset, index_initialized=index_path is None)
324
+ self.index_path = index_path
325
+
326
+ @classmethod
327
+ def load_from_disk(cls, vector_size, dataset_path, index_path):
328
+ logger.info(f"Loading passages from {dataset_path}")
329
+ if dataset_path is None or index_path is None:
330
+ raise ValueError(
331
+ "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` "
332
+ "and `dataset.get_index('embeddings').save(index_path)`."
333
+ )
334
+ dataset = load_from_disk(dataset_path)
335
+ return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)
336
+
337
+ def init_index(self):
338
+ if not self.is_initialized():
339
+ logger.info(f"Loading index from {self.index_path}")
340
+ self.dataset.load_faiss_index("embeddings", file=self.index_path)
341
+ self._index_initialized = True
342
+
343
+
344
+ class RagRetriever:
345
+ """
346
+ Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents
347
+ contents, and it formats them to be used with a RagModel.
348
+
349
+ Args:
350
+ config ([`RagConfig`]):
351
+ The configuration of the RAG model this Retriever is used with. Contains parameters indicating which
352
+ `Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical
353
+ one (default) from the datasets library with `config.index_name="wiki_dpr"` for example.
354
+ question_encoder_tokenizer ([`PreTrainedTokenizer`]):
355
+ The tokenizer that was used to tokenize the question. It is used to decode the question and then use the
356
+ generator_tokenizer.
357
+ generator_tokenizer ([`PreTrainedTokenizer`]):
358
+ The tokenizer used for the generator part of the RagModel.
359
+ index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration):
360
+ If specified, use this index instead of the one built using the configuration
361
+
362
+ Examples:
363
+
364
+ ```python
365
+ >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact')
366
+ >>> from transformers import RagRetriever
367
+
368
+ >>> retriever = RagRetriever.from_pretrained(
369
+ ... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed"
370
+ ... )
371
+
372
+ >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py
373
+ >>> from transformers import RagRetriever
374
+
375
+ >>> dataset = (
376
+ ... ...
377
+ ... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index
378
+ >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset)
379
+
380
+ >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py
381
+ >>> from transformers import RagRetriever
382
+
383
+ >>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)*
384
+ >>> index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)*
385
+ >>> retriever = RagRetriever.from_pretrained(
386
+ ... "facebook/dpr-ctx_encoder-single-nq-base",
387
+ ... index_name="custom",
388
+ ... passages_path=dataset_path,
389
+ ... index_path=index_path,
390
+ ... )
391
+
392
+ >>> # To load the legacy index built originally for Rag's paper
393
+ >>> from transformers import RagRetriever
394
+
395
+ >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy")
396
+ ```"""
397
+
398
+ def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True):
399
+ self._init_retrieval = init_retrieval
400
+ requires_backends(self, ["datasets", "faiss"])
401
+ super().__init__()
402
+ self.index = index or self._build_index(config)
403
+ self.generator_tokenizer = generator_tokenizer
404
+ self.question_encoder_tokenizer = question_encoder_tokenizer
405
+
406
+ self.n_docs = config.n_docs
407
+ self.batch_size = config.retrieval_batch_size
408
+
409
+ self.config = config
410
+ if self._init_retrieval:
411
+ self.init_retrieval()
412
+
413
+ self.ctx_encoder_tokenizer = None
414
+ self.return_tokenized_docs = False
415
+
416
+ @staticmethod
417
+ def _build_index(config):
418
+ if config.index_name == "legacy":
419
+ return LegacyIndex(
420
+ config.retrieval_vector_size,
421
+ config.index_path or LEGACY_INDEX_PATH,
422
+ )
423
+ elif config.index_name == "custom":
424
+ return CustomHFIndex.load_from_disk(
425
+ vector_size=config.retrieval_vector_size,
426
+ dataset_path=config.passages_path,
427
+ index_path=config.index_path,
428
+ )
429
+ else:
430
+ return CanonicalHFIndex(
431
+ vector_size=config.retrieval_vector_size,
432
+ dataset_name=config.dataset,
433
+ dataset_split=config.dataset_split,
434
+ index_name=config.index_name,
435
+ index_path=config.index_path,
436
+ use_dummy_dataset=config.use_dummy_dataset,
437
+ dataset_revision=config.dataset_revision,
438
+ )
439
+
440
+ @classmethod
441
+ def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
442
+ requires_backends(cls, ["datasets", "faiss"])
443
+ config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
444
+ rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
445
+ question_encoder_tokenizer = rag_tokenizer.question_encoder
446
+ generator_tokenizer = rag_tokenizer.generator
447
+ if indexed_dataset is not None:
448
+ config.index_name = "custom"
449
+ index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
450
+ else:
451
+ index = cls._build_index(config)
452
+ return cls(
453
+ config,
454
+ question_encoder_tokenizer=question_encoder_tokenizer,
455
+ generator_tokenizer=generator_tokenizer,
456
+ index=index,
457
+ )
458
+
459
+ def save_pretrained(self, save_directory):
460
+ if isinstance(self.index, CustomHFIndex):
461
+ if self.config.index_path is None:
462
+ index_path = os.path.join(save_directory, "hf_dataset_index.faiss")
463
+ self.index.dataset.get_index("embeddings").save(index_path)
464
+ self.config.index_path = index_path
465
+ if self.config.passages_path is None:
466
+ passages_path = os.path.join(save_directory, "hf_dataset")
467
+ # datasets don't support save_to_disk with indexes right now
468
+ faiss_index = self.index.dataset._indexes.pop("embeddings")
469
+ self.index.dataset.save_to_disk(passages_path)
470
+ self.index.dataset._indexes["embeddings"] = faiss_index
471
+ self.config.passages_path = passages_path
472
+ self.config.save_pretrained(save_directory)
473
+ rag_tokenizer = RagTokenizer(
474
+ question_encoder=self.question_encoder_tokenizer,
475
+ generator=self.generator_tokenizer,
476
+ )
477
+ rag_tokenizer.save_pretrained(save_directory)
478
+
479
+ def init_retrieval(self):
480
+ """
481
+ Retriever initialization function. It loads the index into memory.
482
+ """
483
+
484
+ logger.info("initializing retrieval")
485
+ self.index.init_index()
486
+
487
+ def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
488
+ r"""
489
+ Postprocessing retrieved `docs` and combining them with `input_strings`.
490
+
491
+ Args:
492
+ docs (`dict`):
493
+ Retrieved documents.
494
+ input_strings (`str`):
495
+ Input strings decoded by `preprocess_query`.
496
+ prefix (`str`):
497
+ Prefix added at the beginning of each input, typically used with T5-based models.
498
+
499
+ Return:
500
+ `tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible
501
+ `attention_mask`.
502
+ """
503
+
504
+ def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
505
+ # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation
506
+ # TODO(piktus): better handling of truncation
507
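+ # With the default separators this yields strings of the form
+ # "<prefix><doc title> / <doc text> // <question>", which are then tokenized with the generator's
+ # tokenizer below.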
+ if doc_title.startswith('"'):
508
+ doc_title = doc_title[1:]
509
+ if doc_title.endswith('"'):
510
+ doc_title = doc_title[:-1]
511
+ if prefix is None:
512
+ prefix = ""
513
+ out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace(
514
+ " ", " "
515
+ )
516
+ return out
517
+
518
+ rag_input_strings = [
519
+ cat_input_and_doc(
520
+ docs[i]["title"][j],
521
+ docs[i]["text"][j],
522
+ input_strings[i],
523
+ prefix,
524
+ )
525
+ for i in range(len(docs))
526
+ for j in range(n_docs)
527
+ ]
528
+
529
+ contextualized_inputs = self.generator_tokenizer.batch_encode_plus(
530
+ rag_input_strings,
531
+ max_length=self.config.max_combined_length,
532
+ return_tensors=return_tensors,
533
+ padding="max_length",
534
+ truncation=True,
535
+ )
536
+
537
+ return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"]
538
+
539
+ def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]:
540
+ return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]
541
+
542
+ def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]:
543
+ question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)
544
+ ids_batched = []
545
+ vectors_batched = []
546
+ for question_hidden_states in question_hidden_states_batched:
547
+ start_time = time.time()
548
+ ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs)
549
+ logger.debug(
550
+ f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}"
551
+ )
552
+ ids_batched.extend(ids)
553
+ vectors_batched.extend(vectors)
554
+ return (
555
+ np.array(ids_batched),
556
+ np.array(vectors_batched),
557
+ ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
558
+
559
+ def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray, List[dict]]:
560
+ """
561
+ Retrieves documents for specified `question_hidden_states`.
562
+
563
+ Args:
564
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
565
+ A batch of query vectors to retrieve with.
566
+ n_docs (`int`):
567
+ The number of docs retrieved per query.
568
+
569
+ Return:
570
+ `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects:
571
+
572
+ - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings
573
+ of the retrieved docs per query.
574
+ - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index.
575
+ - **doc_dicts** (`List[dict]`) -- The document dictionaries (including `title` and `text`) for the retrieved docs per query.
576
+ """
577
+
578
+ doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
579
+ return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
580
+
581
+ def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):
582
+ # used in end2end retriever training
583
+ self.ctx_encoder_tokenizer = ctx_encoder_tokenizer
584
+ self.return_tokenized_docs = True
585
+
586
+ def __call__(
587
+ self,
588
+ question_input_ids: List[List[int]],
589
+ question_hidden_states: np.ndarray,
590
+ prefix=None,
591
+ n_docs=None,
592
+ return_tensors=None,
593
+ ) -> BatchEncoding:
594
+ """
595
+ Retrieves documents for specified `question_hidden_states`.
596
+
597
+ Args:
598
+ question_input_ids (`List[List[int]]`): Batch of input ids.
599
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
600
+ A batch of query vectors to retrieve with.
601
+ prefix (`str`, *optional*):
602
+ The prefix used by the generator's tokenizer.
603
+ n_docs (`int`, *optional*):
604
+ The number of docs retrieved per query.
605
+ return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"):
606
+ If set, will return tensors instead of list of python integers. Acceptable values are:
607
+
608
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
609
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
610
+ - `'np'`: Return Numpy `np.ndarray` objects.
611
+
612
+ Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
613
+
614
+ - **context_input_ids** -- List of token ids to be fed to a model.
615
+
616
+ [What are input IDs?](../glossary#input-ids)
617
+
618
+ - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model
619
+ (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
620
+
621
+ [What are attention masks?](../glossary#attention-mask)
622
+
623
+ - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents
624
+ - **doc_ids** -- List of ids of the retrieved documents
625
+ """
626
+
627
+ n_docs = n_docs if n_docs is not None else self.n_docs
628
+ prefix = prefix if prefix is not None else self.config.generator.prefix
629
+ retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs)
630
+
631
+ input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True)
632
+ context_input_ids, context_attention_mask = self.postprocess_docs(
633
+ docs, input_strings, prefix, n_docs, return_tensors=return_tensors
634
+ )
635
+
636
+ if self.return_tokenized_docs:
637
+ retrieved_doc_text = []
638
+ retrieved_doc_title = []
639
+
640
+ for b_idx in range(len(docs)):
641
+ for doc_idx in range(n_docs):
642
+ retrieved_doc_text.append(docs[b_idx]["text"][doc_idx])
643
+ retrieved_doc_title.append(docs[b_idx]["title"][doc_idx])
644
+
645
+ tokenized_docs = self.ctx_encoder_tokenizer(
646
+ retrieved_doc_title,
647
+ retrieved_doc_text,
648
+ truncation=True,
649
+ padding="longest",
650
+ return_tensors=return_tensors,
651
+ )
652
+
653
+ return BatchEncoding(
654
+ {
655
+ "context_input_ids": context_input_ids,
656
+ "context_attention_mask": context_attention_mask,
657
+ "retrieved_doc_embeds": retrieved_doc_embeds,
658
+ "doc_ids": doc_ids,
659
+ "tokenized_doc_ids": tokenized_docs["input_ids"],
660
+ "tokenized_doc_attention_mask": tokenized_docs["attention_mask"],
661
+ },
662
+ tensor_type=return_tensors,
663
+ )
664
+
665
+ else:
666
+ return BatchEncoding(
667
+ {
668
+ "context_input_ids": context_input_ids,
669
+ "context_attention_mask": context_attention_mask,
670
+ "retrieved_doc_embeds": retrieved_doc_embeds,
671
+ "doc_ids": doc_ids,
672
+ },
673
+ tensor_type=return_tensors,
674
+ )
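For context, a minimal usage sketch of the retrieval path defined above. It assumes the `facebook/rag-sequence-nq` checkpoint with the `exact` dummy index (so the optional `datasets`/`faiss` dependencies are installed) and uses random vectors in place of real DPR question-encoder outputs, which is enough to exercise `retrieve` and `__call__`:

```python
import numpy as np

from transformers import RagRetriever, RagTokenizer

# The dummy dataset keeps the example lightweight; a real setup would load the full wiki_dpr index.
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")

question = "who holds the record in 100m freestyle"
question_input_ids = tokenizer(question, return_tensors="np")["input_ids"]

# Stand-in for DPR question-encoder outputs; 768 is the DPR hidden size.
question_hidden_states = np.random.randn(1, 768).astype("float32")

# Returns a BatchEncoding with context_input_ids, context_attention_mask,
# retrieved_doc_embeds and doc_ids, as documented in __call__ above.
out = retriever(question_input_ids, question_hidden_states, n_docs=5, return_tensors="pt")
print(out["context_input_ids"].shape, out["doc_ids"].shape)
```

With real question-encoder hidden states, `context_input_ids` is exactly what the RAG generator consumes: each row is `prefix + title + title_sep + text + doc_sep + question`, as assembled by `postprocess_docs`.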
env-llmeval/lib/python3.10/site-packages/transformers/models/rag/tokenization_rag.py ADDED
@@ -0,0 +1,120 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RAG."""
16
+ import os
17
+ import warnings
18
+ from typing import List, Optional
19
+
20
+ from ...tokenization_utils_base import BatchEncoding
21
+ from ...utils import logging
22
+ from .configuration_rag import RagConfig
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class RagTokenizer:
29
+ def __init__(self, question_encoder, generator):
30
+ self.question_encoder = question_encoder
31
+ self.generator = generator
32
+ self.current_tokenizer = self.question_encoder
33
+
34
+ def save_pretrained(self, save_directory):
35
+ if os.path.isfile(save_directory):
36
+ raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
37
+ os.makedirs(save_directory, exist_ok=True)
38
+ question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
39
+ generator_path = os.path.join(save_directory, "generator_tokenizer")
40
+ self.question_encoder.save_pretrained(question_encoder_path)
41
+ self.generator.save_pretrained(generator_path)
42
+
43
+ @classmethod
44
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
45
+ # dynamically import AutoTokenizer
46
+ from ..auto.tokenization_auto import AutoTokenizer
47
+
48
+ config = kwargs.pop("config", None)
49
+
50
+ if config is None:
51
+ config = RagConfig.from_pretrained(pretrained_model_name_or_path)
52
+
53
+ question_encoder = AutoTokenizer.from_pretrained(
54
+ pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
55
+ )
56
+ generator = AutoTokenizer.from_pretrained(
57
+ pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
58
+ )
59
+ return cls(question_encoder=question_encoder, generator=generator)
60
+
61
+ def __call__(self, *args, **kwargs):
62
+ return self.current_tokenizer(*args, **kwargs)
63
+
64
+ def batch_decode(self, *args, **kwargs):
65
+ return self.generator.batch_decode(*args, **kwargs)
66
+
67
+ def decode(self, *args, **kwargs):
68
+ return self.generator.decode(*args, **kwargs)
69
+
70
+ def _switch_to_input_mode(self):
71
+ self.current_tokenizer = self.question_encoder
72
+
73
+ def _switch_to_target_mode(self):
74
+ self.current_tokenizer = self.generator
75
+
76
+ def prepare_seq2seq_batch(
77
+ self,
78
+ src_texts: List[str],
79
+ tgt_texts: Optional[List[str]] = None,
80
+ max_length: Optional[int] = None,
81
+ max_target_length: Optional[int] = None,
82
+ padding: str = "longest",
83
+ return_tensors: str = None,
84
+ truncation: bool = True,
85
+ **kwargs,
86
+ ) -> BatchEncoding:
87
+ warnings.warn(
88
+ "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
89
+ "regular `__call__` method to prepare your inputs and the tokenizer under the `as_target_tokenizer` "
90
+ "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
91
+ "details",
92
+ FutureWarning,
93
+ )
94
+ if max_length is None:
95
+ max_length = self.current_tokenizer.model_max_length
96
+ model_inputs = self(
97
+ src_texts,
98
+ add_special_tokens=True,
99
+ return_tensors=return_tensors,
100
+ max_length=max_length,
101
+ padding=padding,
102
+ truncation=truncation,
103
+ **kwargs,
104
+ )
105
+ if tgt_texts is None:
106
+ return model_inputs
107
+ # Process tgt_texts
108
+ if max_target_length is None:
109
+ max_target_length = self.current_tokenizer.model_max_length
110
+ labels = self(
111
+ text_target=tgt_texts,
112
+ add_special_tokens=True,
113
+ return_tensors=return_tensors,
114
+ padding=padding,
115
+ max_length=max_target_length,
116
+ truncation=truncation,
117
+ **kwargs,
118
+ )
119
+ model_inputs["labels"] = labels["input_ids"]
120
+ return model_inputs
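A short sketch of the split behaviour implemented above: `__call__` delegates to the question-encoder tokenizer (the default `current_tokenizer`), while `decode`/`batch_decode` always go through the generator tokenizer. The checkpoint name is only an example, and the ids passed to `batch_decode` are a stand-in for real `generate()` output, so the decoded text is only meaningful in a full pipeline:

```python
from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")

# Encoding uses the question-encoder tokenizer ...
inputs = tokenizer(["who wrote crime and punishment?"], return_tensors="pt")

# ... decoding always uses the generator tokenizer, matching the generator vocabulary.
generated_ids = inputs["input_ids"]  # placeholder for model.generate(...) output
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```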
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__init__.py ADDED
@@ -0,0 +1,103 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_sentencepiece_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
27
+
28
+ try:
29
+ if not is_sentencepiece_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
35
+
36
+ try:
37
+ if not is_tokenizers_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
43
+
44
+ try:
45
+ if not is_torch_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ _import_structure["modeling_reformer"] = [
51
+ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
52
+ "ReformerAttention",
53
+ "ReformerForMaskedLM",
54
+ "ReformerForQuestionAnswering",
55
+ "ReformerForSequenceClassification",
56
+ "ReformerLayer",
57
+ "ReformerModel",
58
+ "ReformerModelWithLMHead",
59
+ "ReformerPreTrainedModel",
60
+ ]
61
+
62
+
63
+ if TYPE_CHECKING:
64
+ from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
65
+
66
+ try:
67
+ if not is_sentencepiece_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ pass
71
+ else:
72
+ from .tokenization_reformer import ReformerTokenizer
73
+
74
+ try:
75
+ if not is_tokenizers_available():
76
+ raise OptionalDependencyNotAvailable()
77
+ except OptionalDependencyNotAvailable:
78
+ pass
79
+ else:
80
+ from .tokenization_reformer_fast import ReformerTokenizerFast
81
+
82
+ try:
83
+ if not is_torch_available():
84
+ raise OptionalDependencyNotAvailable()
85
+ except OptionalDependencyNotAvailable:
86
+ pass
87
+ else:
88
+ from .modeling_reformer import (
89
+ REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
90
+ ReformerAttention,
91
+ ReformerForMaskedLM,
92
+ ReformerForQuestionAnswering,
93
+ ReformerForSequenceClassification,
94
+ ReformerLayer,
95
+ ReformerModel,
96
+ ReformerModelWithLMHead,
97
+ ReformerPreTrainedModel,
98
+ )
99
+
100
+ else:
101
+ import sys
102
+
103
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
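A small illustration of what this lazy import structure provides at the package level, assuming `torch` and `sentencepiece` are installed: the exported names are importable directly from `transformers`, and the heavy submodules are only loaded on first attribute access.

```python
from transformers import ReformerConfig, ReformerModel

config = ReformerConfig()
model = ReformerModel(config)
print(type(model).__name__, config.num_hidden_layers)
```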
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.54 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/configuration_reformer.cpython-310.pyc ADDED
Binary file (11.7 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/convert_reformer_trax_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.94 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/modeling_reformer.cpython-310.pyc ADDED
Binary file (67 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/tokenization_reformer.cpython-310.pyc ADDED
Binary file (6.75 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/__pycache__/tokenization_reformer_fast.cpython-310.pyc ADDED
Binary file (4.13 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/configuration_reformer.py ADDED
@@ -0,0 +1,239 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Reformer model configuration"""
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+ REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
25
+ "google/reformer-crime-and-punishment": (
26
+ "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/config.json"
27
+ ),
28
+ "google/reformer-enwik8": "https://huggingface.co/google/reformer-enwik8/resolve/main/config.json",
29
+ }
30
+
31
+
32
+ class ReformerConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`ReformerModel`]. It is used to instantiate a
35
+ Reformer model according to the specified arguments, defining the model architecture. Instantiating a configuration
36
+ with the defaults will yield a similar configuration to that of the Reformer
37
+ [google/reformer-crime-and-punishment](https://huggingface.co/google/reformer-crime-and-punishment) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+ Args:
43
+ attention_head_size (`int`, *optional*, defaults to 64):
44
+ Dimensionality of the projected key, query and value vectors
45
+ attn_layers (`List[str]`, *optional*, defaults to `["local", "lsh", "local", "lsh", "local", "lsh"]`):
46
+ List of attention layer types in ascending order. Each can be chosen between an LSHSelfAttention layer
47
+ (`"lsh"`) and a LocalSelfAttention layer (`"local"`).
48
+
49
+ For more information on LSHSelfAttention layer, see [LSH Self Attention](reformer#lsh-self-attention). For
50
+ more information on LocalSelfAttention layer, see [Local Self Attention](reformer#local-self-attention).
51
+ axial_pos_embds (`bool`, *optional*, defaults to `True`):
52
+ Whether or not to use axial position embeddings. For more information on how axial position embeddings
53
+ work, see [Axial Position Encodings](reformer#axial-positional-encodings).
54
+ axial_norm_std (`float`, *optional*, defaults to 1.0):
55
+ The standard deviation of the normal_initializer for initializing the weight matrices of the axial
56
+ positional encodings.
57
+ axial_pos_shape (`List[int]`, *optional*, defaults to `[64, 64]`):
58
+ The position dims of the axial position encodings. During training, the product of the position dims has to
59
+ be equal to the sequence length.
60
+
61
+ For more information on how axial position embeddings work, see [Axial Position
62
+ Encodings](reformer#axial-positional-encodings).
63
+ axial_pos_embds_dim (`List[int]`, *optional*, defaults to `[64, 192]`):
64
+ The embedding dims of the axial position encodings. The sum of the embedding dims has to be equal to the
65
+ hidden size.
66
+
67
+ For more information on how axial position embeddings work, see [Axial Position
68
+ Encodings](reformer#axial-positional-encodings).
69
+ chunk_size_lm_head (`int`, *optional*, defaults to 0):
70
+ The chunk size of the final language model feed forward head layer. A chunk size of 0 means that the feed
71
+ forward layer is not chunked. A chunk size of n means that the feed forward layer processes n <
72
+ sequence_length embeddings at a time.
73
+
74
+ For more information on feed forward chunking, see [How does Feed Forward Chunking
75
+ work?](../glossary#feed-forward-chunking).
76
+ eos_token_id (`int`, *optional*, defaults to 2):
77
+ The token id for the end-of-sentence token.
78
+ feed_forward_size (`int`, *optional*, defaults to 512):
79
+ Dimensionality of the feed_forward layer in the residual attention block.
80
+ hash_seed (`int`, *optional*):
81
+ Seed that can be used to make the locality sensitive hashing in `LSHSelfAttention` deterministic. This should only
82
+ be set for testing purposes. For evaluation and training, `hash_seed` should be left as `None` to
83
+ ensure fully random rotations in the locality sensitive hashing scheme.
84
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"relu"`):
85
+ The non-linear activation function (function or string) in the feed forward layer in the residual attention
86
+ block. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported.
87
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.05):
88
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
89
+ hidden_size (`int`, *optional*, defaults to 256):
90
+ Dimensionality of the output hidden states of the residual attention blocks.
91
+ initializer_range (`float`, *optional*, defaults to 0.02):
92
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
93
+ is_decoder (`bool`, *optional*, defaults to `False`):
94
+ Whether or not to use a causal mask in addition to the `attention_mask` passed to [`ReformerModel`]. When
95
+ using the Reformer for causal language modeling, this argument should be set to `True`.
96
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
97
+ The epsilon used by the layer normalization layers.
98
+ local_attn_chunk_length (`int`, *optional*, defaults to 64):
99
+ Length of chunk which attends to itself in `LocalSelfAttention`. Chunking reduces memory complexity from
100
+ sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
101
+ length (chunked self attention).
102
+ local_num_chunks_before (`int`, *optional*, defaults to 1):
103
+ Number of previous neighbouring chunks to attend to in `LocalSelfAttention` layer to itself.
104
+ local_num_chunks_after (`int`, *optional*, defaults to 0):
105
+ Number of following neighbouring chunks to attend to in `LocalSelfAttention` layer in addition to itself.
106
+ local_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.05):
107
+ The dropout ratio for the attention probabilities in `LocalSelfAttention`.
108
+ lsh_attn_chunk_length (`int`, *optional*, defaults to 64):
109
+ Length of chunk which attends to itself in `LSHSelfAttention`. Chunking reduces memory complexity from
110
+ sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
111
+ length (chunked self attention).
112
+ lsh_num_chunks_before (`int`, *optional*, defaults to 1):
113
+ Number of previous neighbouring chunks to attend to in `LSHSelfAttention` layer to itself.
114
+ lsh_num_chunks_after (`int`, *optional*, defaults to 0):
115
+ Number of following neighbouring chunks to attend to in `LSHSelfAttention` layer to itself.
116
+ lsh_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
117
+ The dropout ratio for the attention probabilities in `LSHSelfAttention`.
118
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
119
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
120
+ just in case (e.g., 512 or 1024 or 2048).
121
+ num_attention_heads (`int`, *optional*, defaults to 12):
122
+ Number of attention heads for each attention layer in the Transformer encoder.
123
+ num_buckets (`int` or `List[int]`, *optional*):
124
+ Number of buckets that the query key vectors can be "hashed into" using the locality sensitive hashing scheme.
125
+ Each query key vector is hashed into a hash in `1, ..., num_buckets`. The number of buckets can also be
126
+ factorized into a list for improved memory complexity. In this case, each query key vector is hashed into a
127
+ hash in `1-1, 1-2, ..., num_buckets[0]-1, ..., num_buckets[0]-num_buckets[1]` if `num_buckets` is
128
+ factorized into two factors. The number of buckets (or the product of the factors) should approximately equal
129
+ sequence length / lsh_attn_chunk_length. If `num_buckets` is not set, a good value is calculated on the fly.
130
+ num_hashes (`int`, *optional*, defaults to 1):
131
+ Number of hashing rounds (e.g., number of random rotations) in the locality sensitive hashing scheme. The higher
132
+ `num_hashes`, the more accurate the `LSHSelfAttention` becomes, but also the more memory and time intensive
133
+ the hashing becomes.
134
+ pad_token_id (`int`, *optional*, defaults to 0):
135
+ The token id for the padding token.
136
+ vocab_size (`int`, *optional*, defaults to 320):
137
+ Vocabulary size of the Reformer model. Defines the number of different tokens that can be represented by
138
+ the `inputs_ids` passed when calling [`ReformerModel`].
139
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
140
+ Whether to tie input and output embeddings.
141
+ use_cache (`bool`, *optional*, defaults to `True`):
142
+ Whether or not the model should return the last key/values attentions (not used by all models).
143
+ classifier_dropout (`float`, *optional*):
144
+ The dropout ratio for the classification head.
145
+
146
+ Examples:
147
+
148
+ ```python
149
+ >>> from transformers import ReformerConfig, ReformerModel
150
+
151
+ >>> # Initializing a Reformer configuration
152
+ >>> configuration = ReformerConfig()
153
+
154
+ >>> # Initializing a Reformer model (with random weights)
155
+ >>> model = ReformerModel(configuration)
156
+
157
+ >>> # Accessing the model configuration
158
+ >>> configuration = model.config
159
+ ```
160
+ """
161
+
162
+ model_type = "reformer"
163
+ keys_to_ignore_at_inference = ["past_buckets_states"]
164
+ attribute_map = {}
165
+
166
+ def __init__(
167
+ self,
168
+ attention_head_size=64,
169
+ attn_layers=["local", "lsh", "local", "lsh", "local", "lsh"],
170
+ axial_norm_std=1.0,
171
+ axial_pos_embds=True,
172
+ axial_pos_shape=[64, 64],
173
+ axial_pos_embds_dim=[64, 192],
174
+ chunk_size_lm_head=0,
175
+ eos_token_id=2,
176
+ feed_forward_size=512,
177
+ hash_seed=None,
178
+ hidden_act="relu",
179
+ hidden_dropout_prob=0.05,
180
+ hidden_size=256,
181
+ initializer_range=0.02,
182
+ is_decoder=False,
183
+ layer_norm_eps=1e-12,
184
+ local_num_chunks_before=1,
185
+ local_num_chunks_after=0,
186
+ local_attention_probs_dropout_prob=0.05,
187
+ local_attn_chunk_length=64,
188
+ lsh_attn_chunk_length=64,
189
+ lsh_attention_probs_dropout_prob=0.0,
190
+ lsh_num_chunks_before=1,
191
+ lsh_num_chunks_after=0,
192
+ max_position_embeddings=4096,
193
+ num_attention_heads=12,
194
+ num_buckets=None,
195
+ num_hashes=1,
196
+ pad_token_id=0,
197
+ vocab_size=320,
198
+ tie_word_embeddings=False,
199
+ use_cache=True,
200
+ classifier_dropout=None,
201
+ **kwargs,
202
+ ):
203
+ self.hash_seed = hash_seed
204
+ self.vocab_size = vocab_size
205
+ self.attention_head_size = attention_head_size
206
+ self.hidden_size = hidden_size
207
+ self.num_attention_heads = num_attention_heads
208
+ self.num_hashes = num_hashes
209
+ self.num_hidden_layers = len(attn_layers)
210
+ self.num_buckets = tuple(num_buckets) if isinstance(num_buckets, list) else num_buckets
211
+ self.lsh_attn_chunk_length = lsh_attn_chunk_length
212
+ self.local_attn_chunk_length = local_attn_chunk_length
213
+ self.lsh_num_chunks_after = lsh_num_chunks_after
214
+ self.lsh_num_chunks_before = lsh_num_chunks_before
215
+ self.local_num_chunks_after = local_num_chunks_after
216
+ self.local_num_chunks_before = local_num_chunks_before
217
+ self.hidden_act = hidden_act
218
+ self.feed_forward_size = feed_forward_size
219
+ self.hidden_dropout_prob = hidden_dropout_prob
220
+ self.lsh_attention_probs_dropout_prob = lsh_attention_probs_dropout_prob
221
+ self.local_attention_probs_dropout_prob = local_attention_probs_dropout_prob
222
+ self.max_position_embeddings = max_position_embeddings
223
+ self.initializer_range = initializer_range
224
+ self.layer_norm_eps = layer_norm_eps
225
+ self.axial_pos_embds = axial_pos_embds
226
+ self.axial_pos_shape = tuple(axial_pos_shape)
227
+ self.axial_pos_embds_dim = tuple(axial_pos_embds_dim)
228
+ self.axial_norm_std = axial_norm_std
229
+ self.chunk_size_lm_head = chunk_size_lm_head
230
+ self.attn_layers = attn_layers
231
+ self.use_cache = use_cache
232
+ self.classifier_dropout = classifier_dropout
233
+ super().__init__(
234
+ pad_token_id=pad_token_id,
235
+ eos_token_id=eos_token_id,
236
+ is_decoder=is_decoder,
237
+ tie_word_embeddings=tie_word_embeddings,
238
+ **kwargs,
239
+ )
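A hedged sketch that makes the documented constraints concrete: the axial embedding dims must sum to `hidden_size`, the product of `axial_pos_shape` must match the (padded) training sequence length, and `num_hidden_layers` is derived from `attn_layers`. The specific values below are only an example.

```python
from transformers import ReformerConfig, ReformerModel

config = ReformerConfig(
    attn_layers=["local", "lsh", "local", "lsh"],  # num_hidden_layers == 4
    hidden_size=256,
    axial_pos_embds_dim=[64, 192],                 # 64 + 192 == hidden_size
    axial_pos_shape=[32, 128],                     # 32 * 128 == 4096 positions
    max_position_embeddings=4096,
    is_decoder=True,                               # causal mask for language modeling
)
model = ReformerModel(config)
print(config.num_hidden_layers, config.axial_pos_shape)
```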
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,222 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Reformer checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import pickle
20
+
21
+ import numpy as np
22
+ import torch
23
+ from torch import nn
24
+
25
+ from transformers import ReformerConfig, ReformerModelWithLMHead
26
+ from transformers.utils import logging
27
+
28
+
29
+ logging.set_verbosity_info()
30
+
31
+
32
+ def set_param(torch_layer, weight, bias=None):
33
+ # set parameter of one layer
34
+ assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
35
+ torch_layer.weight = nn.Parameter(weight)
36
+ if bias is not None:
37
+ assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
38
+ torch_layer.bias = nn.Parameter(bias)
39
+
40
+
41
+ def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
42
+ # set torch weights for 1-to-1 comparison
43
+ np_query_key = np.asarray(weights[0])
44
+ np_value = np.asarray(weights[1])
45
+ np_dense = np.asarray(weights[2])
46
+
47
+ set_param(
48
+ torch_layer.self_attention.query_key,
49
+ torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
50
+ )
51
+ set_param(
52
+ torch_layer.self_attention.value,
53
+ torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
54
+ )
55
+ set_param(
56
+ torch_layer.output.dense,
57
+ torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
58
+ )
59
+
60
+
61
+ def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
62
+ # set torch weights for 1-to-1 comparison
63
+ np_query = np.asarray(weights[0])
64
+ np_key = np.asarray(weights[1])
65
+ np_value = np.asarray(weights[2])
66
+ np_dense = np.asarray(weights[3])
67
+
68
+ set_param(
69
+ torch_layer.self_attention.query,
70
+ torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
71
+ )
72
+ set_param(
73
+ torch_layer.self_attention.key,
74
+ torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
75
+ )
76
+ set_param(
77
+ torch_layer.self_attention.value,
78
+ torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
79
+ )
80
+ set_param(
81
+ torch_layer.output.dense,
82
+ torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
83
+ )
84
+
85
+
86
+ def set_block_weights_in_torch(weights, torch_block, hidden_size):
87
+ # layernorm 1
88
+ layer_norm_1 = weights[0][0][0]
89
+ layer_norm_1_weight = np.asarray(layer_norm_1[0])
90
+ layer_norm_1_bias = np.asarray(layer_norm_1[1])
91
+ set_param(
92
+ torch_block.attention.layer_norm,
93
+ torch.tensor(layer_norm_1_weight),
94
+ torch.tensor(layer_norm_1_bias),
95
+ )
96
+
97
+ # lsh weights + output
98
+ attn_weights = weights[0][1]
99
+ if len(attn_weights) < 4:
100
+ set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
101
+ else:
102
+ set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
103
+
104
+ # intermediate weighs
105
+ intermediate_weights = weights[2][0][1][2]
106
+
107
+ # Chunked Feed Forward
108
+ if len(intermediate_weights) == 4:
109
+ intermediate_weights = intermediate_weights[2]
110
+
111
+ # layernorm 2
112
+ layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
113
+ layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
114
+ set_param(
115
+ torch_block.feed_forward.layer_norm,
116
+ torch.tensor(layer_norm_2_weight),
117
+ torch.tensor(layer_norm_2_bias),
118
+ )
119
+
120
+ # intermediate dense
121
+ inter_dense_weight = np.asarray(intermediate_weights[1][0])
122
+ inter_dense_bias = np.asarray(intermediate_weights[1][1])
123
+ set_param(
124
+ torch_block.feed_forward.dense.dense,
125
+ torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
126
+ torch.tensor(inter_dense_bias),
127
+ )
128
+
129
+ # intermediate out
130
+ out_dense_weight = np.asarray(intermediate_weights[4][0])
131
+ out_dense_bias = np.asarray(intermediate_weights[4][1])
132
+ set_param(
133
+ torch_block.feed_forward.output.dense,
134
+ torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
135
+ torch.tensor(out_dense_bias),
136
+ )
137
+
138
+
139
+ def set_model_weights_in_torch(weights, torch_model, hidden_size):
140
+ # reformer model
141
+ torch_model_reformer = torch_model.reformer
142
+
143
+ # word embeds
144
+ word_embeddings = np.asarray(weights[1])
145
+ set_param(
146
+ torch_model_reformer.embeddings.word_embeddings,
147
+ torch.tensor(word_embeddings),
148
+ )
149
+
150
+ if isinstance(weights[3], tuple):
151
+ position_embeddings = torch_model_reformer.embeddings.position_embeddings
152
+ for emb_idx in range(len(position_embeddings.weights)):
153
+ emb_weights = np.asarray(weights[3][emb_idx][0])
154
+ assert (
155
+ position_embeddings.weights[emb_idx].shape == emb_weights.shape
156
+ ), f"{position_embeddings.weights[emb_idx]} emb does not match"
157
+ position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
158
+
159
+ trax_layer_weights = weights[5]
160
+ assert len(torch_model_reformer.encoder.layers) * 4 == len(
161
+ trax_layer_weights
162
+ ), "HF and trax model do not have the same number of layers"
163
+ for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
164
+ block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
165
+ set_block_weights_in_torch(block_weights, layer, hidden_size)
166
+
167
+ # output layer norm
168
+ layer_norm_out_weight = np.asarray(weights[7][0])
169
+ layer_norm_out_bias = np.asarray(weights[7][1])
170
+ set_param(
171
+ torch_model_reformer.encoder.layer_norm,
172
+ torch.tensor(layer_norm_out_weight),
173
+ torch.tensor(layer_norm_out_bias),
174
+ )
175
+
176
+ # output embeddings
177
+ output_embed_weights = np.asarray(weights[9][0])
178
+ output_embed_bias = np.asarray(weights[9][1])
179
+ set_param(
180
+ torch_model.lm_head.decoder,
181
+ torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
182
+ torch.tensor(output_embed_bias),
183
+ )
184
+
185
+
186
+ def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
187
+ # Initialise PyTorch model
188
+ config = ReformerConfig.from_json_file(config_file)
189
+ print(f"Building PyTorch model from configuration: {config}")
190
+ model = ReformerModelWithLMHead(config)
191
+
192
+ with open(trax_model_pkl_path, "rb") as f:
193
+ model_weights = pickle.load(f)["weights"]
194
+
195
+ set_model_weights_in_torch(model_weights, model, config.hidden_size)
196
+
197
+ # Save pytorch-model
198
+ print(f"Save PyTorch model to {pytorch_dump_path}")
199
+ torch.save(model.state_dict(), pytorch_dump_path)
200
+
201
+
202
+ if __name__ == "__main__":
203
+ parser = argparse.ArgumentParser()
204
+ # Required parameters
205
+ parser.add_argument(
206
+ "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the Trax model pickle (.pkl) checkpoint."
207
+ )
208
+ parser.add_argument(
209
+ "--config_file",
210
+ default=None,
211
+ type=str,
212
+ required=True,
213
+ help=(
214
+ "The config json file corresponding to the pre-trained Reformer model. \n"
215
+ "This specifies the model architecture."
216
+ ),
217
+ )
218
+ parser.add_argument(
219
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
220
+ )
221
+ args = parser.parse_args()
222
+ convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
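Besides the CLI entry point, the conversion can be driven directly from Python; the sketch below just calls the function defined above, with placeholder paths standing in for a real Trax pickle, config JSON, and output file.

```python
from transformers.models.reformer.convert_reformer_trax_checkpoint_to_pytorch import (
    convert_trax_checkpoint_to_pytorch,
)

convert_trax_checkpoint_to_pytorch(
    trax_model_pkl_path="/path/to/trax_reformer_weights.pkl",  # pickle containing a "weights" entry
    config_file="/path/to/reformer_config.json",               # ReformerConfig-compatible JSON
    pytorch_dump_path="/path/to/pytorch_model.bin",
)
```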
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/modeling_reformer.py ADDED
The diff for this file is too large to render.
 
env-llmeval/lib/python3.10/site-packages/transformers/models/reformer/tokenization_reformer.py ADDED
@@ -0,0 +1,186 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model Reformer."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ SPIECE_UNDERLINE = "▁"
32
+
33
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
34
+
35
+ PRETRAINED_VOCAB_FILES_MAP = {
36
+ "vocab_file": {
37
+ "google/reformer-crime-and-punishment": (
38
+ "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
39
+ )
40
+ }
41
+ }
42
+
43
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
44
+ "google/reformer-crime-and-punishment": 524288,
45
+ }
46
+
47
+
48
+ class ReformerTokenizer(PreTrainedTokenizer):
49
+ """
50
+ Construct a Reformer tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece) .
51
+
52
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
53
+ this superclass for more information regarding those methods.
54
+
55
+ Args:
56
+ vocab_file (`str`):
57
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
58
+ contains the vocabulary necessary to instantiate a tokenizer.
59
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
60
+ The end of sequence token.
61
+
62
+ <Tip>
63
+
64
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
65
+ The token used is the `sep_token`.
66
+
67
+ </Tip>
68
+
69
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ additional_special_tokens (`List[str]`, *optional*, defaults to `[]`):
73
+ Additional special tokens used by the tokenizer.
74
+ sp_model_kwargs (`dict`, *optional*):
75
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
76
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
77
+ to set:
78
+
79
+ - `enable_sampling`: Enable subword regularization.
80
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
81
+
82
+ - `nbest_size = {0,1}`: No sampling is performed.
83
+ - `nbest_size > 1`: samples from the nbest_size results.
84
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
85
+ using forward-filtering-and-backward-sampling algorithm.
86
+
87
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
88
+ BPE-dropout.
89
+ """
90
+
91
+ vocab_files_names = VOCAB_FILES_NAMES
92
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
93
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
94
+ model_input_names = ["input_ids", "attention_mask"]
95
+
96
+ def __init__(
97
+ self,
98
+ vocab_file,
99
+ eos_token="</s>",
100
+ unk_token="<unk>",
101
+ additional_special_tokens=[],
102
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
103
+ **kwargs,
104
+ ) -> None:
105
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
106
+
107
+ self.vocab_file = vocab_file
108
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
109
+ self.sp_model.Load(vocab_file)
110
+
111
+ super().__init__(
112
+ eos_token=eos_token,
113
+ unk_token=unk_token,
114
+ additional_special_tokens=additional_special_tokens,
115
+ sp_model_kwargs=self.sp_model_kwargs,
116
+ **kwargs,
117
+ )
118
+
119
+ @property
120
+ def vocab_size(self):
121
+ return self.sp_model.get_piece_size()
122
+
123
+ def get_vocab(self) -> Dict[str, int]:
124
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
125
+ vocab.update(self.added_tokens_encoder)
126
+ return vocab
127
+
128
+ def __getstate__(self):
129
+ state = self.__dict__.copy()
130
+ state["sp_model"] = None
131
+ return state
132
+
133
+ def __setstate__(self, d):
134
+ self.__dict__ = d
135
+
136
+ # for backward compatibility
137
+ if not hasattr(self, "sp_model_kwargs"):
138
+ self.sp_model_kwargs = {}
139
+
140
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
141
+ self.sp_model.Load(self.vocab_file)
142
+
143
+ def _tokenize(self, text: str) -> List[str]:
144
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
145
+ return self.sp_model.encode(text, out_type=str)
146
+
147
+ def _convert_token_to_id(self, token):
148
+ """Converts a token (str) in an id using the vocab."""
149
+ return self.sp_model.piece_to_id(token)
150
+
151
+ def _convert_id_to_token(self, index):
152
+ """Converts an index (integer) in a token (str) using the vocab."""
153
+ if index < self.sp_model.get_piece_size():
154
+ token = self.sp_model.IdToPiece(index)
155
+ return token
156
+
157
+ def convert_tokens_to_string(self, tokens):
158
+ """Converts a sequence of tokens (string) in a single string."""
159
+ current_sub_tokens = []
160
+ out_string = ""
161
+ for token in tokens:
162
+ # make sure that special tokens are not decoded using sentencepiece model
163
+ if token in self.all_special_tokens:
164
+ out_string += self.sp_model.decode(current_sub_tokens) + token
165
+ current_sub_tokens = []
166
+ else:
167
+ current_sub_tokens.append(token)
168
+ out_string += self.sp_model.decode(current_sub_tokens)
169
+ return out_string.strip()
170
+
171
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
172
+ if not os.path.isdir(save_directory):
173
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
174
+ return
175
+ out_vocab_file = os.path.join(
176
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
177
+ )
178
+
179
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
180
+ copyfile(self.vocab_file, out_vocab_file)
181
+ elif not os.path.isfile(self.vocab_file):
182
+ with open(out_vocab_file, "wb") as fi:
183
+ content_spiece_model = self.sp_model.serialized_model_proto()
184
+ fi.write(content_spiece_model)
185
+
186
+ return (out_vocab_file,)
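A brief usage sketch of the tokenizer above, using the checkpoint referenced in `PRETRAINED_VOCAB_FILES_MAP` (requires `sentencepiece`); note that, as defined above, this tokenizer does not add special tokens around the encoded text.

```python
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

text = "He could not sleep."
tokens = tokenizer.tokenize(text)                # SentencePiece pieces (word starts marked with "▁")
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens)
print(tokenizer.convert_tokens_to_string(tokens))
print(ids == tokenizer(text)["input_ids"])       # True: no special tokens are added
```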