applied-ai-018 committed on
Commit aacdcba · verified · 1 Parent(s): 4d5056d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py +389 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +1526 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__init__.py +134 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/configuration_funnel.py +166 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py +65 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_funnel.py +1599 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_tf_funnel.py +1871 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel_fast.py +200 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__init__.py +119 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/__init__.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/configuration_openai.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/convert_openai_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_openai.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_tf_openai.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai_fast.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/configuration_openai.py +156 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py +75 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_openai.py +859 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_tf_openai.py +940 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai.py +394 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai_fast.py +64 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__init__.py +62 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/__init__.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/configuration_qwen2_moe.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/modeling_qwen2_moe.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/configuration_qwen2_moe.py +175 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/modeling_qwen2_moe.py +1595 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py +405 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py +108 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_tf_speech_to_text.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/processing_speech_to_text.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/tokenization_speech_to_text.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/configuration_speech_to_text.py +199 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py +121 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/feature_extraction_speech_to_text.py +297 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_speech_to_text.py +1370 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_tf_speech_to_text.py +1607 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/processing_speech_to_text.py +116 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc ADDED
Binary file (52.5 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc ADDED
Binary file (43.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py ADDED
@@ -0,0 +1,389 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BlenderbotSmall model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Any, Mapping, Optional
19
+
20
+ from ... import PreTrainedTokenizer
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...file_utils import TensorType, is_torch_available
23
+ from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
24
+ from ...onnx.utils import compute_effective_axis_dimension
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ from ..deprecated._archive_maps import BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class BlenderbotSmallConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`BlenderbotSmallModel`]. It is used to instantiate
36
+ a BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall
38
+ [facebook/blenderbot_small-90M](https://huggingface.co/facebook/blenderbot_small-90M) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+
44
+ Args:
45
+ vocab_size (`int`, *optional*, defaults to 50265):
46
+ Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be
47
+ represented by the `inputs_ids` passed when calling [`BlenderbotSmallModel`] or [`TFBlenderbotSmallModel`].
48
+ d_model (`int`, *optional*, defaults to 512):
49
+ Dimensionality of the layers and the pooler layer.
50
+ encoder_layers (`int`, *optional*, defaults to 8):
51
+ Number of encoder layers.
52
+ decoder_layers (`int`, *optional*, defaults to 8):
53
+ Number of decoder layers.
54
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
57
+ Number of attention heads for each attention layer in the Transformer decoder.
58
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
59
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
60
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
61
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
62
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
63
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
64
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
65
+ dropout (`float`, *optional*, defaults to 0.1):
66
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
67
+ attention_dropout (`float`, *optional*, defaults to 0.0):
68
+ The dropout ratio for the attention probabilities.
69
+ activation_dropout (`float`, *optional*, defaults to 0.0):
70
+ The dropout ratio for activations inside the fully connected layer.
71
+ max_position_embeddings (`int`, *optional*, defaults to 512):
72
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
73
+ just in case (e.g., 512 or 1024 or 2048).
74
+ init_std (`float`, *optional*, defaults to 0.02):
75
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
76
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
77
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
78
+ for more details.
79
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
80
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
81
+ for more details.
82
+ scale_embedding (`bool`, *optional*, defaults to `False`):
83
+ Scale embeddings by dividing by sqrt(d_model).
84
+ use_cache (`bool`, *optional*, defaults to `True`):
85
+ Whether or not the model should return the last key/values attentions (not used by all models).
86
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
87
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
88
+ `eos_token_id`.
89
+
90
+ Example:
91
+
92
+ ```python
93
+ >>> from transformers import BlenderbotSmallConfig, BlenderbotSmallModel
94
+
95
+ >>> # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration
96
+ >>> configuration = BlenderbotSmallConfig()
97
+
98
+ >>> # Initializing a model (with random weights) from the facebook/blenderbot_small-90M style configuration
99
+ >>> model = BlenderbotSmallModel(configuration)
100
+
101
+ >>> # Accessing the model configuration
102
+ >>> configuration = model.config
103
+ ```"""
104
+
105
+ model_type = "blenderbot-small"
106
+ keys_to_ignore_at_inference = ["past_key_values"]
107
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
108
+
109
+ def __init__(
110
+ self,
111
+ vocab_size=50265,
112
+ max_position_embeddings=512,
113
+ encoder_layers=8,
114
+ encoder_ffn_dim=2048,
115
+ encoder_attention_heads=16,
116
+ decoder_layers=8,
117
+ decoder_ffn_dim=2048,
118
+ decoder_attention_heads=16,
119
+ encoder_layerdrop=0.0,
120
+ decoder_layerdrop=0.0,
121
+ use_cache=True,
122
+ is_encoder_decoder=True,
123
+ activation_function="gelu",
124
+ d_model=512,
125
+ dropout=0.1,
126
+ attention_dropout=0.0,
127
+ activation_dropout=0.0,
128
+ init_std=0.02,
129
+ decoder_start_token_id=1,
130
+ scale_embedding=False,
131
+ pad_token_id=0,
132
+ bos_token_id=1,
133
+ eos_token_id=2,
134
+ forced_eos_token_id=2,
135
+ **kwargs,
136
+ ):
137
+ self.vocab_size = vocab_size
138
+ self.max_position_embeddings = max_position_embeddings
139
+ self.d_model = d_model
140
+ self.encoder_ffn_dim = encoder_ffn_dim
141
+ self.encoder_layers = encoder_layers
142
+ self.encoder_attention_heads = encoder_attention_heads
143
+ self.decoder_ffn_dim = decoder_ffn_dim
144
+ self.decoder_layers = decoder_layers
145
+ self.decoder_attention_heads = decoder_attention_heads
146
+ self.dropout = dropout
147
+ self.attention_dropout = attention_dropout
148
+ self.activation_dropout = activation_dropout
149
+ self.activation_function = activation_function
150
+ self.init_std = init_std
151
+ self.encoder_layerdrop = encoder_layerdrop
152
+ self.decoder_layerdrop = decoder_layerdrop
153
+ self.use_cache = use_cache
154
+ self.num_hidden_layers = encoder_layers
155
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
156
+
157
+ super().__init__(
158
+ pad_token_id=pad_token_id,
159
+ bos_token_id=bos_token_id,
160
+ eos_token_id=eos_token_id,
161
+ is_encoder_decoder=is_encoder_decoder,
162
+ decoder_start_token_id=decoder_start_token_id,
163
+ forced_eos_token_id=forced_eos_token_id,
164
+ **kwargs,
165
+ )
166
+
167
+
168
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig
169
+ class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
170
+ @property
171
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
172
+ if self.task in ["default", "seq2seq-lm"]:
173
+ common_inputs = OrderedDict(
174
+ [
175
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
176
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
177
+ ]
178
+ )
179
+
180
+ if self.use_past:
181
+ common_inputs["decoder_input_ids"] = {0: "batch"}
182
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
183
+ else:
184
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
185
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
186
+
187
+ if self.use_past:
188
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
189
+ elif self.task == "causal-lm":
190
+ # TODO: figure this case out.
191
+ common_inputs = OrderedDict(
192
+ [
193
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
194
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
195
+ ]
196
+ )
197
+ if self.use_past:
198
+ num_encoder_layers, _ = self.num_layers
199
+ for i in range(num_encoder_layers):
200
+ common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
201
+ common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
202
+ else:
203
+ common_inputs = OrderedDict(
204
+ [
205
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
206
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
207
+ ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
208
+ ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
209
+ ]
210
+ )
211
+
212
+ return common_inputs
213
+
214
+ @property
215
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
216
+ if self.task in ["default", "seq2seq-lm"]:
217
+ common_outputs = super().outputs
218
+ else:
219
+ common_outputs = super(OnnxConfigWithPast, self).outputs
220
+ if self.use_past:
221
+ num_encoder_layers, _ = self.num_layers
222
+ for i in range(num_encoder_layers):
223
+ common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
224
+ common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
225
+ return common_outputs
226
+
227
+ def _generate_dummy_inputs_for_default_and_seq2seq_lm(
228
+ self,
229
+ tokenizer: PreTrainedTokenizer,
230
+ batch_size: int = -1,
231
+ seq_length: int = -1,
232
+ is_pair: bool = False,
233
+ framework: Optional[TensorType] = None,
234
+ ) -> Mapping[str, Any]:
235
+ encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
236
+ tokenizer, batch_size, seq_length, is_pair, framework
237
+ )
238
+
239
+ # Generate decoder inputs
240
+ decoder_seq_length = seq_length if not self.use_past else 1
241
+ decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
242
+ tokenizer, batch_size, decoder_seq_length, is_pair, framework
243
+ )
244
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
245
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
246
+
247
+ if self.use_past:
248
+ if not is_torch_available():
249
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
250
+ else:
251
+ import torch
252
+ batch, encoder_seq_length = common_inputs["input_ids"].shape
253
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
254
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
255
+ encoder_shape = (
256
+ batch,
257
+ num_encoder_attention_heads,
258
+ encoder_seq_length,
259
+ self._config.hidden_size // num_encoder_attention_heads,
260
+ )
261
+ decoder_past_length = decoder_seq_length + 3
262
+ decoder_shape = (
263
+ batch,
264
+ num_decoder_attention_heads,
265
+ decoder_past_length,
266
+ self._config.hidden_size // num_decoder_attention_heads,
267
+ )
268
+
269
+ common_inputs["decoder_attention_mask"] = torch.cat(
270
+ [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
271
+ )
272
+
273
+ common_inputs["past_key_values"] = []
274
+ # If the number of encoder and decoder layers are present in the model configuration, both are considered
275
+ num_encoder_layers, num_decoder_layers = self.num_layers
276
+ min_num_layers = min(num_encoder_layers, num_decoder_layers)
277
+ max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
278
+ remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
279
+
280
+ for _ in range(min_num_layers):
281
+ common_inputs["past_key_values"].append(
282
+ (
283
+ torch.zeros(decoder_shape),
284
+ torch.zeros(decoder_shape),
285
+ torch.zeros(encoder_shape),
286
+ torch.zeros(encoder_shape),
287
+ )
288
+ )
289
+ # TODO: test this.
290
+ shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
291
+ for _ in range(min_num_layers, max_num_layers):
292
+ common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
293
+ return common_inputs
294
+
295
+ def _generate_dummy_inputs_for_causal_lm(
296
+ self,
297
+ tokenizer: PreTrainedTokenizer,
298
+ batch_size: int = -1,
299
+ seq_length: int = -1,
300
+ is_pair: bool = False,
301
+ framework: Optional[TensorType] = None,
302
+ ) -> Mapping[str, Any]:
303
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
304
+ tokenizer, batch_size, seq_length, is_pair, framework
305
+ )
306
+
307
+ if self.use_past:
308
+ if not is_torch_available():
309
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
310
+ else:
311
+ import torch
312
+ batch, seqlen = common_inputs["input_ids"].shape
313
+ # Not using the same length for past_key_values
314
+ past_key_values_length = seqlen + 2
315
+ num_encoder_layers, _ = self.num_layers
316
+ num_encoder_attention_heads, _ = self.num_attention_heads
317
+ past_shape = (
318
+ batch,
319
+ num_encoder_attention_heads,
320
+ past_key_values_length,
321
+ self._config.hidden_size // num_encoder_attention_heads,
322
+ )
323
+
324
+ mask_dtype = common_inputs["attention_mask"].dtype
325
+ common_inputs["attention_mask"] = torch.cat(
326
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
327
+ )
328
+ common_inputs["past_key_values"] = [
329
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
330
+ ]
331
+ return common_inputs
332
+
333
+ def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
334
+ self,
335
+ tokenizer: PreTrainedTokenizer,
336
+ batch_size: int = -1,
337
+ seq_length: int = -1,
338
+ is_pair: bool = False,
339
+ framework: Optional[TensorType] = None,
340
+ ) -> Mapping[str, Any]:
341
+ # Copied from OnnxConfig.generate_dummy_inputs
342
+ # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
343
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
344
+ batch_size = compute_effective_axis_dimension(
345
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
346
+ )
347
+
348
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
349
+ token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
350
+ seq_length = compute_effective_axis_dimension(
351
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
352
+ )
353
+
354
+ # Generate dummy inputs according to compute batch and sequence
355
+ dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
356
+ common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
357
+ return common_inputs
358
+
359
+ def generate_dummy_inputs(
360
+ self,
361
+ tokenizer: PreTrainedTokenizer,
362
+ batch_size: int = -1,
363
+ seq_length: int = -1,
364
+ is_pair: bool = False,
365
+ framework: Optional[TensorType] = None,
366
+ ) -> Mapping[str, Any]:
367
+ if self.task in ["default", "seq2seq-lm"]:
368
+ common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
369
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
370
+ )
371
+
372
+ elif self.task == "causal-lm":
373
+ common_inputs = self._generate_dummy_inputs_for_causal_lm(
374
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
375
+ )
376
+ else:
377
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
378
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
379
+ )
380
+
381
+ return common_inputs
382
+
383
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
384
+ if self.task in ["default", "seq2seq-lm"]:
385
+ flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
386
+ else:
387
+ flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
388
+ flattened_output, name, idx, t
389
+ )
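The configuration module above also defines `BlenderbotSmallOnnxConfig`, which declares the dynamic ONNX input/output axes and builds dummy inputs for export. Below is a minimal sketch of how those pieces could be exercised; it assumes `transformers` with PyTorch installed and uses only the names visible in the diff above, so treat it as illustrative rather than part of the commit.

```python
# Illustrative sketch (not part of the commit): exercising BlenderbotSmallConfig and
# the ONNX export helper defined in configuration_blenderbot_small.py above.
from transformers import AutoTokenizer
from transformers.file_utils import TensorType
from transformers.models.blenderbot_small.configuration_blenderbot_small import (
    BlenderbotSmallConfig,
    BlenderbotSmallOnnxConfig,
)

config = BlenderbotSmallConfig()  # defaults mirror facebook/blenderbot_small-90M
onnx_config = BlenderbotSmallOnnxConfig(config, task="default")

print(onnx_config.inputs)    # dynamic axes for input_ids, attention_mask, decoder_* tensors
print(onnx_config.outputs)

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print({name: tuple(tensor.shape) for name, tensor in dummy.items()})
```

The `default` task traces both encoder and decoder inputs; the `causal-lm` branch and the `use_past` flag change the declared axes, as the `inputs` and `outputs` properties in the diff show.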
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py ADDED
@@ -0,0 +1,1526 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 BlenderbotSmall model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import random
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFBaseModelOutputWithPastAndCrossAttentions,
30
+ TFSeq2SeqLMOutput,
31
+ TFSeq2SeqModelOutput,
32
+ )
33
+
34
+ # Public API
35
+ from ...modeling_tf_utils import (
36
+ TFCausalLanguageModelingLoss,
37
+ TFPreTrainedModel,
38
+ keras,
39
+ keras_serializable,
40
+ unpack_inputs,
41
+ )
42
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
43
+ from ...utils import (
44
+ add_code_sample_docstrings,
45
+ add_end_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from .configuration_blenderbot_small import BlenderbotSmallConfig
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+ _CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M"
57
+ _CONFIG_FOR_DOC = "BlenderbotSmallConfig"
58
+
59
+
60
+ LARGE_NEGATIVE = -1e8
61
+
62
+
63
+ # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
64
+ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
65
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
66
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
67
+ start_tokens = tf.fill(
68
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
69
+ )
70
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
71
+ # replace possible -100 values in labels by `pad_token_id`
72
+ shifted_input_ids = tf.where(
73
+ shifted_input_ids == -100,
74
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
75
+ shifted_input_ids,
76
+ )
77
+
78
+ # "Verify that `labels` has only positive values and -100"
79
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
80
+
81
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
82
+ with tf.control_dependencies([assert_gte0]):
83
+ shifted_input_ids = tf.identity(shifted_input_ids)
84
+
85
+ return shifted_input_ids
86
+
87
+
88
+ # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
89
+ def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
90
+ """
91
+ Make causal mask used for bi-directional self-attention.
92
+ """
93
+ bsz = input_ids_shape[0]
94
+ tgt_len = input_ids_shape[1]
95
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
96
+ mask_cond = tf.range(shape_list(mask)[-1])
97
+
98
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
99
+
100
+ if past_key_values_length > 0:
101
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
102
+
103
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
104
+
105
+
106
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
107
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
108
+ """
109
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
110
+ """
111
+ src_len = shape_list(mask)[1]
112
+ tgt_len = tgt_len if tgt_len is not None else src_len
113
+ one_cst = tf.constant(1.0)
114
+ mask = tf.cast(mask, dtype=one_cst.dtype)
115
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
116
+
117
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
118
+
119
+
120
+ # Copied from transformers.models.blenderbot.modeling_tf_blenderbot.TFBlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
121
+ class TFBlenderbotSmallLearnedPositionalEmbedding(keras.layers.Embedding):
122
+ """
123
+ This module learns positional embeddings up to a fixed maximum size.
124
+ """
125
+
126
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
127
+ super().__init__(num_embeddings, embedding_dim, **kwargs)
128
+
129
+ def call(
130
+ self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None
131
+ ):
132
+ """Input is expected to be of size [bsz x seqlen]."""
133
+ if position_ids is None:
134
+ seq_len = input_shape[1]
135
+ position_ids = tf.range(seq_len, delta=1, name="range")
136
+ position_ids += past_key_values_length
137
+
138
+ return super().call(tf.cast(position_ids, dtype=tf.int32))
139
+
140
+
141
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->BlenderbotSmall
142
+ class TFBlenderbotSmallAttention(keras.layers.Layer):
143
+ """Multi-headed attention from "Attention Is All You Need"""
144
+
145
+ def __init__(
146
+ self,
147
+ embed_dim: int,
148
+ num_heads: int,
149
+ dropout: float = 0.0,
150
+ is_decoder: bool = False,
151
+ bias: bool = True,
152
+ **kwargs,
153
+ ):
154
+ super().__init__(**kwargs)
155
+ self.embed_dim = embed_dim
156
+
157
+ self.num_heads = num_heads
158
+ self.dropout = keras.layers.Dropout(dropout)
159
+ self.head_dim = embed_dim // num_heads
160
+ if (self.head_dim * num_heads) != self.embed_dim:
161
+ raise ValueError(
162
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
163
+ f" and `num_heads`: {num_heads})."
164
+ )
165
+ self.scaling = self.head_dim**-0.5
166
+ self.is_decoder = is_decoder
167
+
168
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
169
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
170
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
171
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
172
+
173
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
174
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
175
+
176
+ def call(
177
+ self,
178
+ hidden_states: tf.Tensor,
179
+ key_value_states: tf.Tensor | None = None,
180
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
181
+ attention_mask: tf.Tensor | None = None,
182
+ layer_head_mask: tf.Tensor | None = None,
183
+ training: Optional[bool] = False,
184
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
185
+ """Input shape: Batch x Time x Channel"""
186
+
187
+ # if key_value_states are provided this layer is used as a cross-attention layer
188
+ # for the decoder
189
+ is_cross_attention = key_value_states is not None
190
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
191
+
192
+ # get query proj
193
+ query_states = self.q_proj(hidden_states) * self.scaling
194
+ # get key, value proj
195
+ if is_cross_attention and past_key_value is not None:
196
+ # reuse k,v, cross_attentions
197
+ key_states = past_key_value[0]
198
+ value_states = past_key_value[1]
199
+ elif is_cross_attention:
200
+ # cross_attentions
201
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
202
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
203
+ elif past_key_value is not None:
204
+ # reuse k, v, self_attention
205
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
206
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
207
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
208
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
209
+ else:
210
+ # self_attention
211
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
212
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
213
+
214
+ if self.is_decoder:
215
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
216
+ # Further calls to cross_attention layer can then reuse all cross-attention
217
+ # key/value_states (first "if" case)
218
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
219
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
220
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
221
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
222
+ past_key_value = (key_states, value_states)
223
+
224
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
225
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
226
+ key_states = tf.reshape(key_states, proj_shape)
227
+ value_states = tf.reshape(value_states, proj_shape)
228
+
229
+ src_len = shape_list(key_states)[1]
230
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
231
+
232
+ tf.debugging.assert_equal(
233
+ shape_list(attn_weights),
234
+ [bsz * self.num_heads, tgt_len, src_len],
235
+ message=(
236
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
237
+ f" {shape_list(attn_weights)}"
238
+ ),
239
+ )
240
+
241
+ if attention_mask is not None:
242
+ tf.debugging.assert_equal(
243
+ shape_list(attention_mask),
244
+ [bsz, 1, tgt_len, src_len],
245
+ message=(
246
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
247
+ f" {shape_list(attention_mask)}"
248
+ ),
249
+ )
250
+
251
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
252
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
253
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
254
+
255
+ attn_weights = stable_softmax(attn_weights, axis=-1)
256
+
257
+ if layer_head_mask is not None:
258
+ tf.debugging.assert_equal(
259
+ shape_list(layer_head_mask),
260
+ [self.num_heads],
261
+ message=(
262
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
263
+ f" {shape_list(layer_head_mask)}"
264
+ ),
265
+ )
266
+
267
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
268
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
269
+ )
270
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
271
+
272
+ attn_probs = self.dropout(attn_weights, training=training)
273
+ attn_output = tf.matmul(attn_probs, value_states)
274
+
275
+ tf.debugging.assert_equal(
276
+ shape_list(attn_output),
277
+ [bsz * self.num_heads, tgt_len, self.head_dim],
278
+ message=(
279
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
280
+ f" {shape_list(attn_output)}"
281
+ ),
282
+ )
283
+
284
+ attn_output = tf.transpose(
285
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
286
+ )
287
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
288
+
289
+ attn_output = self.out_proj(attn_output)
290
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
291
+
292
+ return attn_output, attn_weights, past_key_value
293
+
294
+ def build(self, input_shape=None):
295
+ if self.built:
296
+ return
297
+ self.built = True
298
+ if getattr(self, "k_proj", None) is not None:
299
+ with tf.name_scope(self.k_proj.name):
300
+ self.k_proj.build([None, None, self.embed_dim])
301
+ if getattr(self, "q_proj", None) is not None:
302
+ with tf.name_scope(self.q_proj.name):
303
+ self.q_proj.build([None, None, self.embed_dim])
304
+ if getattr(self, "v_proj", None) is not None:
305
+ with tf.name_scope(self.v_proj.name):
306
+ self.v_proj.build([None, None, self.embed_dim])
307
+ if getattr(self, "out_proj", None) is not None:
308
+ with tf.name_scope(self.out_proj.name):
309
+ self.out_proj.build([None, None, self.embed_dim])
310
+
311
+
312
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartEncoderLayer with Bart->BlenderbotSmall
313
+ class TFBlenderbotSmallEncoderLayer(keras.layers.Layer):
314
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
315
+ super().__init__(**kwargs)
316
+ self.embed_dim = config.d_model
317
+ self.self_attn = TFBlenderbotSmallAttention(
318
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
319
+ )
320
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
321
+ self.dropout = keras.layers.Dropout(config.dropout)
322
+ self.activation_fn = get_tf_activation(config.activation_function)
323
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
324
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
325
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
326
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
327
+ self.config = config
328
+
329
+ def call(
330
+ self,
331
+ hidden_states: tf.Tensor,
332
+ attention_mask: np.ndarray | tf.Tensor | None,
333
+ layer_head_mask: tf.Tensor | None,
334
+ training: Optional[bool] = False,
335
+ ) -> tf.Tensor:
336
+ """
337
+ Args:
338
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
339
+ attention_mask (`tf.Tensor`): attention mask of size
340
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
341
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
342
+ `(encoder_attention_heads,)`
343
+ """
344
+ residual = hidden_states
345
+ hidden_states, self_attn_weights, _ = self.self_attn(
346
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
347
+ )
348
+
349
+ tf.debugging.assert_equal(
350
+ shape_list(hidden_states),
351
+ shape_list(residual),
352
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
353
+ )
354
+
355
+ hidden_states = self.dropout(hidden_states, training=training)
356
+ hidden_states = residual + hidden_states
357
+ hidden_states = self.self_attn_layer_norm(hidden_states)
358
+
359
+ residual = hidden_states
360
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
361
+ hidden_states = self.activation_dropout(hidden_states, training=training)
362
+ hidden_states = self.fc2(hidden_states)
363
+ hidden_states = self.dropout(hidden_states, training=training)
364
+ hidden_states = residual + hidden_states
365
+ hidden_states = self.final_layer_norm(hidden_states)
366
+
367
+ return hidden_states, self_attn_weights
368
+
369
+ def build(self, input_shape=None):
370
+ if self.built:
371
+ return
372
+ self.built = True
373
+ if getattr(self, "self_attn", None) is not None:
374
+ with tf.name_scope(self.self_attn.name):
375
+ self.self_attn.build(None)
376
+ if getattr(self, "self_attn_layer_norm", None) is not None:
377
+ with tf.name_scope(self.self_attn_layer_norm.name):
378
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
379
+ if getattr(self, "fc1", None) is not None:
380
+ with tf.name_scope(self.fc1.name):
381
+ self.fc1.build([None, None, self.embed_dim])
382
+ if getattr(self, "fc2", None) is not None:
383
+ with tf.name_scope(self.fc2.name):
384
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
385
+ if getattr(self, "final_layer_norm", None) is not None:
386
+ with tf.name_scope(self.final_layer_norm.name):
387
+ self.final_layer_norm.build([None, None, self.embed_dim])
388
+
389
+
390
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartDecoderLayer with Bart->BlenderbotSmall
391
+ class TFBlenderbotSmallDecoderLayer(keras.layers.Layer):
392
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
393
+ super().__init__(**kwargs)
394
+ self.embed_dim = config.d_model
395
+ self.self_attn = TFBlenderbotSmallAttention(
396
+ embed_dim=self.embed_dim,
397
+ num_heads=config.decoder_attention_heads,
398
+ dropout=config.attention_dropout,
399
+ name="self_attn",
400
+ is_decoder=True,
401
+ )
402
+ self.dropout = keras.layers.Dropout(config.dropout)
403
+ self.activation_fn = get_tf_activation(config.activation_function)
404
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
405
+
406
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
407
+ self.encoder_attn = TFBlenderbotSmallAttention(
408
+ self.embed_dim,
409
+ config.decoder_attention_heads,
410
+ dropout=config.attention_dropout,
411
+ name="encoder_attn",
412
+ is_decoder=True,
413
+ )
414
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
415
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
416
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
417
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
418
+ self.config = config
419
+
420
+ def call(
421
+ self,
422
+ hidden_states: tf.Tensor,
423
+ attention_mask: np.ndarray | tf.Tensor | None = None,
424
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
425
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
426
+ layer_head_mask: tf.Tensor | None = None,
427
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
428
+ past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
429
+ training: Optional[bool] = False,
430
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
431
+ """
432
+ Args:
433
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
434
+ attention_mask (`tf.Tensor`): attention mask of size
435
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
436
+ encoder_hidden_states (`tf.Tensor`):
437
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
438
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
439
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
440
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
441
+ `(decoder_attention_heads,)`
442
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
443
+ `(decoder_attention_heads,)`
444
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
445
+ """
446
+ residual = hidden_states
447
+
448
+ # Self Attention
449
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
450
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
451
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
452
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
453
+ hidden_states=hidden_states,
454
+ past_key_value=self_attn_past_key_value,
455
+ attention_mask=attention_mask,
456
+ layer_head_mask=layer_head_mask,
457
+ )
458
+ hidden_states = self.dropout(hidden_states, training=training)
459
+ hidden_states = residual + hidden_states
460
+ hidden_states = self.self_attn_layer_norm(hidden_states)
461
+
462
+ # Cross-Attention Block
463
+ cross_attn_present_key_value = None
464
+ cross_attn_weights = None
465
+ if encoder_hidden_states is not None:
466
+ residual = hidden_states
467
+
468
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
469
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
470
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
471
+ hidden_states=hidden_states,
472
+ key_value_states=encoder_hidden_states,
473
+ attention_mask=encoder_attention_mask,
474
+ layer_head_mask=cross_attn_layer_head_mask,
475
+ past_key_value=cross_attn_past_key_value,
476
+ )
477
+ hidden_states = self.dropout(hidden_states, training=training)
478
+ hidden_states = residual + hidden_states
479
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
480
+
481
+ # add cross-attn to positions 3,4 of present_key_value tuple
482
+ present_key_value = present_key_value + cross_attn_present_key_value
483
+
484
+ # Fully Connected
485
+ residual = hidden_states
486
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
487
+ hidden_states = self.activation_dropout(hidden_states, training=training)
488
+ hidden_states = self.fc2(hidden_states)
489
+ hidden_states = self.dropout(hidden_states, training=training)
490
+ hidden_states = residual + hidden_states
491
+ hidden_states = self.final_layer_norm(hidden_states)
492
+
493
+ return (
494
+ hidden_states,
495
+ self_attn_weights,
496
+ cross_attn_weights,
497
+ present_key_value,
498
+ )
499
+
500
+ def build(self, input_shape=None):
501
+ if self.built:
502
+ return
503
+ self.built = True
504
+ if getattr(self, "self_attn", None) is not None:
505
+ with tf.name_scope(self.self_attn.name):
506
+ self.self_attn.build(None)
507
+ if getattr(self, "self_attn_layer_norm", None) is not None:
508
+ with tf.name_scope(self.self_attn_layer_norm.name):
509
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
510
+ if getattr(self, "encoder_attn", None) is not None:
511
+ with tf.name_scope(self.encoder_attn.name):
512
+ self.encoder_attn.build(None)
513
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
514
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
515
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
516
+ if getattr(self, "fc1", None) is not None:
517
+ with tf.name_scope(self.fc1.name):
518
+ self.fc1.build([None, None, self.embed_dim])
519
+ if getattr(self, "fc2", None) is not None:
520
+ with tf.name_scope(self.fc2.name):
521
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
522
+ if getattr(self, "final_layer_norm", None) is not None:
523
+ with tf.name_scope(self.final_layer_norm.name):
524
+ self.final_layer_norm.build([None, None, self.embed_dim])
525
+
526
+
527
+ class TFBlenderbotSmallPreTrainedModel(TFPreTrainedModel):
528
+ config_class = BlenderbotSmallConfig
529
+ base_model_prefix = "model"
530
+
531
+
532
+ BLENDERBOT_SMALL_START_DOCSTRING = r"""
533
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
534
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
535
+ etc.)
536
+
537
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
538
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
539
+ behavior.
540
+
541
+ <Tip>
542
+
543
+ TensorFlow models and layers in `transformers` accept two formats as input:
544
+
545
+ - having all inputs as keyword arguments (like PyTorch models), or
546
+ - having all inputs as a list, tuple or dict in the first positional argument.
547
+
548
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
549
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
550
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
551
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
552
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
553
+ positional argument:
554
+
555
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
556
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
557
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
558
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
559
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
560
+
561
+ Note that when creating models and layers with
562
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
563
+ about any of this, as you can just pass inputs like you would to any other Python function!
564
+
565
+ </Tip>
566
+
567
+ Args:
568
+ config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.
569
+ Initializing with a config file does not load the weights associated with the model, only the
570
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
571
+ """
572
+
573
+ BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
574
+ Conversation example::
575
+
576
+ ```py
577
+ >>> from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration
578
+
579
+ >>> mname = "facebook/blenderbot_small-90M"
580
+ >>> model = TFBlenderbotSmallForConditionalGeneration.from_pretrained(mname)
581
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
582
+
583
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
584
+ >>> print("Human: ", UTTERANCE)
585
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")
586
+
587
+ >>> reply_ids = model.generate(**inputs)
588
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
589
+ what kind of carbs do they eat? i don't know much about carbs.
590
+
591
+ >>> REPLY = "I'm not sure"
592
+ >>> print("Human: ", REPLY)
593
+ >>> NEXT_UTTERANCE = (
594
+ ... "My friends are cool but they eat too many carbs.</s> "
595
+ ... "<s>what kind of carbs do they eat? i don't know much about carbs.</s> "
596
+ ... "<s>I'm not sure."
597
+ ... )
598
+
599
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")
600
+ >>> inputs.pop("token_type_ids")
601
+ >>> next_reply_ids = model.generate(**inputs)
602
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
603
+ ```
604
+ """
605
+
606
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
607
+ Args:
608
+ input_ids (`tf.Tensor` of shape `({0})`):
609
+ Indices of input sequence tokens in the vocabulary.
610
+
611
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
612
+ [`PreTrainedTokenizer.__call__`] for details.
613
+
614
+ [What are input IDs?](../glossary#input-ids)
615
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
616
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
617
+
618
+ - 1 for tokens that are **not masked**,
619
+ - 0 for tokens that are **masked**.
620
+
621
+ [What are attention masks?](../glossary#attention-mask)
622
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
623
+ Indices of decoder input sequence tokens in the vocabulary.
624
+
625
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
626
+ [`PreTrainedTokenizer.__call__`] for details.
627
+
628
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
629
+
630
+ BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
631
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
632
+ `past_key_values`).
633
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
634
+ If not provided, a default mask that ignores pad tokens will be created. It is not recommended to set this for most use cases.
635
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
636
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
637
+ range `[0, config.max_position_embeddings - 1]`.
638
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
639
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
640
+
641
+ - 1 indicates the head is **not masked**,
642
+ - 0 indicates the head is **masked**.
643
+
644
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
645
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
646
+
647
+ - 1 indicates the head is **not masked**,
648
+ - 0 indicates the head is **masked**.
649
+
650
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
651
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
652
+
653
+ - 1 indicates the head is **not masked**,
654
+ - 0 indicates the head is **masked**.
655
+
656
+ encoder_outputs (`tf.FloatTensor`, *optional*):
657
+ Sequence of hidden-states at the output of the last layer of the encoder, of shape
658
+ `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
659
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
660
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
661
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
662
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
663
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
664
+ use_cache (`bool`, *optional*, defaults to `True`):
665
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
666
+ `past_key_values`). Set to `False` during training, `True` during generation.
667
+ output_attentions (`bool`, *optional*):
668
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
669
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
670
+ config will be used instead.
671
+ output_hidden_states (`bool`, *optional*):
672
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
673
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
674
+ used instead.
675
+ return_dict (`bool`, *optional*):
676
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
677
+ eager mode, in graph mode the value will always be set to True.
678
+ training (`bool`, *optional*, defaults to `False`):
679
+ Whether or not to use the model in training mode (some modules like dropout modules have different
680
+ behaviors between training and evaluation).
681
+ """
682
+
683
+
684
+ @keras_serializable
685
+ class TFBlenderbotSmallEncoder(keras.layers.Layer):
686
+ config_class = BlenderbotSmallConfig
687
+ """
688
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
689
+ [`TFBlenderbotSmallEncoderLayer`].
690
+
691
+ Args:
692
+ config: BlenderbotSmallConfig
693
+ """
694
+
695
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
696
+ super().__init__(**kwargs)
697
+ self.config = config
698
+ self.dropout = keras.layers.Dropout(config.dropout)
699
+ self.layerdrop = config.encoder_layerdrop
700
+ self.padding_idx = config.pad_token_id
701
+ self.max_source_positions = config.max_position_embeddings
702
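+ # Optionally scale the token embeddings by sqrt(d_model) (as in the original Transformer) when `config.scale_embedding` is set.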
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
703
+
704
+ self.embed_tokens = embed_tokens
705
+ self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
706
+ config.max_position_embeddings,
707
+ config.d_model,
708
+ name="embed_positions",
709
+ )
710
+ self.layers = [TFBlenderbotSmallEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
711
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
712
+ self.embed_dim = config.d_model
713
+
714
+ def get_embed_tokens(self):
715
+ return self.embed_tokens
716
+
717
+ def set_embed_tokens(self, embed_tokens):
718
+ self.embed_tokens = embed_tokens
719
+
720
+ @unpack_inputs
721
+ def call(
722
+ self,
723
+ input_ids=None,
724
+ inputs_embeds=None,
725
+ attention_mask=None,
726
+ head_mask=None,
727
+ output_attentions=None,
728
+ output_hidden_states=None,
729
+ return_dict=None,
730
+ training=False,
731
+ ):
732
+ """
733
+ Args:
734
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
735
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
736
+ provide it.
737
+
738
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
739
+ [`PreTrainedTokenizer.__call__`] for details.
740
+
741
+ [What are input IDs?](../glossary#input-ids)
742
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
743
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
744
+
745
+ - 1 for tokens that are **not masked**,
746
+ - 0 for tokens that are **masked**.
747
+
748
+ [What are attention masks?](../glossary#attention-mask)
749
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
750
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
751
+
752
+ - 1 indicates the head is **not masked**,
753
+ - 0 indicates the head is **masked**.
754
+
755
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
756
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
757
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
758
+ than the model's internal embedding lookup matrix.
759
+ output_attentions (`bool`, *optional*):
760
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
761
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
762
+ in the config will be used instead.
763
+ output_hidden_states (`bool`, *optional*):
764
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
765
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
766
+ will be used instead.
767
+ return_dict (`bool`, *optional*):
768
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
769
+ in eager mode, in graph mode the value will always be set to True.
770
+ training (`bool`, *optional*, defaults to `False`):
771
+ Whether or not to use the model in training mode (some modules like dropout modules have different
772
+ behaviors between training and evaluation).
773
+ """
774
+ if input_ids is not None and inputs_embeds is not None:
775
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
776
+ elif input_ids is not None:
777
+ input_shape = shape_list(input_ids)
778
+ elif inputs_embeds is not None:
779
+ input_shape = shape_list(inputs_embeds)[:-1]
780
+ else:
781
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
782
+
783
+ if inputs_embeds is None:
784
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
785
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
786
+
787
+ embed_pos = self.embed_positions(input_shape)
788
+ hidden_states = inputs_embeds + embed_pos
789
+ hidden_states = self.layernorm_embedding(hidden_states)
790
+ hidden_states = self.dropout(hidden_states, training=training)
791
+
792
+ # check attention mask and invert
793
+ if attention_mask is not None:
794
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
795
+ attention_mask = _expand_mask(attention_mask)
796
+ else:
797
+ attention_mask = None
798
+
799
+ encoder_states = () if output_hidden_states else None
800
+ all_attentions = () if output_attentions else None
801
+
802
+ # check if head_mask has a correct number of layers specified if desired
803
+ if head_mask is not None:
804
+ tf.debugging.assert_equal(
805
+ shape_list(head_mask)[0],
806
+ len(self.layers),
807
+ message=(
808
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
809
+ f" {shape_list(head_mask)[0]}."
810
+ ),
811
+ )
812
+
813
+ # encoder layers
814
+ for idx, encoder_layer in enumerate(self.layers):
815
+ if output_hidden_states:
816
+ encoder_states = encoder_states + (hidden_states,)
817
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
818
+ dropout_probability = random.uniform(0, 1)
819
+ if training and (dropout_probability < self.layerdrop): # skip the layer
820
+ continue
821
+
822
+ hidden_states, attn = encoder_layer(
823
+ hidden_states,
824
+ attention_mask,
825
+ head_mask[idx] if head_mask is not None else None,
826
+ )
827
+
828
+ if output_attentions:
829
+ all_attentions += (attn,)
830
+
831
+ if output_hidden_states:
832
+ encoder_states = encoder_states + (hidden_states,)
833
+
834
+ if not return_dict:
835
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
836
+ return TFBaseModelOutput(
837
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
838
+ )
839
+
840
+ def build(self, input_shape=None):
841
+ if self.built:
842
+ return
843
+ self.built = True
844
+ if getattr(self, "embed_positions", None) is not None:
845
+ with tf.name_scope(self.embed_positions.name):
846
+ self.embed_positions.build(None)
847
+ if getattr(self, "layernorm_embedding", None) is not None:
848
+ with tf.name_scope(self.layernorm_embedding.name):
849
+ self.layernorm_embedding.build([None, None, self.embed_dim])
850
+ if getattr(self, "layers", None) is not None:
851
+ for layer in self.layers:
852
+ with tf.name_scope(layer.name):
853
+ layer.build(None)
854
+
855
+
856
+ @keras_serializable
857
+ class TFBlenderbotSmallDecoder(keras.layers.Layer):
858
+ config_class = BlenderbotSmallConfig
859
+ """
860
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBlenderbotSmallDecoderLayer`]
861
+
862
+ Args:
863
+ config: BlenderbotSmallConfig
864
+ embed_tokens: output embedding
865
+ """
866
+
867
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
868
+ super().__init__(**kwargs)
869
+ self.config = config
870
+ self.padding_idx = config.pad_token_id
871
+ self.embed_tokens = embed_tokens
872
+ self.layerdrop = config.decoder_layerdrop
873
+ self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
874
+ config.max_position_embeddings,
875
+ config.d_model,
876
+ name="embed_positions",
877
+ )
878
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
879
+ self.layers = [TFBlenderbotSmallDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
880
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
881
+
882
+ self.dropout = keras.layers.Dropout(config.dropout)
883
+
884
+ def get_embed_tokens(self):
885
+ return self.embed_tokens
886
+
887
+ def set_embed_tokens(self, embed_tokens):
888
+ self.embed_tokens = embed_tokens
889
+
890
+ @unpack_inputs
891
+ def call(
892
+ self,
893
+ input_ids=None,
894
+ inputs_embeds=None,
895
+ attention_mask=None,
896
+ position_ids=None,
897
+ encoder_hidden_states=None,
898
+ encoder_attention_mask=None,
899
+ head_mask=None,
900
+ cross_attn_head_mask=None,
901
+ past_key_values=None,
902
+ use_cache=None,
903
+ output_attentions=None,
904
+ output_hidden_states=None,
905
+ return_dict=None,
906
+ training=False,
907
+ ):
908
+ r"""
909
+ Args:
910
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
911
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
912
+ provide it.
913
+
914
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
915
+ [`PreTrainedTokenizer.__call__`] for details.
916
+
917
+ [What are input IDs?](../glossary#input-ids)
918
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
919
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
920
+
921
+ - 1 for tokens that are **not masked**,
922
+ - 0 for tokens that are **masked**.
923
+
924
+ [What are attention masks?](../glossary#attention-mask)
925
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
926
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
927
+ range `[0, config.max_position_embeddings - 1]`.
928
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
929
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
930
+ of the decoder.
931
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
932
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
933
+ selected in `[0, 1]`:
934
+
935
+ - 1 for tokens that are **not masked**,
936
+ - 0 for tokens that are **masked**.
937
+
938
+ [What are attention masks?](../glossary#attention-mask)
939
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
940
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
941
+
942
+ - 1 indicates the head is **not masked**,
943
+ - 0 indicates the head is **masked**.
944
+
945
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
946
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
947
+
948
+ - 1 indicates the head is **not masked**,
949
+ - 0 indicates the head is **masked**.
950
+
951
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
952
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
953
+ decoding.
954
+
955
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
956
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
957
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
958
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
959
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
960
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
961
+ than the model's internal embedding lookup matrix.
962
+ output_attentions (`bool`, *optional*):
963
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
964
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
965
+ in the config will be used instead.
966
+ output_hidden_states (`bool`, *optional*):
967
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
968
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
969
+ will be used instead.
970
+ return_dict (`bool`, *optional*):
971
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
972
+ in eager mode, in graph mode the value will always be set to True.
973
+ training (`bool`, *optional*, defaults to `False`):
974
+ Whether or not to use the model in training mode (some modules like dropout modules have different
975
+ behaviors between training and evaluation).
976
+ """
977
+ if input_ids is not None and inputs_embeds is not None:
978
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
979
+ elif input_ids is not None:
980
+ input_shape = shape_list(input_ids)
981
+ elif inputs_embeds is not None:
982
+ input_shape = shape_list(inputs_embeds)[:-1]
983
+ else:
984
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
985
+
986
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
987
+
988
+ if inputs_embeds is None:
989
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
990
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
991
+
992
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
993
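+ # During cached generation only the last token is fed (tgt_len == 1), so no causal mask is needed;
+ # otherwise a causal mask is built that also spans any cached (past) positions.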
+ if input_shape[-1] > 1:
994
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
995
+ else:
996
+ combined_attention_mask = _expand_mask(
997
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
998
+ )
999
+
1000
+ if attention_mask is not None:
1001
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
1002
+
1003
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1004
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1005
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
1006
+
1007
+ # embed positions
1008
+ if position_ids is None:
1009
+ positions = self.embed_positions(input_shape, past_key_values_length)
1010
+ else:
1011
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
1012
+
1013
+ hidden_states = self.layernorm_embedding(inputs_embeds) + positions
1014
+ hidden_states = self.dropout(hidden_states, training=training)
1015
+
1016
+ # decoder layers
1017
+ all_hidden_states = () if output_hidden_states else None
1018
+ all_self_attns = () if output_attentions else None
1019
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
1020
+ present_key_values = () if use_cache else None
1021
+
1022
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
1023
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
1024
+ if attn_mask is not None:
1025
+ tf.debugging.assert_equal(
1026
+ shape_list(attn_mask)[0],
1027
+ len(self.layers),
1028
+ message=(
1029
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
1030
+ f" {shape_list(attn_mask)[0]}."
1031
+ ),
1032
+ )
1033
+
1034
+ for idx, decoder_layer in enumerate(self.layers):
1035
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1036
+ if output_hidden_states:
1037
+ all_hidden_states += (hidden_states,)
1038
+ dropout_probability = random.uniform(0, 1)
1039
+
1040
+ if training and (dropout_probability < self.layerdrop):
1041
+ continue
1042
+
1043
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1044
+
1045
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
1046
+ hidden_states,
1047
+ attention_mask=combined_attention_mask,
1048
+ encoder_hidden_states=encoder_hidden_states,
1049
+ encoder_attention_mask=encoder_attention_mask,
1050
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
1051
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1052
+ past_key_value=past_key_value,
1053
+ )
1054
+
1055
+ if use_cache:
1056
+ present_key_values += (present_key_value,)
1057
+
1058
+ if output_attentions:
1059
+ all_self_attns += (layer_self_attn,)
1060
+
1061
+ if encoder_hidden_states is not None:
1062
+ all_cross_attns += (layer_cross_attn,)
1063
+
1064
+ if output_hidden_states:
1065
+ all_hidden_states += (hidden_states,)
1066
+
1067
+ if not return_dict:
1068
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
1069
+ else:
1070
+ return TFBaseModelOutputWithPastAndCrossAttentions(
1071
+ last_hidden_state=hidden_states,
1072
+ past_key_values=present_key_values,
1073
+ hidden_states=all_hidden_states,
1074
+ attentions=all_self_attns,
1075
+ cross_attentions=all_cross_attns,
1076
+ )
1077
+
1078
+ def build(self, input_shape=None):
1079
+ if self.built:
1080
+ return
1081
+ self.built = True
1082
+ if getattr(self, "embed_positions", None) is not None:
1083
+ with tf.name_scope(self.embed_positions.name):
1084
+ self.embed_positions.build(None)
1085
+ if getattr(self, "layernorm_embedding", None) is not None:
1086
+ with tf.name_scope(self.layernorm_embedding.name):
1087
+ self.layernorm_embedding.build([None, None, self.config.d_model])
1088
+ if getattr(self, "layers", None) is not None:
1089
+ for layer in self.layers:
1090
+ with tf.name_scope(layer.name):
1091
+ layer.build(None)
1092
+
1093
+
1094
+ @keras_serializable
1095
+ class TFBlenderbotSmallMainLayer(keras.layers.Layer):
1096
+ config_class = BlenderbotSmallConfig
1097
+
1098
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
1099
+ super().__init__(**kwargs)
1100
+
1101
+ self.config = config
1102
+ self.shared = keras.layers.Embedding(
1103
+ input_dim=config.vocab_size,
1104
+ output_dim=config.d_model,
1105
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
1106
+ name="model.shared",
1107
+ )
1108
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
1109
+ self.shared.load_weight_prefix = "model.shared"
1110
+
1111
+ self.encoder = TFBlenderbotSmallEncoder(config, self.shared, name="encoder")
1112
+ self.decoder = TFBlenderbotSmallDecoder(config, self.shared, name="decoder")
1113
+
1114
+ def get_input_embeddings(self):
1115
+ return self.shared
1116
+
1117
+ def set_input_embeddings(self, new_embeddings):
1118
+ self.shared = new_embeddings
1119
+ self.encoder.embed_tokens = self.shared
1120
+ self.decoder.embed_tokens = self.shared
1121
+
1122
+ @unpack_inputs
1123
+ def call(
1124
+ self,
1125
+ input_ids=None,
1126
+ attention_mask=None,
1127
+ decoder_input_ids=None,
1128
+ decoder_attention_mask=None,
1129
+ decoder_position_ids=None,
1130
+ head_mask=None,
1131
+ decoder_head_mask=None,
1132
+ cross_attn_head_mask=None,
1133
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1134
+ past_key_values=None,
1135
+ inputs_embeds=None,
1136
+ decoder_inputs_embeds=None,
1137
+ use_cache=None,
1138
+ output_attentions=None,
1139
+ output_hidden_states=None,
1140
+ return_dict=None,
1141
+ training=False,
1142
+ **kwargs,
1143
+ ):
1144
+ output_hidden_states = (
1145
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1146
+ )
1147
+
1148
+ if encoder_outputs is None:
1149
+ encoder_outputs = self.encoder(
1150
+ input_ids=input_ids,
1151
+ attention_mask=attention_mask,
1152
+ head_mask=head_mask,
1153
+ inputs_embeds=inputs_embeds,
1154
+ output_attentions=output_attentions,
1155
+ output_hidden_states=output_hidden_states,
1156
+ return_dict=return_dict,
1157
+ training=training,
1158
+ )
1159
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
1160
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
1161
+ encoder_outputs = TFBaseModelOutput(
1162
+ last_hidden_state=encoder_outputs[0],
1163
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1164
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1165
+ )
1166
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
1167
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
1168
+ encoder_outputs = encoder_outputs.to_tuple()
1169
+
1170
+ decoder_outputs = self.decoder(
1171
+ decoder_input_ids,
1172
+ attention_mask=decoder_attention_mask,
1173
+ position_ids=decoder_position_ids,
1174
+ encoder_hidden_states=encoder_outputs[0],
1175
+ encoder_attention_mask=attention_mask,
1176
+ head_mask=decoder_head_mask,
1177
+ cross_attn_head_mask=cross_attn_head_mask,
1178
+ past_key_values=past_key_values,
1179
+ inputs_embeds=decoder_inputs_embeds,
1180
+ use_cache=use_cache,
1181
+ output_attentions=output_attentions,
1182
+ output_hidden_states=output_hidden_states,
1183
+ return_dict=return_dict,
1184
+ training=training,
1185
+ )
1186
+
1187
+ if not return_dict:
1188
+ return decoder_outputs + encoder_outputs
1189
+
1190
+ return TFSeq2SeqModelOutput(
1191
+ last_hidden_state=decoder_outputs.last_hidden_state,
1192
+ past_key_values=decoder_outputs.past_key_values,
1193
+ decoder_hidden_states=decoder_outputs.hidden_states,
1194
+ decoder_attentions=decoder_outputs.attentions,
1195
+ cross_attentions=decoder_outputs.cross_attentions,
1196
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1197
+ encoder_hidden_states=encoder_outputs.hidden_states,
1198
+ encoder_attentions=encoder_outputs.attentions,
1199
+ )
1200
+
1201
+ def build(self, input_shape=None):
1202
+ if self.built:
1203
+ return
1204
+ self.built = True
1205
+ # The shared/tied weights expect to be in the model base namespace
1206
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
1207
+ # the current one.
1208
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
1209
+ self.shared.build(None)
1210
+ if getattr(self, "encoder", None) is not None:
1211
+ with tf.name_scope(self.encoder.name):
1212
+ self.encoder.build(None)
1213
+ if getattr(self, "decoder", None) is not None:
1214
+ with tf.name_scope(self.decoder.name):
1215
+ self.decoder.build(None)
1216
+
1217
+
1218
+ @add_start_docstrings(
1219
+ "The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top.",
1220
+ BLENDERBOT_SMALL_START_DOCSTRING,
1221
+ )
1222
+ class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel):
1223
+ def __init__(self, config: BlenderbotSmallConfig, *inputs, **kwargs):
1224
+ super().__init__(config, *inputs, **kwargs)
1225
+
1226
+ self.model = TFBlenderbotSmallMainLayer(config, name="model")
1227
+
1228
+ def get_encoder(self):
1229
+ return self.model.encoder
1230
+
1231
+ def get_decoder(self):
1232
+ return self.model.decoder
1233
+
1234
+ @unpack_inputs
1235
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1236
+ @add_code_sample_docstrings(
1237
+ checkpoint=_CHECKPOINT_FOR_DOC,
1238
+ output_type=TFSeq2SeqModelOutput,
1239
+ config_class=_CONFIG_FOR_DOC,
1240
+ )
1241
+ def call(
1242
+ self,
1243
+ input_ids: tf.Tensor | None = None,
1244
+ attention_mask: tf.Tensor | None = None,
1245
+ decoder_input_ids: tf.Tensor | None = None,
1246
+ decoder_attention_mask: tf.Tensor | None = None,
1247
+ decoder_position_ids: tf.Tensor | None = None,
1248
+ head_mask: tf.Tensor | None = None,
1249
+ decoder_head_mask: tf.Tensor | None = None,
1250
+ cross_attn_head_mask: tf.Tensor | None = None,
1251
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1252
+ past_key_values: List[tf.Tensor] | None = None,
1253
+ inputs_embeds: tf.Tensor | None = None,
1254
+ decoder_inputs_embeds: tf.Tensor | None = None,
1255
+ use_cache: Optional[bool] = None,
1256
+ output_attentions: Optional[bool] = None,
1257
+ output_hidden_states: Optional[bool] = None,
1258
+ return_dict: Optional[bool] = None,
1259
+ training: Optional[bool] = False,
1260
+ **kwargs,
1261
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
1262
+ outputs = self.model(
1263
+ input_ids=input_ids,
1264
+ attention_mask=attention_mask,
1265
+ decoder_input_ids=decoder_input_ids,
1266
+ decoder_attention_mask=decoder_attention_mask,
1267
+ decoder_position_ids=decoder_position_ids,
1268
+ head_mask=head_mask,
1269
+ decoder_head_mask=decoder_head_mask,
1270
+ cross_attn_head_mask=cross_attn_head_mask,
1271
+ encoder_outputs=encoder_outputs,
1272
+ past_key_values=past_key_values,
1273
+ inputs_embeds=inputs_embeds,
1274
+ decoder_inputs_embeds=decoder_inputs_embeds,
1275
+ use_cache=use_cache,
1276
+ output_attentions=output_attentions,
1277
+ output_hidden_states=output_hidden_states,
1278
+ return_dict=return_dict,
1279
+ training=training,
1280
+ )
1281
+
1282
+ return outputs
1283
+
1284
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
1285
+ def serving_output(self, output):
1286
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1287
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1288
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1289
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1290
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1291
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1292
+
1293
+ return TFSeq2SeqModelOutput(
1294
+ last_hidden_state=output.last_hidden_state,
1295
+ past_key_values=pkv,
1296
+ decoder_hidden_states=dec_hs,
1297
+ decoder_attentions=dec_attns,
1298
+ cross_attentions=cross_attns,
1299
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1300
+ encoder_hidden_states=enc_hs,
1301
+ encoder_attentions=enc_attns,
1302
+ )
1303
+
1304
+ def build(self, input_shape=None):
1305
+ if self.built:
1306
+ return
1307
+ self.built = True
1308
+ if getattr(self, "model", None) is not None:
1309
+ with tf.name_scope(self.model.name):
1310
+ self.model.build(None)
1311
+
1312
+
1313
+ # Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
1314
+ class BiasLayer(keras.layers.Layer):
1315
+ """
1316
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
1317
+ so all weights have to be registered in a layer.
1318
+ """
1319
+
1320
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
1321
+ super().__init__(name=name, **kwargs)
1322
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
1323
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
1324
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
1325
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
1326
+
1327
+ def call(self, x):
1328
+ return x + self.bias
1329
+
1330
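+ # Minimal usage sketch for `BiasLayer` (hypothetical shapes, for illustration only): the class below registers a
+ # frozen, zero-initialized bias of shape [1, vocab_size] and adds it on top of the LM logits.
+ #
+ #     bias_layer = BiasLayer(name="final_logits_bias", shape=[1, 512], initializer="zeros", trainable=False)
+ #     logits = bias_layer(tf.zeros((2, 7, 512)))  # the bias broadcasts over the batch and sequence dimensions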
+
1331
+ @add_start_docstrings(
1332
+ "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.",
1333
+ BLENDERBOT_SMALL_START_DOCSTRING,
1334
+ )
1335
+ class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel, TFCausalLanguageModelingLoss):
1336
+ _keys_to_ignore_on_load_unexpected = [
1337
+ r"model.encoder.embed_tokens.weight",
1338
+ r"model.decoder.embed_tokens.weight",
1339
+ ]
1340
+
1341
+ def __init__(self, config, *inputs, **kwargs):
1342
+ super().__init__(config, *inputs, **kwargs)
1343
+ self.model = TFBlenderbotSmallMainLayer(config, name="model")
1344
+ self.use_cache = config.use_cache
1345
+ # final_logits_bias is registered as a buffer in PyTorch, so it is kept non-trainable here for consistency.
1346
+ self.bias_layer = BiasLayer(
1347
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
1348
+ )
1349
+
1350
+ def get_decoder(self):
1351
+ return self.model.decoder
1352
+
1353
+ def get_encoder(self):
1354
+ return self.model.encoder
1355
+
1356
+ def get_output_embeddings(self):
1357
+ return self.get_input_embeddings()
1358
+
1359
+ def set_output_embeddings(self, value):
1360
+ self.set_input_embeddings(value)
1361
+
1362
+ def get_bias(self):
1363
+ return {"final_logits_bias": self.bias_layer.bias}
1364
+
1365
+ def set_bias(self, value):
1366
+ # Replaces the existing layers containing bias for correct (de)serialization.
1367
+ vocab_size = value["final_logits_bias"].shape[-1]
1368
+ self.bias_layer = BiasLayer(
1369
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
1370
+ )
1371
+ self.bias_layer.bias.assign(value["final_logits_bias"])
1372
+
1373
+ @unpack_inputs
1374
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
1375
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1376
+ @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
1377
+ def call(
1378
+ self,
1379
+ input_ids: tf.Tensor | None = None,
1380
+ attention_mask: tf.Tensor | None = None,
1381
+ decoder_input_ids: tf.Tensor | None = None,
1382
+ decoder_attention_mask: tf.Tensor | None = None,
1383
+ decoder_position_ids: tf.Tensor | None = None,
1384
+ head_mask: tf.Tensor | None = None,
1385
+ decoder_head_mask: tf.Tensor | None = None,
1386
+ cross_attn_head_mask: tf.Tensor | None = None,
1387
+ encoder_outputs: Optional[TFBaseModelOutput] = None,
1388
+ past_key_values: List[tf.Tensor] | None = None,
1389
+ inputs_embeds: tf.Tensor | None = None,
1390
+ decoder_inputs_embeds: tf.Tensor | None = None,
1391
+ use_cache: Optional[bool] = None,
1392
+ output_attentions: Optional[bool] = None,
1393
+ output_hidden_states: Optional[bool] = None,
1394
+ return_dict: Optional[bool] = None,
1395
+ labels: tf.Tensor | None = None,
1396
+ training: Optional[bool] = False,
1397
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
1398
+ r"""
1399
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1400
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1401
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1402
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1403
+
1404
+ Returns:
1405
+
1406
+ """
1407
+
1408
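+ # Pad tokens in the labels are replaced by -100 so they are ignored by the loss; caching is disabled, and
+ # `decoder_input_ids` are built by shifting the labels to the right when the caller did not provide them.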
+ if labels is not None:
1409
+ labels = tf.where(
1410
+ labels == self.config.pad_token_id,
1411
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
1412
+ labels,
1413
+ )
1414
+ use_cache = False
1415
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1416
+ decoder_input_ids = shift_tokens_right(
1417
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1418
+ )
1419
+
1420
+ outputs = self.model(
1421
+ input_ids,
1422
+ attention_mask=attention_mask,
1423
+ decoder_input_ids=decoder_input_ids,
1424
+ decoder_attention_mask=decoder_attention_mask,
1425
+ decoder_position_ids=decoder_position_ids,
1426
+ head_mask=head_mask,
1427
+ decoder_head_mask=decoder_head_mask,
1428
+ cross_attn_head_mask=cross_attn_head_mask,
1429
+ encoder_outputs=encoder_outputs,
1430
+ past_key_values=past_key_values,
1431
+ inputs_embeds=inputs_embeds,
1432
+ decoder_inputs_embeds=decoder_inputs_embeds,
1433
+ use_cache=use_cache,
1434
+ output_attentions=output_attentions,
1435
+ output_hidden_states=output_hidden_states,
1436
+ return_dict=return_dict,
1437
+ training=training,
1438
+ )
1439
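+ # Project the decoder hidden states onto the vocabulary with the (tied) shared embedding matrix, then add the
+ # non-trainable final logits bias.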
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
1440
+ lm_logits = self.bias_layer(lm_logits)
1441
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
1442
+
1443
+ if not return_dict:
1444
+ output = (lm_logits,) + outputs[1:]
1445
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1446
+ return TFSeq2SeqLMOutput(
1447
+ loss=masked_lm_loss,
1448
+ logits=lm_logits,
1449
+ past_key_values=outputs.past_key_values, # index 1 of d outputs
1450
+ decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs
1451
+ decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs
1452
+ cross_attentions=outputs.cross_attentions, # index 4 of d outputs
1453
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
1454
+ encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out
1455
+ encoder_attentions=outputs.encoder_attentions, # 2 of e out
1456
+ )
1457
+
1458
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
1459
+ def serving_output(self, output):
1460
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1461
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1462
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1463
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1464
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1465
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1466
+
1467
+ return TFSeq2SeqLMOutput(
1468
+ logits=output.logits,
1469
+ past_key_values=pkv,
1470
+ decoder_hidden_states=dec_hs,
1471
+ decoder_attentions=dec_attns,
1472
+ cross_attentions=cross_attns,
1473
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1474
+ encoder_hidden_states=enc_hs,
1475
+ encoder_attentions=enc_attns,
1476
+ )
1477
+
1478
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
1479
+ def prepare_inputs_for_generation(
1480
+ self,
1481
+ decoder_input_ids,
1482
+ past_key_values=None,
1483
+ attention_mask=None,
1484
+ decoder_attention_mask=None,
1485
+ head_mask=None,
1486
+ decoder_head_mask=None,
1487
+ cross_attn_head_mask=None,
1488
+ use_cache=None,
1489
+ encoder_outputs=None,
1490
+ **kwargs,
1491
+ ):
1492
+ # cut decoder_input_ids if past_key_values is used
1493
+ if past_key_values is not None:
1494
+ decoder_input_ids = decoder_input_ids[:, -1:]
1495
+
1496
+ if decoder_attention_mask is not None: # xla
1497
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
1498
+ elif past_key_values is not None: # no xla + past_key_values
1499
+ decoder_position_ids = past_key_values[0][0].shape[2]
1500
+ else: # no xla + no past_key_values
1501
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
1502
+
1503
+ return {
1504
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1505
+ "encoder_outputs": encoder_outputs,
1506
+ "past_key_values": past_key_values,
1507
+ "decoder_input_ids": decoder_input_ids,
1508
+ "attention_mask": attention_mask,
1509
+ "decoder_attention_mask": decoder_attention_mask,
1510
+ "decoder_position_ids": decoder_position_ids,
1511
+ "head_mask": head_mask,
1512
+ "decoder_head_mask": decoder_head_mask,
1513
+ "cross_attn_head_mask": cross_attn_head_mask,
1514
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1515
+ }
1516
+
1517
+ def build(self, input_shape=None):
1518
+ if self.built:
1519
+ return
1520
+ self.built = True
1521
+ if getattr(self, "model", None) is not None:
1522
+ with tf.name_scope(self.model.name):
1523
+ self.model.build(None)
1524
+ if getattr(self, "bias_layer", None) is not None:
1525
+ with tf.name_scope(self.bias_layer.name):
1526
+ self.bias_layer.build(None)
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__init__.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
28
+ "convert_funnel_original_tf_checkpoint_to_pytorch": [],
29
+ "tokenization_funnel": ["FunnelTokenizer"],
30
+ }
31
+
32
+ try:
33
+ if not is_tokenizers_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
39
+
40
+ try:
41
+ if not is_torch_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_funnel"] = [
47
+ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
48
+ "FunnelBaseModel",
49
+ "FunnelForMaskedLM",
50
+ "FunnelForMultipleChoice",
51
+ "FunnelForPreTraining",
52
+ "FunnelForQuestionAnswering",
53
+ "FunnelForSequenceClassification",
54
+ "FunnelForTokenClassification",
55
+ "FunnelModel",
56
+ "FunnelPreTrainedModel",
57
+ "load_tf_weights_in_funnel",
58
+ ]
59
+
60
+ try:
61
+ if not is_tf_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ _import_structure["modeling_tf_funnel"] = [
67
+ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
68
+ "TFFunnelBaseModel",
69
+ "TFFunnelForMaskedLM",
70
+ "TFFunnelForMultipleChoice",
71
+ "TFFunnelForPreTraining",
72
+ "TFFunnelForQuestionAnswering",
73
+ "TFFunnelForSequenceClassification",
74
+ "TFFunnelForTokenClassification",
75
+ "TFFunnelModel",
76
+ "TFFunnelPreTrainedModel",
77
+ ]
78
+
79
+
80
+ if TYPE_CHECKING:
81
+ from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
82
+ from .tokenization_funnel import FunnelTokenizer
83
+
84
+ try:
85
+ if not is_tokenizers_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ from .tokenization_funnel_fast import FunnelTokenizerFast
91
+
92
+ try:
93
+ if not is_torch_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ from .modeling_funnel import (
99
+ FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
100
+ FunnelBaseModel,
101
+ FunnelForMaskedLM,
102
+ FunnelForMultipleChoice,
103
+ FunnelForPreTraining,
104
+ FunnelForQuestionAnswering,
105
+ FunnelForSequenceClassification,
106
+ FunnelForTokenClassification,
107
+ FunnelModel,
108
+ FunnelPreTrainedModel,
109
+ load_tf_weights_in_funnel,
110
+ )
111
+
112
+ try:
113
+ if not is_tf_available():
114
+ raise OptionalDependencyNotAvailable()
115
+ except OptionalDependencyNotAvailable:
116
+ pass
117
+ else:
118
+ from .modeling_tf_funnel import (
119
+ TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
120
+ TFFunnelBaseModel,
121
+ TFFunnelForMaskedLM,
122
+ TFFunnelForMultipleChoice,
123
+ TFFunnelForPreTraining,
124
+ TFFunnelForQuestionAnswering,
125
+ TFFunnelForSequenceClassification,
126
+ TFFunnelForTokenClassification,
127
+ TFFunnelModel,
128
+ TFFunnelPreTrainedModel,
129
+ )
130
+
131
+ else:
132
+ import sys
133
+
134
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/configuration_funnel.py ADDED
@@ -0,0 +1,166 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, Hugging Face
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Funnel Transformer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class FunnelConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`FunnelModel`] or a [`TFFunnelModel`]. It is used to
30
+ instantiate a Funnel Transformer model according to the specified arguments, defining the model architecture.
31
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Funnel
32
+ Transformer [funnel-transformer/small](https://huggingface.co/funnel-transformer/small) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 30522):
39
+ Vocabulary size of the Funnel transformer. Defines the number of different tokens that can be represented
40
+ by the `inputs_ids` passed when calling [`FunnelModel`] or [`TFFunnelModel`].
41
+ block_sizes (`List[int]`, *optional*, defaults to `[4, 4, 4]`):
42
+ The sizes of the blocks used in the model.
43
+ block_repeats (`List[int]`, *optional*):
44
+ If passed along, each layer of each block is repeated the number of times indicated.
45
+ num_decoder_layers (`int`, *optional*, defaults to 2):
46
+ The number of layers in the decoder (when not using the base model).
47
+ d_model (`int`, *optional*, defaults to 768):
48
+ Dimensionality of the model's hidden states.
49
+ n_head (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ d_head (`int`, *optional*, defaults to 64):
52
+ Dimensionality of the model's heads.
53
+ d_inner (`int`, *optional*, defaults to 3072):
54
+ Inner dimension in the feed-forward blocks.
55
+ hidden_act (`str` or `callable`, *optional*, defaults to `"gelu_new"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
58
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_dropout (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for the attention probabilities.
62
+ activation_dropout (`float`, *optional*, defaults to 0.0):
63
+ The dropout probability used between the two layers of the feed-forward blocks.
64
+ initializer_range (`float`, *optional*, defaults to 0.1):
65
+ The upper bound of the *uniform initializer* for initializing all weight matrices in attention layers.
66
+ initializer_std (`float`, *optional*):
67
+ The standard deviation of the *normal initializer* for initializing the embedding matrix and the weight of
68
+ linear layers. Will default to 1 for the embedding matrix and the value given by Xavier initialization for
69
+ linear layers.
70
+ layer_norm_eps (`float`, *optional*, defaults to 1e-09):
71
+ The epsilon used by the layer normalization layers.
72
+ pooling_type (`str`, *optional*, defaults to `"mean"`):
73
+ Possible values are `"mean"` or `"max"`. The way pooling is performed at the beginning of each block.
74
+ attention_type (`str`, *optional*, defaults to `"relative_shift"`):
75
+ Possible values are `"relative_shift"` or `"factorized"`. The former is faster on CPU/GPU while the latter
76
+ is faster on TPU.
77
+ separate_cls (`bool`, *optional*, defaults to `True`):
78
+ Whether or not to separate the cls token when applying pooling.
79
+ truncate_seq (`bool`, *optional*, defaults to `True`):
80
+ When using `separate_cls`, whether or not to truncate the last token when pooling, to avoid getting a
81
+ sequence length that is not a multiple of 2.
82
+ pool_q_only (`bool`, *optional*, defaults to `True`):
83
+ Whether or not to apply the pooling only to the query or to query, key and values for the attention layers.
84
+ """
85
+
86
+ model_type = "funnel"
87
+ attribute_map = {
88
+ "hidden_size": "d_model",
89
+ "num_attention_heads": "n_head",
90
+ }
91
+
92
+ def __init__(
93
+ self,
94
+ vocab_size=30522,
95
+ block_sizes=[4, 4, 4],
96
+ block_repeats=None,
97
+ num_decoder_layers=2,
98
+ d_model=768,
99
+ n_head=12,
100
+ d_head=64,
101
+ d_inner=3072,
102
+ hidden_act="gelu_new",
103
+ hidden_dropout=0.1,
104
+ attention_dropout=0.1,
105
+ activation_dropout=0.0,
106
+ initializer_range=0.1,
107
+ initializer_std=None,
108
+ layer_norm_eps=1e-9,
109
+ pooling_type="mean",
110
+ attention_type="relative_shift",
111
+ separate_cls=True,
112
+ truncate_seq=True,
113
+ pool_q_only=True,
114
+ **kwargs,
115
+ ):
116
+ self.vocab_size = vocab_size
117
+ self.block_sizes = block_sizes
118
+ self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
119
+ assert len(block_sizes) == len(
120
+ self.block_repeats
121
+ ), "`block_sizes` and `block_repeats` should have the same length."
122
+ self.num_decoder_layers = num_decoder_layers
123
+ self.d_model = d_model
124
+ self.n_head = n_head
125
+ self.d_head = d_head
126
+ self.d_inner = d_inner
127
+ self.hidden_act = hidden_act
128
+ self.hidden_dropout = hidden_dropout
129
+ self.attention_dropout = attention_dropout
130
+ self.activation_dropout = activation_dropout
131
+ self.initializer_range = initializer_range
132
+ self.initializer_std = initializer_std
133
+ self.layer_norm_eps = layer_norm_eps
134
+ assert pooling_type in [
135
+ "mean",
136
+ "max",
137
+ ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
138
+ self.pooling_type = pooling_type
139
+ assert attention_type in [
140
+ "relative_shift",
141
+ "factorized",
142
+ ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
143
+ self.attention_type = attention_type
144
+ self.separate_cls = separate_cls
145
+ self.truncate_seq = truncate_seq
146
+ self.pool_q_only = pool_q_only
147
+
148
+ super().__init__(**kwargs)
149
+
150
+ @property
151
+ def num_hidden_layers(self):
152
+ return sum(self.block_sizes)
153
+
154
+ @num_hidden_layers.setter
155
+ def num_hidden_layers(self, value):
156
+ raise NotImplementedError(
157
+ "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
158
+ )
159
+
160
+ @property
161
+ def num_blocks(self):
162
+ return len(self.block_sizes)
163
+
164
+ @num_blocks.setter
165
+ def num_blocks(self, value):
166
+ raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
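+
+ # Usage sketch (illustrative only; the values shown are the documented defaults):
+ #     config = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 1, 1])
+ #     config.num_hidden_layers  # 12 -- derived as sum(block_sizes)
+ #     config.num_blocks  # 3 -- derived as len(block_sizes)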
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,65 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Funnel checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
23
+ from transformers.utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
30
+ # Initialise PyTorch model
31
+ config = FunnelConfig.from_json_file(config_file)
32
+ print(f"Building PyTorch model from configuration: {config}")
33
+ model = FunnelBaseModel(config) if base_model else FunnelModel(config)
34
+
35
+ # Load weights from tf checkpoint
36
+ load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
37
+
38
+ # Save pytorch-model
39
+ print(f"Save PyTorch model to {pytorch_dump_path}")
40
+ torch.save(model.state_dict(), pytorch_dump_path)
41
+
42
+
43
+ if __name__ == "__main__":
44
+ parser = argparse.ArgumentParser()
45
+ # Required parameters
46
+ parser.add_argument(
47
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
48
+ )
49
+ parser.add_argument(
50
+ "--config_file",
51
+ default=None,
52
+ type=str,
53
+ required=True,
54
+ help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
55
+ )
56
+ parser.add_argument(
57
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
58
+ )
59
+ parser.add_argument(
60
+ "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
61
+ )
62
+ args = parser.parse_args()
63
+ convert_tf_checkpoint_to_pytorch(
64
+ args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
65
+ )
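+
+ # Example invocation (all paths below are placeholders):
+ #     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
+ #         --tf_checkpoint_path /path/to/tf_checkpoint \
+ #         --config_file /path/to/config.json \
+ #         --pytorch_dump_path /path/to/pytorch_model.bin \
+ #         --base_model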
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_funnel.py ADDED
@@ -0,0 +1,1599 @@
1
+ # coding=utf-8
2
+ # Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Funnel Transformer model."""
16
+
17
+ import os
18
+ from dataclasses import dataclass
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ MaskedLMOutput,
30
+ MultipleChoiceModelOutput,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutput,
33
+ TokenClassifierOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...utils import (
37
+ ModelOutput,
38
+ add_code_sample_docstrings,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ )
44
+ from .configuration_funnel import FunnelConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CONFIG_FOR_DOC = "FunnelConfig"
50
+ _CHECKPOINT_FOR_DOC = "funnel-transformer/small"
51
+
52
+
53
+ from ..deprecated._archive_maps import FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
54
+
55
+
56
+ INF = 1e6
57
+
58
+
59
+ def load_tf_weights_in_funnel(model, config, tf_checkpoint_path):
60
+ """Load tf checkpoints in a pytorch model."""
61
+ try:
62
+ import re
63
+
64
+ import numpy as np
65
+ import tensorflow as tf
66
+ except ImportError:
67
+ logger.error(
68
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
69
+ "https://www.tensorflow.org/install/ for installation instructions."
70
+ )
71
+ raise
72
+ tf_path = os.path.abspath(tf_checkpoint_path)
73
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
74
+ # Load weights from TF model
75
+ init_vars = tf.train.list_variables(tf_path)
76
+ names = []
77
+ arrays = []
78
+ for name, shape in init_vars:
79
+ logger.info(f"Loading TF weight {name} with shape {shape}")
80
+ array = tf.train.load_variable(tf_path, name)
81
+ names.append(name)
82
+ arrays.append(array)
83
+
84
+ _layer_map = {
85
+ "k": "k_head",
86
+ "q": "q_head",
87
+ "v": "v_head",
88
+ "o": "post_proj",
89
+ "layer_1": "linear_1",
90
+ "layer_2": "linear_2",
91
+ "rel_attn": "attention",
92
+ "ff": "ffn",
93
+ "kernel": "weight",
94
+ "gamma": "weight",
95
+ "beta": "bias",
96
+ "lookup_table": "weight",
97
+ "word_embedding": "word_embeddings",
98
+ "input": "embeddings",
99
+ }
100
+
101
+ for name, array in zip(names, arrays):
102
+ name = name.split("/")
103
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
104
+ # which are not required for using pretrained model
105
+ if any(
106
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
107
+ for n in name
108
+ ):
109
+ logger.info(f"Skipping {'/'.join(name)}")
110
+ continue
111
+ if name[0] == "generator":
112
+ continue
113
+ pointer = model
114
+ skipped = False
115
+ for m_name in name[1:]:
116
+ if not isinstance(pointer, FunnelPositionwiseFFN) and re.fullmatch(r"layer_\d+", m_name):
117
+ layer_index = int(re.search(r"layer_(\d+)", m_name).groups()[0])
118
+ if layer_index < config.num_hidden_layers:
119
+ block_idx = 0
120
+ while layer_index >= config.block_sizes[block_idx]:
121
+ layer_index -= config.block_sizes[block_idx]
122
+ block_idx += 1
123
+ pointer = pointer.blocks[block_idx][layer_index]
124
+ else:
125
+ layer_index -= config.num_hidden_layers
126
+ pointer = pointer.layers[layer_index]
127
+ elif m_name == "r" and isinstance(pointer, FunnelRelMultiheadAttention):
128
+ pointer = pointer.r_kernel
129
+ break
130
+ elif m_name in _layer_map:
131
+ pointer = getattr(pointer, _layer_map[m_name])
132
+ else:
133
+ try:
134
+ pointer = getattr(pointer, m_name)
135
+ except AttributeError:
136
+ print(f"Skipping {'/'.join(name)}", array.shape)
137
+ skipped = True
138
+ break
139
+ if not skipped:
140
+ if len(pointer.shape) != len(array.shape):
141
+ array = array.reshape(pointer.shape)
142
+ if m_name == "kernel":
143
+ array = np.transpose(array)
144
+ pointer.data = torch.from_numpy(array)
145
+
146
+ return model
147
+
148
+
149
+ class FunnelEmbeddings(nn.Module):
150
+ def __init__(self, config: FunnelConfig) -> None:
151
+ super().__init__()
152
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
153
+ self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
154
+ self.dropout = nn.Dropout(config.hidden_dropout)
155
+
156
+ def forward(
157
+ self, input_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None
158
+ ) -> torch.Tensor:
159
+ if inputs_embeds is None:
160
+ inputs_embeds = self.word_embeddings(input_ids)
161
+ embeddings = self.layer_norm(inputs_embeds)
162
+ embeddings = self.dropout(embeddings)
163
+ return embeddings
164
+
165
+
166
+ class FunnelAttentionStructure(nn.Module):
167
+ """
168
+ Contains helpers for `FunnelRelMultiheadAttention`.
169
+ """
170
+
171
+ cls_token_type_id: int = 2
172
+
173
+ def __init__(self, config: FunnelConfig) -> None:
174
+ super().__init__()
175
+ self.config = config
176
+ self.sin_dropout = nn.Dropout(config.hidden_dropout)
177
+ self.cos_dropout = nn.Dropout(config.hidden_dropout)
178
+ # Track where we are at in terms of pooling from the original input, e.g., by how much the sequence length was
179
+ # divided.
180
+ self.pooling_mult = None
181
+
182
+ def init_attention_inputs(
183
+ self,
184
+ inputs_embeds: torch.Tensor,
185
+ attention_mask: Optional[torch.Tensor] = None,
186
+ token_type_ids: Optional[torch.Tensor] = None,
187
+ ) -> Tuple[torch.Tensor]:
188
+ """Returns the attention inputs associated to the inputs of the model."""
189
+ # inputs_embeds has shape batch_size x seq_len x d_model
190
+ # attention_mask and token_type_ids have shape batch_size x seq_len
191
+ self.pooling_mult = 1
192
+ self.seq_len = seq_len = inputs_embeds.size(1)
193
+ position_embeds = self.get_position_embeds(seq_len, inputs_embeds.dtype, inputs_embeds.device)
194
+ token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None
195
+ cls_mask = (
196
+ nn.functional.pad(inputs_embeds.new_ones([seq_len - 1, seq_len - 1]), (1, 0, 1, 0))
197
+ if self.config.separate_cls
198
+ else None
199
+ )
200
+ return (position_embeds, token_type_mat, attention_mask, cls_mask)
201
+
202
+ def token_type_ids_to_mat(self, token_type_ids: torch.Tensor) -> torch.Tensor:
203
+ """Convert `token_type_ids` to `token_type_mat`."""
204
+ token_type_mat = token_type_ids[:, :, None] == token_type_ids[:, None]
205
+ # Treat <cls> as in the same segment as both A & B
206
+ cls_ids = token_type_ids == self.cls_token_type_id
207
+ cls_mat = cls_ids[:, :, None] | cls_ids[:, None]
208
+ return cls_mat | token_type_mat
209
+
210
+ def get_position_embeds(
211
+ self, seq_len: int, dtype: torch.dtype, device: torch.device
212
+ ) -> Union[Tuple[torch.Tensor], List[List[torch.Tensor]]]:
213
+ """
214
+ Create and cache inputs related to relative position encoding. Those are very different depending on whether we
215
+ are using the factorized or the relative shift attention:
216
+
217
+ For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2,
218
+ final formula.
219
+
220
+ For the relative shift attention, it returns all possible vectors R used in the paper, appendix A.2.1, final
221
+ formula.
222
+
223
+ Paper link: https://arxiv.org/abs/2006.03236
224
+ """
225
+ d_model = self.config.d_model
226
+ if self.config.attention_type == "factorized":
227
+ # Notations from the paper, appendix A.2.2, final formula.
228
+ # We need to create and return the matrices phi, psi, pi and omega.
229
+ pos_seq = torch.arange(0, seq_len, 1.0, dtype=torch.int64, device=device).to(dtype)
230
+ freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype)
231
+ inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
232
+ sinusoid = pos_seq[:, None] * inv_freq[None]
233
+ sin_embed = torch.sin(sinusoid)
234
+ sin_embed_d = self.sin_dropout(sin_embed)
235
+ cos_embed = torch.cos(sinusoid)
236
+ cos_embed_d = self.cos_dropout(cos_embed)
237
+ # This is different from the formula on the paper...
238
+ phi = torch.cat([sin_embed_d, sin_embed_d], dim=-1)
239
+ psi = torch.cat([cos_embed, sin_embed], dim=-1)
240
+ pi = torch.cat([cos_embed_d, cos_embed_d], dim=-1)
241
+ omega = torch.cat([-sin_embed, cos_embed], dim=-1)
242
+ return (phi, pi, psi, omega)
243
+ else:
244
+ # Notations from the paper, appendix A.2.1, final formula.
245
+ # We need to create and return all the possible vectors R for all blocks and shifts.
246
+ freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype)
247
+ inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
248
+ # Maximum relative positions for the first input
249
+ rel_pos_id = torch.arange(-seq_len * 2, seq_len * 2, 1.0, dtype=torch.int64, device=device).to(dtype)
250
+ zero_offset = seq_len * 2
251
+ sinusoid = rel_pos_id[:, None] * inv_freq[None]
252
+ sin_embed = self.sin_dropout(torch.sin(sinusoid))
253
+ cos_embed = self.cos_dropout(torch.cos(sinusoid))
254
+ pos_embed = torch.cat([sin_embed, cos_embed], dim=-1)
255
+
256
+ pos = torch.arange(0, seq_len, dtype=torch.int64, device=device).to(dtype)
257
+ pooled_pos = pos
258
+ position_embeds_list = []
259
+ for block_index in range(0, self.config.num_blocks):
260
+ # For each block with block_index > 0, we need two types of position embeddings:
261
+ # - Attention(pooled-q, unpooled-kv)
262
+ # - Attention(pooled-q, pooled-kv)
263
+ # For block_index = 0 we only need the second one and leave the first one as None.
264
+
265
+ # First type
266
+ if block_index == 0:
267
+ position_embeds_pooling = None
268
+ else:
269
+ pooled_pos = self.stride_pool_pos(pos, block_index)
270
+
271
+ # construct rel_pos_id
272
+ stride = 2 ** (block_index - 1)
273
+ rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2)
274
+ rel_pos = rel_pos[:, None] + zero_offset
275
+ rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
276
+ position_embeds_pooling = torch.gather(pos_embed, 0, rel_pos)
277
+
278
+ # Second type
279
+ pos = pooled_pos
280
+ stride = 2**block_index
281
+ rel_pos = self.relative_pos(pos, stride)
282
+
283
+ rel_pos = rel_pos[:, None] + zero_offset
284
+ rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
285
+ position_embeds_no_pooling = torch.gather(pos_embed, 0, rel_pos)
286
+
287
+ position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling])
288
+ return position_embeds_list
289
+
290
+ def stride_pool_pos(self, pos_id: torch.Tensor, block_index: int):
291
+ """
292
+ Pool `pos_id` while keeping the cls token separate (if `config.separate_cls=True`).
293
+ """
294
+ if self.config.separate_cls:
295
+ # Under separate <cls>, we treat the <cls> as the first token in
296
+ # the previous block of the 1st real block. Since the 1st real
297
+ # block always has position 1, the position of the previous block
298
+ # will be at `1 - 2 ** block_index`.
299
+ cls_pos = pos_id.new_tensor([-(2**block_index) + 1])
300
+ pooled_pos_id = pos_id[1:-1] if self.config.truncate_seq else pos_id[1:]
301
+ return torch.cat([cls_pos, pooled_pos_id[::2]], 0)
302
+ else:
303
+ return pos_id[::2]
304
+
305
+ def relative_pos(self, pos: torch.Tensor, stride: int, pooled_pos=None, shift: int = 1) -> torch.Tensor:
306
+ """
307
+ Build the relative positional vector between `pos` and `pooled_pos`.
308
+ """
309
+ if pooled_pos is None:
310
+ pooled_pos = pos
311
+
312
+ ref_point = pooled_pos[0] - pos[0]
313
+ num_remove = shift * len(pooled_pos)
314
+ max_dist = ref_point + num_remove * stride
315
+ min_dist = pooled_pos[0] - pos[-1]
316
+
317
+ return torch.arange(max_dist, min_dist - 1, -stride, dtype=torch.long, device=pos.device)
318
+
319
+ def stride_pool(
320
+ self,
321
+ tensor: Union[torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor]],
322
+ axis: Union[int, Tuple[int], List[int]],
323
+ ) -> torch.Tensor:
324
+ """
325
+ Perform pooling by stride slicing the tensor along the given axis.
326
+ """
327
+ if tensor is None:
328
+ return None
329
+
330
+ # Do the stride pool recursively if axis is a list or a tuple of ints.
331
+ if isinstance(axis, (list, tuple)):
332
+ for ax in axis:
333
+ tensor = self.stride_pool(tensor, ax)
334
+ return tensor
335
+
336
+ # Do the stride pool recursively if tensor is a list or tuple of tensors.
337
+ if isinstance(tensor, (tuple, list)):
338
+ return type(tensor)(self.stride_pool(x, axis) for x in tensor)
339
+
340
+ # Deal with negative axis
341
+ axis %= tensor.ndim
342
+
343
+ axis_slice = (
344
+ slice(None, -1, 2) if self.config.separate_cls and self.config.truncate_seq else slice(None, None, 2)
345
+ )
346
+ enc_slice = [slice(None)] * axis + [axis_slice]
347
+ if self.config.separate_cls:
348
+ cls_slice = [slice(None)] * axis + [slice(None, 1)]
349
+ tensor = torch.cat([tensor[cls_slice], tensor], axis=axis)
350
+ return tensor[enc_slice]
351
+
352
+ def pool_tensor(
353
+ self, tensor: Union[torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor]], mode: str = "mean", stride: int = 2
354
+ ) -> torch.Tensor:
355
+ """Apply 1D pooling to a tensor of size [B x T (x H)]."""
356
+ if tensor is None:
357
+ return None
358
+
359
+ # Do the pool recursively if tensor is a list or tuple of tensors.
360
+ if isinstance(tensor, (tuple, list)):
361
+ return type(tensor)(self.pool_tensor(x, mode=mode, stride=stride) for x in tensor)
362
+
363
+ if self.config.separate_cls:
364
+ suffix = tensor[:, :-1] if self.config.truncate_seq else tensor
365
+ tensor = torch.cat([tensor[:, :1], suffix], dim=1)
366
+
367
+ ndim = tensor.ndim
368
+ if ndim == 2:
369
+ tensor = tensor[:, None, :, None]
370
+ elif ndim == 3:
371
+ tensor = tensor[:, None, :, :]
372
+ # Stride is applied on the second-to-last dimension.
373
+ stride = (stride, 1)
374
+
375
+ if mode == "mean":
376
+ tensor = nn.functional.avg_pool2d(tensor, stride, stride=stride, ceil_mode=True)
377
+ elif mode == "max":
378
+ tensor = nn.functional.max_pool2d(tensor, stride, stride=stride, ceil_mode=True)
379
+ elif mode == "min":
380
+ tensor = -nn.functional.max_pool2d(-tensor, stride, stride=stride, ceil_mode=True)
381
+ else:
382
+ raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.")
383
+
384
+ if ndim == 2:
385
+ return tensor[:, 0, :, 0]
386
+ elif ndim == 3:
387
+ return tensor[:, 0]
388
+ return tensor
389
+
390
+ def pre_attention_pooling(
391
+ self, output, attention_inputs: Tuple[torch.Tensor]
392
+ ) -> Tuple[torch.Tensor, Tuple[torch.Tensor]]:
393
+ """Pool `output` and the proper parts of `attention_inputs` before the attention layer."""
394
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
395
+ if self.config.pool_q_only:
396
+ if self.config.attention_type == "factorized":
397
+ position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:]
398
+ token_type_mat = self.stride_pool(token_type_mat, 1)
399
+ cls_mask = self.stride_pool(cls_mask, 0)
400
+ output = self.pool_tensor(output, mode=self.config.pooling_type)
401
+ else:
402
+ self.pooling_mult *= 2
403
+ if self.config.attention_type == "factorized":
404
+ position_embeds = self.stride_pool(position_embeds, 0)
405
+ token_type_mat = self.stride_pool(token_type_mat, [1, 2])
406
+ cls_mask = self.stride_pool(cls_mask, [1, 2])
407
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
408
+ output = self.pool_tensor(output, mode=self.config.pooling_type)
409
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
410
+ return output, attention_inputs
411
+
412
+ def post_attention_pooling(self, attention_inputs: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]:
413
+ """Pool the proper parts of `attention_inputs` after the attention layer."""
414
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
415
+ if self.config.pool_q_only:
416
+ self.pooling_mult *= 2
417
+ if self.config.attention_type == "factorized":
418
+ position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0)
419
+ token_type_mat = self.stride_pool(token_type_mat, 2)
420
+ cls_mask = self.stride_pool(cls_mask, 1)
421
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
422
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
423
+ return attention_inputs
424
+
425
+
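As a rough illustration of the pooling helper defined above, the sketch below calls `pool_tensor` directly on a dummy tensor. The `separate_cls=True` / `truncate_seq=False` settings are assumed explicitly and the shapes are illustrative only.

```python
# Sketch: stride-2 mean pooling as applied between Funnel blocks.
import torch
from transformers import FunnelConfig
from transformers.models.funnel.modeling_funnel import FunnelAttentionStructure

config = FunnelConfig(separate_cls=True, truncate_seq=False)
structure = FunnelAttentionStructure(config)

hidden = torch.randn(2, 8, config.d_model)  # batch_size x seq_len x d_model
pooled = structure.pool_tensor(hidden, mode="mean", stride=2)
# The <cls> position is kept apart before pooling, so 8 tokens become 5 rather than 4.
print(pooled.shape)  # torch.Size([2, 5, 768])
```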
426
+ def _relative_shift_gather(positional_attn: torch.Tensor, context_len: int, shift: int) -> torch.Tensor:
427
+ batch_size, n_head, seq_len, max_rel_len = positional_attn.shape
428
+ # max_rel_len = 2 * context_len + shift - 1 is the number of possible relative positions i-j
429
+
430
+ # What's next is the same as doing the following gather, which might be clearer code but less efficient.
431
+ # idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1)
432
+ # # matrix of context_len + i-j
433
+ # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len]))
434
+
435
+ positional_attn = torch.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len])
436
+ positional_attn = positional_attn[:, :, shift:, :]
437
+ positional_attn = torch.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift])
438
+ positional_attn = positional_attn[..., :context_len]
439
+ return positional_attn
440
+
441
+
442
+ class FunnelRelMultiheadAttention(nn.Module):
443
+ def __init__(self, config: FunnelConfig, block_index: int) -> None:
444
+ super().__init__()
445
+ self.config = config
446
+ self.block_index = block_index
447
+ d_model, n_head, d_head = config.d_model, config.n_head, config.d_head
448
+
449
+ self.hidden_dropout = nn.Dropout(config.hidden_dropout)
450
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
451
+
452
+ self.q_head = nn.Linear(d_model, n_head * d_head, bias=False)
453
+ self.k_head = nn.Linear(d_model, n_head * d_head)
454
+ self.v_head = nn.Linear(d_model, n_head * d_head)
455
+
456
+ self.r_w_bias = nn.Parameter(torch.zeros([n_head, d_head]))
457
+ self.r_r_bias = nn.Parameter(torch.zeros([n_head, d_head]))
458
+ self.r_kernel = nn.Parameter(torch.zeros([d_model, n_head, d_head]))
459
+ self.r_s_bias = nn.Parameter(torch.zeros([n_head, d_head]))
460
+ self.seg_embed = nn.Parameter(torch.zeros([2, n_head, d_head]))
461
+
462
+ self.post_proj = nn.Linear(n_head * d_head, d_model)
463
+ self.layer_norm = nn.LayerNorm(d_model, eps=config.layer_norm_eps)
464
+ self.scale = 1.0 / (d_head**0.5)
465
+
466
+ def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None):
467
+ """Relative attention score for the positional encodings"""
468
+ # q_head has shape batch_size x seq_len x n_head x d_head
469
+ if self.config.attention_type == "factorized":
470
+ # Notations from the paper, appendix A.2.2, final formula (https://arxiv.org/abs/2006.03236)
471
+ # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model
472
+ phi, pi, psi, omega = position_embeds
473
+ # Shape n_head x d_head
474
+ u = self.r_r_bias * self.scale
475
+ # Shape d_model x n_head x d_head
476
+ w_r = self.r_kernel
477
+
478
+ # Shape batch_size x seq_len x n_head x d_model
479
+ q_r_attention = torch.einsum("binh,dnh->bind", q_head + u, w_r)
480
+ q_r_attention_1 = q_r_attention * phi[:, None]
481
+ q_r_attention_2 = q_r_attention * pi[:, None]
482
+
483
+ # Shape batch_size x n_head x seq_len x context_len
484
+ positional_attn = torch.einsum("bind,jd->bnij", q_r_attention_1, psi) + torch.einsum(
485
+ "bind,jd->bnij", q_r_attention_2, omega
486
+ )
487
+ else:
488
+ shift = 2 if q_head.shape[1] != context_len else 1
489
+ # Notations from the paper, appendix A.2.1, final formula (https://arxiv.org/abs/2006.03236)
490
+ # Grab the proper positional encoding, shape max_rel_len x d_model
491
+ r = position_embeds[self.block_index][shift - 1]
492
+ # Shape n_head x d_head
493
+ v = self.r_r_bias * self.scale
494
+ # Shape d_model x n_head x d_head
495
+ w_r = self.r_kernel
496
+
497
+ # Shape max_rel_len x n_head x d_model
498
+ r_head = torch.einsum("td,dnh->tnh", r, w_r)
499
+ # Shape batch_size x n_head x seq_len x max_rel_len
500
+ positional_attn = torch.einsum("binh,tnh->bnit", q_head + v, r_head)
501
+ # Shape batch_size x n_head x seq_len x context_len
502
+ positional_attn = _relative_shift_gather(positional_attn, context_len, shift)
503
+
504
+ if cls_mask is not None:
505
+ positional_attn *= cls_mask
506
+ return positional_attn
507
+
508
+ def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None):
509
+ """Relative attention score for the token_type_ids"""
510
+ if token_type_mat is None:
511
+ return 0
512
+ batch_size, seq_len, context_len = token_type_mat.shape
513
+ # q_head has shape batch_size x seq_len x n_head x d_head
514
+ # Shape n_head x d_head
515
+ r_s_bias = self.r_s_bias * self.scale
516
+
517
+ # Shape batch_size x n_head x seq_len x 2
518
+ token_type_bias = torch.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed)
519
+ # Shape batch_size x n_head x seq_len x context_len
520
+ token_type_mat = token_type_mat[:, None].expand([batch_size, q_head.shape[2], seq_len, context_len])
521
+ # Shapes batch_size x n_head x seq_len
522
+ diff_token_type, same_token_type = torch.split(token_type_bias, 1, dim=-1)
523
+ # Shape batch_size x n_head x seq_len x context_len
524
+ token_type_attn = torch.where(
525
+ token_type_mat, same_token_type.expand(token_type_mat.shape), diff_token_type.expand(token_type_mat.shape)
526
+ )
527
+
528
+ if cls_mask is not None:
529
+ token_type_attn *= cls_mask
530
+ return token_type_attn
531
+
532
+ def forward(
533
+ self,
534
+ query: torch.Tensor,
535
+ key: torch.Tensor,
536
+ value: torch.Tensor,
537
+ attention_inputs: Tuple[torch.Tensor],
538
+ output_attentions: bool = False,
539
+ ) -> Tuple[torch.Tensor, ...]:
540
+ # query has shape batch_size x seq_len x d_model
541
+ # key and value have shapes batch_size x context_len x d_model
542
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
543
+
544
+ batch_size, seq_len, _ = query.shape
545
+ context_len = key.shape[1]
546
+ n_head, d_head = self.config.n_head, self.config.d_head
547
+
548
+ # Shape batch_size x seq_len x n_head x d_head
549
+ q_head = self.q_head(query).view(batch_size, seq_len, n_head, d_head)
550
+ # Shapes batch_size x context_len x n_head x d_head
551
+ k_head = self.k_head(key).view(batch_size, context_len, n_head, d_head)
552
+ v_head = self.v_head(value).view(batch_size, context_len, n_head, d_head)
553
+
554
+ q_head = q_head * self.scale
555
+ # Shape n_head x d_head
556
+ r_w_bias = self.r_w_bias * self.scale
557
+ # Shapes batch_size x n_head x seq_len x context_len
558
+ content_score = torch.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head)
559
+ positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask)
560
+ token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask)
561
+
562
+ # merge attention scores
563
+ attn_score = content_score + positional_attn + token_type_attn
564
+
565
+ # precision safe in case of mixed precision training
566
+ dtype = attn_score.dtype
567
+ attn_score = attn_score.float()
568
+ # perform masking
569
+ if attention_mask is not None:
570
+ attn_score = attn_score - INF * (1 - attention_mask[:, None, None].float())
571
+ # attention probability
572
+ attn_prob = torch.softmax(attn_score, dim=-1, dtype=dtype)
573
+ attn_prob = self.attention_dropout(attn_prob)
574
+
575
+ # attention output, shape batch_size x seq_len x n_head x d_head
576
+ attn_vec = torch.einsum("bnij,bjnd->bind", attn_prob, v_head)
577
+
578
+ # Shape batch_size x seq_len x d_model
579
+ attn_out = self.post_proj(attn_vec.reshape(batch_size, seq_len, n_head * d_head))
580
+ attn_out = self.hidden_dropout(attn_out)
581
+
582
+ output = self.layer_norm(query + attn_out)
583
+ return (output, attn_prob) if output_attentions else (output,)
584
+
585
+
586
+ class FunnelPositionwiseFFN(nn.Module):
587
+ def __init__(self, config: FunnelConfig) -> None:
588
+ super().__init__()
589
+ self.linear_1 = nn.Linear(config.d_model, config.d_inner)
590
+ self.activation_function = ACT2FN[config.hidden_act]
591
+ self.activation_dropout = nn.Dropout(config.activation_dropout)
592
+ self.linear_2 = nn.Linear(config.d_inner, config.d_model)
593
+ self.dropout = nn.Dropout(config.hidden_dropout)
594
+ self.layer_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps)
595
+
596
+ def forward(self, hidden: torch.Tensor) -> torch.Tensor:
597
+ h = self.linear_1(hidden)
598
+ h = self.activation_function(h)
599
+ h = self.activation_dropout(h)
600
+ h = self.linear_2(h)
601
+ h = self.dropout(h)
602
+ return self.layer_norm(hidden + h)
603
+
604
+
605
+ class FunnelLayer(nn.Module):
606
+ def __init__(self, config: FunnelConfig, block_index: int) -> None:
607
+ super().__init__()
608
+ self.attention = FunnelRelMultiheadAttention(config, block_index)
609
+ self.ffn = FunnelPositionwiseFFN(config)
610
+
611
+ def forward(
612
+ self,
613
+ query: torch.Tensor,
614
+ key: torch.Tensor,
615
+ value: torch.Tensor,
616
+ attention_inputs,
617
+ output_attentions: bool = False,
618
+ ) -> Tuple:
619
+ attn = self.attention(query, key, value, attention_inputs, output_attentions=output_attentions)
620
+ output = self.ffn(attn[0])
621
+ return (output, attn[1]) if output_attentions else (output,)
622
+
623
+
624
+ class FunnelEncoder(nn.Module):
625
+ def __init__(self, config: FunnelConfig) -> None:
626
+ super().__init__()
627
+ self.config = config
628
+ self.attention_structure = FunnelAttentionStructure(config)
629
+ self.blocks = nn.ModuleList(
630
+ [
631
+ nn.ModuleList([FunnelLayer(config, block_index) for _ in range(block_size)])
632
+ for block_index, block_size in enumerate(config.block_sizes)
633
+ ]
634
+ )
635
+
636
+ def forward(
637
+ self,
638
+ inputs_embeds: torch.Tensor,
639
+ attention_mask: Optional[torch.Tensor] = None,
640
+ token_type_ids: Optional[torch.Tensor] = None,
641
+ output_attentions: bool = False,
642
+ output_hidden_states: bool = False,
643
+ return_dict: bool = True,
644
+ ) -> Union[Tuple, BaseModelOutput]:
645
+ # The pooling is not implemented on long tensors, so we convert this mask.
646
+ attention_mask = attention_mask.type_as(inputs_embeds)
647
+ attention_inputs = self.attention_structure.init_attention_inputs(
648
+ inputs_embeds,
649
+ attention_mask=attention_mask,
650
+ token_type_ids=token_type_ids,
651
+ )
652
+ hidden = inputs_embeds
653
+
654
+ all_hidden_states = (inputs_embeds,) if output_hidden_states else None
655
+ all_attentions = () if output_attentions else None
656
+
657
+ for block_index, block in enumerate(self.blocks):
658
+ pooling_flag = hidden.size(1) > (2 if self.config.separate_cls else 1)
659
+ pooling_flag = pooling_flag and block_index > 0
660
+ if pooling_flag:
661
+ pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling(
662
+ hidden, attention_inputs
663
+ )
664
+ for layer_index, layer in enumerate(block):
665
+ for repeat_index in range(self.config.block_repeats[block_index]):
666
+ do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag
667
+ if do_pooling:
668
+ query = pooled_hidden
669
+ key = value = hidden if self.config.pool_q_only else pooled_hidden
670
+ else:
671
+ query = key = value = hidden
672
+ layer_output = layer(query, key, value, attention_inputs, output_attentions=output_attentions)
673
+ hidden = layer_output[0]
674
+ if do_pooling:
675
+ attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs)
676
+
677
+ if output_attentions:
678
+ all_attentions = all_attentions + layer_output[1:]
679
+ if output_hidden_states:
680
+ all_hidden_states = all_hidden_states + (hidden,)
681
+
682
+ if not return_dict:
683
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
684
+ return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
685
+
686
+
687
+ def upsample(
688
+ x: torch.Tensor, stride: int, target_len: int, separate_cls: bool = True, truncate_seq: bool = False
689
+ ) -> torch.Tensor:
690
+ """
691
+ Upsample tensor `x` to match `target_len` by repeating the tokens `stride` times along the sequence length dimension.
692
+ """
693
+ if stride == 1:
694
+ return x
695
+ if separate_cls:
696
+ cls = x[:, :1]
697
+ x = x[:, 1:]
698
+ output = torch.repeat_interleave(x, repeats=stride, dim=1)
699
+ if separate_cls:
700
+ if truncate_seq:
701
+ output = nn.functional.pad(output, (0, 0, 0, stride - 1, 0, 0))
702
+ output = output[:, : target_len - 1]
703
+ output = torch.cat([cls, output], dim=1)
704
+ else:
705
+ output = output[:, :target_len]
706
+ return output
707
+
708
+
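A toy sketch of `upsample` as the decoder uses it: the pooled sequence is repeated `stride` times and trimmed back to the first block's length, with the <cls> vector handled separately. Sizes here are illustrative only.

```python
import torch
from transformers.models.funnel.modeling_funnel import upsample

# 1 <cls> vector plus 2 pooled tokens, hidden size 16 (toy sizes).
final_hidden = torch.randn(1, 3, 16)
restored = upsample(final_hidden, stride=4, target_len=8, separate_cls=True, truncate_seq=False)
print(restored.shape)  # torch.Size([1, 8, 16])
```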
709
+ class FunnelDecoder(nn.Module):
710
+ def __init__(self, config: FunnelConfig) -> None:
711
+ super().__init__()
712
+ self.config = config
713
+ self.attention_structure = FunnelAttentionStructure(config)
714
+ self.layers = nn.ModuleList([FunnelLayer(config, 0) for _ in range(config.num_decoder_layers)])
715
+
716
+ def forward(
717
+ self,
718
+ final_hidden: torch.Tensor,
719
+ first_block_hidden: torch.Tensor,
720
+ attention_mask: Optional[torch.Tensor] = None,
721
+ token_type_ids: Optional[torch.Tensor] = None,
722
+ output_attentions: bool = False,
723
+ output_hidden_states: bool = False,
724
+ return_dict: bool = True,
725
+ ) -> Union[Tuple, BaseModelOutput]:
726
+ upsampled_hidden = upsample(
727
+ final_hidden,
728
+ stride=2 ** (len(self.config.block_sizes) - 1),
729
+ target_len=first_block_hidden.shape[1],
730
+ separate_cls=self.config.separate_cls,
731
+ truncate_seq=self.config.truncate_seq,
732
+ )
733
+
734
+ hidden = upsampled_hidden + first_block_hidden
735
+ all_hidden_states = (hidden,) if output_hidden_states else None
736
+ all_attentions = () if output_attentions else None
737
+
738
+ attention_inputs = self.attention_structure.init_attention_inputs(
739
+ hidden,
740
+ attention_mask=attention_mask,
741
+ token_type_ids=token_type_ids,
742
+ )
743
+
744
+ for layer in self.layers:
745
+ layer_output = layer(hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions)
746
+ hidden = layer_output[0]
747
+
748
+ if output_attentions:
749
+ all_attentions = all_attentions + layer_output[1:]
750
+ if output_hidden_states:
751
+ all_hidden_states = all_hidden_states + (hidden,)
752
+
753
+ if not return_dict:
754
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
755
+ return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
756
+
757
+
758
+ class FunnelDiscriminatorPredictions(nn.Module):
759
+ """Prediction module for the discriminator, made up of two dense layers."""
760
+
761
+ def __init__(self, config: FunnelConfig) -> None:
762
+ super().__init__()
763
+ self.config = config
764
+ self.dense = nn.Linear(config.d_model, config.d_model)
765
+ self.dense_prediction = nn.Linear(config.d_model, 1)
766
+
767
+ def forward(self, discriminator_hidden_states: torch.Tensor) -> torch.Tensor:
768
+ hidden_states = self.dense(discriminator_hidden_states)
769
+ hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
770
+ logits = self.dense_prediction(hidden_states).squeeze(-1)
771
+ return logits
772
+
773
+
774
+ class FunnelPreTrainedModel(PreTrainedModel):
775
+ """
776
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
777
+ models.
778
+ """
779
+
780
+ config_class = FunnelConfig
781
+ load_tf_weights = load_tf_weights_in_funnel
782
+ base_model_prefix = "funnel"
783
+
784
+ def _init_weights(self, module):
785
+ classname = module.__class__.__name__
786
+ if classname.find("Linear") != -1:
787
+ if getattr(module, "weight", None) is not None:
788
+ if self.config.initializer_std is None:
789
+ fan_out, fan_in = module.weight.shape
790
+ std = np.sqrt(1.0 / float(fan_in + fan_out))
791
+ else:
792
+ std = self.config.initializer_std
793
+ nn.init.normal_(module.weight, std=std)
794
+ if getattr(module, "bias", None) is not None:
795
+ nn.init.constant_(module.bias, 0.0)
796
+ elif classname == "FunnelRelMultiheadAttention":
797
+ nn.init.uniform_(module.r_w_bias, b=self.config.initializer_range)
798
+ nn.init.uniform_(module.r_r_bias, b=self.config.initializer_range)
799
+ nn.init.uniform_(module.r_kernel, b=self.config.initializer_range)
800
+ nn.init.uniform_(module.r_s_bias, b=self.config.initializer_range)
801
+ nn.init.uniform_(module.seg_embed, b=self.config.initializer_range)
802
+ elif classname == "FunnelEmbeddings":
803
+ std = 1.0 if self.config.initializer_std is None else self.config.initializer_std
804
+ nn.init.normal_(module.word_embeddings.weight, std=std)
805
+ if module.word_embeddings.padding_idx is not None:
806
+ module.word_embeddings.weight.data[module.word_embeddings.padding_idx].zero_()
807
+
808
+
809
+ class FunnelClassificationHead(nn.Module):
810
+ def __init__(self, config: FunnelConfig, n_labels: int) -> None:
811
+ super().__init__()
812
+ self.linear_hidden = nn.Linear(config.d_model, config.d_model)
813
+ self.dropout = nn.Dropout(config.hidden_dropout)
814
+ self.linear_out = nn.Linear(config.d_model, n_labels)
815
+
816
+ def forward(self, hidden: torch.Tensor) -> torch.Tensor:
817
+ hidden = self.linear_hidden(hidden)
818
+ hidden = torch.tanh(hidden)
819
+ hidden = self.dropout(hidden)
820
+ return self.linear_out(hidden)
821
+
822
+
823
+ @dataclass
824
+ class FunnelForPreTrainingOutput(ModelOutput):
825
+ """
826
+ Output type of [`FunnelForPreTraining`].
827
+
828
+ Args:
829
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
830
+ Total loss of the ELECTRA-style objective.
831
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
832
+ Prediction scores of the head (scores for each token before SoftMax).
833
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
834
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
835
+ shape `(batch_size, sequence_length, hidden_size)`.
836
+
837
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
838
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
839
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
840
+ sequence_length)`.
841
+
842
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
843
+ heads.
844
+ """
845
+
846
+ loss: Optional[torch.FloatTensor] = None
847
+ logits: torch.FloatTensor = None
848
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
849
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
850
+
851
+
852
+ FUNNEL_START_DOCSTRING = r"""
853
+
854
+ The Funnel Transformer model was proposed in [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient
855
+ Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
856
+
857
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
858
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
859
+ etc.)
860
+
861
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
862
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
863
+ and behavior.
864
+
865
+ Parameters:
866
+ config ([`FunnelConfig`]): Model configuration class with all the parameters of the model.
867
+ Initializing with a config file does not load the weights associated with the model, only the
868
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
869
+ """
870
+
871
+ FUNNEL_INPUTS_DOCSTRING = r"""
872
+ Args:
873
+ input_ids (`torch.LongTensor` of shape `({0})`):
874
+ Indices of input sequence tokens in the vocabulary.
875
+
876
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
877
+ [`PreTrainedTokenizer.__call__`] for details.
878
+
879
+ [What are input IDs?](../glossary#input-ids)
880
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
881
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
882
+
883
+ - 1 for tokens that are **not masked**,
884
+ - 0 for tokens that are **masked**.
885
+
886
+ [What are attention masks?](../glossary#attention-mask)
887
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
888
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
889
+ 1]`:
890
+
891
+ - 0 corresponds to a *sentence A* token,
892
+ - 1 corresponds to a *sentence B* token.
893
+
894
+ [What are token type IDs?](../glossary#token-type-ids)
895
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
896
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
897
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
898
+ model's internal embedding lookup matrix.
899
+ output_attentions (`bool`, *optional*):
900
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
901
+ tensors for more detail.
902
+ output_hidden_states (`bool`, *optional*):
903
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
904
+ more detail.
905
+ return_dict (`bool`, *optional*):
906
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
907
+ """
908
+
909
+
910
+ @add_start_docstrings(
911
+ """
912
+ The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called
913
+ decoder) or any task-specific head on top.
914
+ """,
915
+ FUNNEL_START_DOCSTRING,
916
+ )
917
+ class FunnelBaseModel(FunnelPreTrainedModel):
918
+ def __init__(self, config: FunnelConfig) -> None:
919
+ super().__init__(config)
920
+
921
+ self.embeddings = FunnelEmbeddings(config)
922
+ self.encoder = FunnelEncoder(config)
923
+
924
+ # Initialize weights and apply final processing
925
+ self.post_init()
926
+
927
+ def get_input_embeddings(self) -> nn.Embedding:
928
+ return self.embeddings.word_embeddings
929
+
930
+ def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
931
+ self.embeddings.word_embeddings = new_embeddings
932
+
933
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
934
+ @add_code_sample_docstrings(
935
+ checkpoint="funnel-transformer/small-base",
936
+ output_type=BaseModelOutput,
937
+ config_class=_CONFIG_FOR_DOC,
938
+ )
939
+ def forward(
940
+ self,
941
+ input_ids: Optional[torch.Tensor] = None,
942
+ attention_mask: Optional[torch.Tensor] = None,
943
+ token_type_ids: Optional[torch.Tensor] = None,
944
+ position_ids: Optional[torch.Tensor] = None,
945
+ head_mask: Optional[torch.Tensor] = None,
946
+ inputs_embeds: Optional[torch.Tensor] = None,
947
+ output_attentions: Optional[bool] = None,
948
+ output_hidden_states: Optional[bool] = None,
949
+ return_dict: Optional[bool] = None,
950
+ ) -> Union[Tuple, BaseModelOutput]:
951
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
952
+ output_hidden_states = (
953
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
954
+ )
955
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
956
+
957
+ if input_ids is not None and inputs_embeds is not None:
958
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
959
+ elif input_ids is not None:
960
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
961
+ input_shape = input_ids.size()
962
+ elif inputs_embeds is not None:
963
+ input_shape = inputs_embeds.size()[:-1]
964
+ else:
965
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
966
+
967
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
968
+
969
+ if attention_mask is None:
970
+ attention_mask = torch.ones(input_shape, device=device)
971
+ if token_type_ids is None:
972
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
973
+
974
+ # TODO: deal with head_mask
975
+ if inputs_embeds is None:
976
+ inputs_embeds = self.embeddings(input_ids)
977
+
978
+ encoder_outputs = self.encoder(
979
+ inputs_embeds,
980
+ attention_mask=attention_mask,
981
+ token_type_ids=token_type_ids,
982
+ output_attentions=output_attentions,
983
+ output_hidden_states=output_hidden_states,
984
+ return_dict=return_dict,
985
+ )
986
+
987
+ return encoder_outputs
988
+
989
+
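A minimal usage sketch for `FunnelBaseModel`: since there is no decoder, the returned hidden states stay at the pooled (shorter) length. The checkpoint name is the one referenced in the docstring sample above.

```python
import torch
from transformers import AutoTokenizer, FunnelBaseModel

tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small-base")
model = FunnelBaseModel.from_pretrained("funnel-transformer/small-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Fewer positions than input_ids because of the pooling between blocks.
print(inputs["input_ids"].shape, outputs.last_hidden_state.shape)
```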
990
+ @add_start_docstrings(
991
+ "The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.",
992
+ FUNNEL_START_DOCSTRING,
993
+ )
994
+ class FunnelModel(FunnelPreTrainedModel):
995
+ def __init__(self, config: FunnelConfig) -> None:
996
+ super().__init__(config)
997
+ self.config = config
998
+ self.embeddings = FunnelEmbeddings(config)
999
+ self.encoder = FunnelEncoder(config)
1000
+ self.decoder = FunnelDecoder(config)
1001
+
1002
+ # Initialize weights and apply final processing
1003
+ self.post_init()
1004
+
1005
+ def get_input_embeddings(self) -> nn.Embedding:
1006
+ return self.embeddings.word_embeddings
1007
+
1008
+ def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
1009
+ self.embeddings.word_embeddings = new_embeddings
1010
+
1011
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1012
+ @add_code_sample_docstrings(
1013
+ checkpoint=_CHECKPOINT_FOR_DOC,
1014
+ output_type=BaseModelOutput,
1015
+ config_class=_CONFIG_FOR_DOC,
1016
+ )
1017
+ def forward(
1018
+ self,
1019
+ input_ids: Optional[torch.Tensor] = None,
1020
+ attention_mask: Optional[torch.Tensor] = None,
1021
+ token_type_ids: Optional[torch.Tensor] = None,
1022
+ inputs_embeds: Optional[torch.Tensor] = None,
1023
+ output_attentions: Optional[bool] = None,
1024
+ output_hidden_states: Optional[bool] = None,
1025
+ return_dict: Optional[bool] = None,
1026
+ ) -> Union[Tuple, BaseModelOutput]:
1027
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1028
+ output_hidden_states = (
1029
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1030
+ )
1031
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1032
+
1033
+ if input_ids is not None and inputs_embeds is not None:
1034
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1035
+ elif input_ids is not None:
1036
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1037
+ input_shape = input_ids.size()
1038
+ elif inputs_embeds is not None:
1039
+ input_shape = inputs_embeds.size()[:-1]
1040
+ else:
1041
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1042
+
1043
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1044
+
1045
+ if attention_mask is None:
1046
+ attention_mask = torch.ones(input_shape, device=device)
1047
+ if token_type_ids is None:
1048
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
1049
+
1050
+ # TODO: deal with head_mask
1051
+ if inputs_embeds is None:
1052
+ inputs_embeds = self.embeddings(input_ids)
1053
+
1054
+ encoder_outputs = self.encoder(
1055
+ inputs_embeds,
1056
+ attention_mask=attention_mask,
1057
+ token_type_ids=token_type_ids,
1058
+ output_attentions=output_attentions,
1059
+ output_hidden_states=True,
1060
+ return_dict=return_dict,
1061
+ )
1062
+
1063
+ decoder_outputs = self.decoder(
1064
+ final_hidden=encoder_outputs[0],
1065
+ first_block_hidden=encoder_outputs[1][self.config.block_sizes[0]],
1066
+ attention_mask=attention_mask,
1067
+ token_type_ids=token_type_ids,
1068
+ output_attentions=output_attentions,
1069
+ output_hidden_states=output_hidden_states,
1070
+ return_dict=return_dict,
1071
+ )
1072
+
1073
+ if not return_dict:
1074
+ idx = 0
1075
+ outputs = (decoder_outputs[0],)
1076
+ if output_hidden_states:
1077
+ idx += 1
1078
+ outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],)
1079
+ if output_attentions:
1080
+ idx += 1
1081
+ outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],)
1082
+ return outputs
1083
+
1084
+ return BaseModelOutput(
1085
+ last_hidden_state=decoder_outputs[0],
1086
+ hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states)
1087
+ if output_hidden_states
1088
+ else None,
1089
+ attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None,
1090
+ )
1091
+
1092
+
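A minimal usage sketch for `FunnelModel`: the decoder upsamples the encoder output back to the input length, so `last_hidden_state` lines up position-for-position with `input_ids`.

```python
import torch
from transformers import AutoTokenizer, FunnelModel

tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
model = FunnelModel.from_pretrained("funnel-transformer/small")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    last_hidden_state = model(**inputs).last_hidden_state
print(last_hidden_state.shape)  # (1, sequence_length, d_model)
```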
1093
+ add_start_docstrings(
1094
+ """
1095
+ Funnel Transformer model with a binary classification head on top as used during pretraining for identifying
1096
+ generated tokens.
1097
+ """,
1098
+ FUNNEL_START_DOCSTRING,
1099
+ )
1100
+
1101
+
1102
+ class FunnelForPreTraining(FunnelPreTrainedModel):
1103
+ def __init__(self, config: FunnelConfig) -> None:
1104
+ super().__init__(config)
1105
+
1106
+ self.funnel = FunnelModel(config)
1107
+ self.discriminator_predictions = FunnelDiscriminatorPredictions(config)
1108
+ # Initialize weights and apply final processing
1109
+ self.post_init()
1110
+
1111
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1112
+ @replace_return_docstrings(output_type=FunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1113
+ def forward(
1114
+ self,
1115
+ input_ids: Optional[torch.Tensor] = None,
1116
+ attention_mask: Optional[torch.Tensor] = None,
1117
+ token_type_ids: Optional[torch.Tensor] = None,
1118
+ inputs_embeds: Optional[torch.Tensor] = None,
1119
+ labels: Optional[torch.Tensor] = None,
1120
+ output_attentions: Optional[bool] = None,
1121
+ output_hidden_states: Optional[bool] = None,
1122
+ return_dict: Optional[bool] = None,
1123
+ ) -> Union[Tuple, FunnelForPreTrainingOutput]:
1124
+ r"""
1125
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1126
+ Labels for computing the ELECTRA-style loss. Input should be a sequence of tokens (see `input_ids`
1127
+ docstring) Indices should be in `[0, 1]`:
1128
+
1129
+ - 0 indicates the token is an original token,
1130
+ - 1 indicates the token was replaced.
1131
+
1132
+ Returns:
1133
+
1134
+ Examples:
1135
+
1136
+ ```python
1137
+ >>> from transformers import AutoTokenizer, FunnelForPreTraining
1138
+ >>> import torch
1139
+
1140
+ >>> tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
1141
+ >>> model = FunnelForPreTraining.from_pretrained("funnel-transformer/small")
1142
+
1143
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1144
+ >>> logits = model(**inputs).logits
1145
+ ```"""
1146
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1147
+
1148
+ discriminator_hidden_states = self.funnel(
1149
+ input_ids,
1150
+ attention_mask=attention_mask,
1151
+ token_type_ids=token_type_ids,
1152
+ inputs_embeds=inputs_embeds,
1153
+ output_attentions=output_attentions,
1154
+ output_hidden_states=output_hidden_states,
1155
+ return_dict=return_dict,
1156
+ )
1157
+ discriminator_sequence_output = discriminator_hidden_states[0]
1158
+
1159
+ logits = self.discriminator_predictions(discriminator_sequence_output)
1160
+
1161
+ loss = None
1162
+ if labels is not None:
1163
+ loss_fct = nn.BCEWithLogitsLoss()
1164
+ if attention_mask is not None:
1165
+ active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
1166
+ active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
1167
+ active_labels = labels[active_loss]
1168
+ loss = loss_fct(active_logits, active_labels.float())
1169
+ else:
1170
+ loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())
1171
+
1172
+ if not return_dict:
1173
+ output = (logits,) + discriminator_hidden_states[1:]
1174
+ return ((loss,) + output) if loss is not None else output
1175
+
1176
+ return FunnelForPreTrainingOutput(
1177
+ loss=loss,
1178
+ logits=logits,
1179
+ hidden_states=discriminator_hidden_states.hidden_states,
1180
+ attentions=discriminator_hidden_states.attentions,
1181
+ )
1182
+
1183
+
1184
+ @add_start_docstrings("""Funnel Transformer Model with a `language modeling` head on top.""", FUNNEL_START_DOCSTRING)
1185
+ class FunnelForMaskedLM(FunnelPreTrainedModel):
1186
+ _tied_weights_keys = ["lm_head.weight"]
1187
+
1188
+ def __init__(self, config: FunnelConfig) -> None:
1189
+ super().__init__(config)
1190
+
1191
+ self.funnel = FunnelModel(config)
1192
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size)
1193
+
1194
+ # Initialize weights and apply final processing
1195
+ self.post_init()
1196
+
1197
+ def get_output_embeddings(self) -> nn.Linear:
1198
+ return self.lm_head
1199
+
1200
+ def set_output_embeddings(self, new_embeddings: nn.Embedding) -> None:
1201
+ self.lm_head = new_embeddings
1202
+
1203
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1204
+ @add_code_sample_docstrings(
1205
+ checkpoint=_CHECKPOINT_FOR_DOC,
1206
+ output_type=MaskedLMOutput,
1207
+ config_class=_CONFIG_FOR_DOC,
1208
+ mask="<mask>",
1209
+ )
1210
+ def forward(
1211
+ self,
1212
+ input_ids: Optional[torch.Tensor] = None,
1213
+ attention_mask: Optional[torch.Tensor] = None,
1214
+ token_type_ids: Optional[torch.Tensor] = None,
1215
+ inputs_embeds: Optional[torch.Tensor] = None,
1216
+ labels: Optional[torch.Tensor] = None,
1217
+ output_attentions: Optional[bool] = None,
1218
+ output_hidden_states: Optional[bool] = None,
1219
+ return_dict: Optional[bool] = None,
1220
+ ) -> Union[Tuple, MaskedLMOutput]:
1221
+ r"""
1222
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1223
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1224
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1225
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1226
+ """
1227
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1228
+
1229
+ outputs = self.funnel(
1230
+ input_ids,
1231
+ attention_mask=attention_mask,
1232
+ token_type_ids=token_type_ids,
1233
+ inputs_embeds=inputs_embeds,
1234
+ output_attentions=output_attentions,
1235
+ output_hidden_states=output_hidden_states,
1236
+ return_dict=return_dict,
1237
+ )
1238
+
1239
+ last_hidden_state = outputs[0]
1240
+ prediction_logits = self.lm_head(last_hidden_state)
1241
+
1242
+ masked_lm_loss = None
1243
+ if labels is not None:
1244
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1245
+ masked_lm_loss = loss_fct(prediction_logits.view(-1, self.config.vocab_size), labels.view(-1))
1246
+
1247
+ if not return_dict:
1248
+ output = (prediction_logits,) + outputs[1:]
1249
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1250
+
1251
+ return MaskedLMOutput(
1252
+ loss=masked_lm_loss,
1253
+ logits=prediction_logits,
1254
+ hidden_states=outputs.hidden_states,
1255
+ attentions=outputs.attentions,
1256
+ )
1257
+
1258
+
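A hedged sketch of scoring a masked position with `FunnelForMaskedLM`; the checkpoint and `<mask>` token follow the docstring defaults above.

```python
import torch
from transformers import AutoTokenizer, FunnelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
model = FunnelForMaskedLM.from_pretrained("funnel-transformer/small")

inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```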
1259
+ @add_start_docstrings(
1260
+ """
1261
+ Funnel Transformer Model with a sequence classification/regression head on top (two linear layers on top of the
1262
+ first timestep of the last hidden state) e.g. for GLUE tasks.
1263
+ """,
1264
+ FUNNEL_START_DOCSTRING,
1265
+ )
1266
+ class FunnelForSequenceClassification(FunnelPreTrainedModel):
1267
+ def __init__(self, config: FunnelConfig) -> None:
1268
+ super().__init__(config)
1269
+ self.num_labels = config.num_labels
1270
+ self.config = config
1271
+
1272
+ self.funnel = FunnelBaseModel(config)
1273
+ self.classifier = FunnelClassificationHead(config, config.num_labels)
1274
+ # Initialize weights and apply final processing
1275
+ self.post_init()
1276
+
1277
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1278
+ @add_code_sample_docstrings(
1279
+ checkpoint="funnel-transformer/small-base",
1280
+ output_type=SequenceClassifierOutput,
1281
+ config_class=_CONFIG_FOR_DOC,
1282
+ )
1283
+ def forward(
1284
+ self,
1285
+ input_ids: Optional[torch.Tensor] = None,
1286
+ attention_mask: Optional[torch.Tensor] = None,
1287
+ token_type_ids: Optional[torch.Tensor] = None,
1288
+ inputs_embeds: Optional[torch.Tensor] = None,
1289
+ labels: Optional[torch.Tensor] = None,
1290
+ output_attentions: Optional[bool] = None,
1291
+ output_hidden_states: Optional[bool] = None,
1292
+ return_dict: Optional[bool] = None,
1293
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1294
+ r"""
1295
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1296
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1297
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1298
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1299
+ """
1300
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1301
+
1302
+ outputs = self.funnel(
1303
+ input_ids,
1304
+ attention_mask=attention_mask,
1305
+ token_type_ids=token_type_ids,
1306
+ inputs_embeds=inputs_embeds,
1307
+ output_attentions=output_attentions,
1308
+ output_hidden_states=output_hidden_states,
1309
+ return_dict=return_dict,
1310
+ )
1311
+
1312
+ last_hidden_state = outputs[0]
1313
+ pooled_output = last_hidden_state[:, 0]
1314
+ logits = self.classifier(pooled_output)
1315
+
1316
+ loss = None
1317
+ if labels is not None:
1318
+ if self.config.problem_type is None:
1319
+ if self.num_labels == 1:
1320
+ self.config.problem_type = "regression"
1321
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1322
+ self.config.problem_type = "single_label_classification"
1323
+ else:
1324
+ self.config.problem_type = "multi_label_classification"
1325
+
1326
+ if self.config.problem_type == "regression":
1327
+ loss_fct = MSELoss()
1328
+ if self.num_labels == 1:
1329
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1330
+ else:
1331
+ loss = loss_fct(logits, labels)
1332
+ elif self.config.problem_type == "single_label_classification":
1333
+ loss_fct = CrossEntropyLoss()
1334
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1335
+ elif self.config.problem_type == "multi_label_classification":
1336
+ loss_fct = BCEWithLogitsLoss()
1337
+ loss = loss_fct(logits, labels)
1338
+
1339
+ if not return_dict:
1340
+ output = (logits,) + outputs[1:]
1341
+ return ((loss,) + output) if loss is not None else output
1342
+
1343
+ return SequenceClassifierOutput(
1344
+ loss=loss,
1345
+ logits=logits,
1346
+ hidden_states=outputs.hidden_states,
1347
+ attentions=outputs.attentions,
1348
+ )
1349
+
1350
+
1351
+ @add_start_docstrings(
1352
+ """
1353
+ Funnel Transformer Model with a multiple choice classification head on top (two linear layers on top of the first
1354
+ timestep of the last hidden state, and a softmax) e.g. for RocStories/SWAG tasks.
1355
+ """,
1356
+ FUNNEL_START_DOCSTRING,
1357
+ )
1358
+ class FunnelForMultipleChoice(FunnelPreTrainedModel):
1359
+ def __init__(self, config: FunnelConfig) -> None:
1360
+ super().__init__(config)
1361
+
1362
+ self.funnel = FunnelBaseModel(config)
1363
+ self.classifier = FunnelClassificationHead(config, 1)
1364
+ # Initialize weights and apply final processing
1365
+ self.post_init()
1366
+
1367
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1368
+ @add_code_sample_docstrings(
1369
+ checkpoint="funnel-transformer/small-base",
1370
+ output_type=MultipleChoiceModelOutput,
1371
+ config_class=_CONFIG_FOR_DOC,
1372
+ )
1373
+ def forward(
1374
+ self,
1375
+ input_ids: Optional[torch.Tensor] = None,
1376
+ attention_mask: Optional[torch.Tensor] = None,
1377
+ token_type_ids: Optional[torch.Tensor] = None,
1378
+ inputs_embeds: Optional[torch.Tensor] = None,
1379
+ labels: Optional[torch.Tensor] = None,
1380
+ output_attentions: Optional[bool] = None,
1381
+ output_hidden_states: Optional[bool] = None,
1382
+ return_dict: Optional[bool] = None,
1383
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1384
+ r"""
1385
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1386
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1387
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1388
+ `input_ids` above)
1389
+ """
1390
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1391
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1392
+
1393
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1394
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1395
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1396
+ inputs_embeds = (
1397
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1398
+ if inputs_embeds is not None
1399
+ else None
1400
+ )
1401
+
1402
+ outputs = self.funnel(
1403
+ input_ids,
1404
+ attention_mask=attention_mask,
1405
+ token_type_ids=token_type_ids,
1406
+ inputs_embeds=inputs_embeds,
1407
+ output_attentions=output_attentions,
1408
+ output_hidden_states=output_hidden_states,
1409
+ return_dict=return_dict,
1410
+ )
1411
+
1412
+ last_hidden_state = outputs[0]
1413
+ pooled_output = last_hidden_state[:, 0]
1414
+ logits = self.classifier(pooled_output)
1415
+ reshaped_logits = logits.view(-1, num_choices)
1416
+
1417
+ loss = None
1418
+ if labels is not None:
1419
+ loss_fct = CrossEntropyLoss()
1420
+ loss = loss_fct(reshaped_logits, labels)
1421
+
1422
+ if not return_dict:
1423
+ output = (reshaped_logits,) + outputs[1:]
1424
+ return ((loss,) + output) if loss is not None else output
1425
+
1426
+ return MultipleChoiceModelOutput(
1427
+ loss=loss,
1428
+ logits=reshaped_logits,
1429
+ hidden_states=outputs.hidden_states,
1430
+ attentions=outputs.attentions,
1431
+ )
1432
+
1433
+
1434
+ @add_start_docstrings(
1435
+ """
1436
+ Funnel Transformer Model with a token classification head on top (a linear layer on top of the hidden-states
1437
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1438
+ """,
1439
+ FUNNEL_START_DOCSTRING,
1440
+ )
1441
+ class FunnelForTokenClassification(FunnelPreTrainedModel):
1442
+ def __init__(self, config: FunnelConfig) -> None:
1443
+ super().__init__(config)
1444
+ self.num_labels = config.num_labels
1445
+
1446
+ self.funnel = FunnelModel(config)
1447
+ self.dropout = nn.Dropout(config.hidden_dropout)
1448
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1449
+
1450
+ # Initialize weights and apply final processing
1451
+ self.post_init()
1452
+
1453
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1454
+ @add_code_sample_docstrings(
1455
+ checkpoint=_CHECKPOINT_FOR_DOC,
1456
+ output_type=TokenClassifierOutput,
1457
+ config_class=_CONFIG_FOR_DOC,
1458
+ )
1459
+ def forward(
1460
+ self,
1461
+ input_ids: Optional[torch.Tensor] = None,
1462
+ attention_mask: Optional[torch.Tensor] = None,
1463
+ token_type_ids: Optional[torch.Tensor] = None,
1464
+ inputs_embeds: Optional[torch.Tensor] = None,
1465
+ labels: Optional[torch.Tensor] = None,
1466
+ output_attentions: Optional[bool] = None,
1467
+ output_hidden_states: Optional[bool] = None,
1468
+ return_dict: Optional[bool] = None,
1469
+ ) -> Union[Tuple, TokenClassifierOutput]:
1470
+ r"""
1471
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1472
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1473
+ """
1474
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1475
+
1476
+ outputs = self.funnel(
1477
+ input_ids,
1478
+ attention_mask=attention_mask,
1479
+ token_type_ids=token_type_ids,
1480
+ inputs_embeds=inputs_embeds,
1481
+ output_attentions=output_attentions,
1482
+ output_hidden_states=output_hidden_states,
1483
+ return_dict=return_dict,
1484
+ )
1485
+
1486
+ last_hidden_state = outputs[0]
1487
+ last_hidden_state = self.dropout(last_hidden_state)
1488
+ logits = self.classifier(last_hidden_state)
1489
+
1490
+ loss = None
1491
+ if labels is not None:
1492
+ loss_fct = CrossEntropyLoss()
1493
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1494
+
1495
+ if not return_dict:
1496
+ output = (logits,) + outputs[1:]
1497
+ return ((loss,) + output) if loss is not None else output
1498
+
1499
+ return TokenClassifierOutput(
1500
+ loss=loss,
1501
+ logits=logits,
1502
+ hidden_states=outputs.hidden_states,
1503
+ attentions=outputs.attentions,
1504
+ )
1505
+
1506
+
1507
+ @add_start_docstrings(
1508
+ """
1509
+ Funnel Transformer Model with a span classification head on top for extractive question-answering tasks like SQuAD
1510
+ (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1511
+ """,
1512
+ FUNNEL_START_DOCSTRING,
1513
+ )
1514
+ class FunnelForQuestionAnswering(FunnelPreTrainedModel):
1515
+ def __init__(self, config: FunnelConfig) -> None:
1516
+ super().__init__(config)
1517
+ self.num_labels = config.num_labels
1518
+
1519
+ self.funnel = FunnelModel(config)
1520
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1521
+
1522
+ # Initialize weights and apply final processing
1523
+ self.post_init()
1524
+
1525
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1526
+ @add_code_sample_docstrings(
1527
+ checkpoint=_CHECKPOINT_FOR_DOC,
1528
+ output_type=QuestionAnsweringModelOutput,
1529
+ config_class=_CONFIG_FOR_DOC,
1530
+ )
1531
+ def forward(
1532
+ self,
1533
+ input_ids: Optional[torch.Tensor] = None,
1534
+ attention_mask: Optional[torch.Tensor] = None,
1535
+ token_type_ids: Optional[torch.Tensor] = None,
1536
+ inputs_embeds: Optional[torch.Tensor] = None,
1537
+ start_positions: Optional[torch.Tensor] = None,
1538
+ end_positions: Optional[torch.Tensor] = None,
1539
+ output_attentions: Optional[bool] = None,
1540
+ output_hidden_states: Optional[bool] = None,
1541
+ return_dict: Optional[bool] = None,
1542
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1543
+ r"""
1544
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1545
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1546
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1547
+ are not taken into account for computing the loss.
1548
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1549
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1550
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1551
+ are not taken into account for computing the loss.
1552
+ """
1553
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1554
+
1555
+ outputs = self.funnel(
1556
+ input_ids,
1557
+ attention_mask=attention_mask,
1558
+ token_type_ids=token_type_ids,
1559
+ inputs_embeds=inputs_embeds,
1560
+ output_attentions=output_attentions,
1561
+ output_hidden_states=output_hidden_states,
1562
+ return_dict=return_dict,
1563
+ )
1564
+
1565
+ last_hidden_state = outputs[0]
1566
+
1567
+ logits = self.qa_outputs(last_hidden_state)
1568
+ start_logits, end_logits = logits.split(1, dim=-1)
1569
+ start_logits = start_logits.squeeze(-1).contiguous()
1570
+ end_logits = end_logits.squeeze(-1).contiguous()
1571
+
1572
+ total_loss = None
1573
+ if start_positions is not None and end_positions is not None:
1574
+ # If we are on multi-GPU, split adds a dimension
1575
+ if len(start_positions.size()) > 1:
1576
+ start_positions = start_positions.squeeze(-1)
1577
+ if len(end_positions.size()) > 1:
1578
+ end_positions = end_positions.squeeze(-1)
1579
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1580
+ ignored_index = start_logits.size(1)
1581
+ start_positions = start_positions.clamp(0, ignored_index)
1582
+ end_positions = end_positions.clamp(0, ignored_index)
1583
+
1584
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1585
+ start_loss = loss_fct(start_logits, start_positions)
1586
+ end_loss = loss_fct(end_logits, end_positions)
1587
+ total_loss = (start_loss + end_loss) / 2
1588
+
1589
+ if not return_dict:
1590
+ output = (start_logits, end_logits) + outputs[1:]
1591
+ return ((total_loss,) + output) if total_loss is not None else output
1592
+
1593
+ return QuestionAnsweringModelOutput(
1594
+ loss=total_loss,
1595
+ start_logits=start_logits,
1596
+ end_logits=end_logits,
1597
+ hidden_states=outputs.hidden_states,
1598
+ attentions=outputs.attentions,
1599
+ )
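The question-answering head above returns only per-token `start_logits` and `end_logits`; a minimal greedy span-decoding sketch (checkpoint name and question/context strings are illustrative assumptions) could look like this:

import torch
from transformers import AutoTokenizer, FunnelForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")  # assumed checkpoint
model = FunnelForQuestionAnswering.from_pretrained("funnel-transformer/small")

question = "What does the funnel encoder pool?"
context = "The funnel encoder pools the sequence length between blocks."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Greedy decoding: take the most likely start and end positions (no check that end >= start).
start = int(outputs.start_logits.argmax())
end = int(outputs.end_logits.argmax())
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))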
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_tf_funnel.py ADDED
@@ -0,0 +1,1871 @@
1
+ # coding=utf-8
2
+ # Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 Funnel model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import numpy as np
25
+ import tensorflow as tf
26
+
27
+ from ...activations_tf import get_tf_activation
28
+ from ...modeling_tf_outputs import (
29
+ TFBaseModelOutput,
30
+ TFMaskedLMOutput,
31
+ TFMultipleChoiceModelOutput,
32
+ TFQuestionAnsweringModelOutput,
33
+ TFSequenceClassifierOutput,
34
+ TFTokenClassifierOutput,
35
+ )
36
+ from ...modeling_tf_utils import (
37
+ TFMaskedLanguageModelingLoss,
38
+ TFModelInputType,
39
+ TFMultipleChoiceLoss,
40
+ TFPreTrainedModel,
41
+ TFQuestionAnsweringLoss,
42
+ TFSequenceClassificationLoss,
43
+ TFTokenClassificationLoss,
44
+ get_initializer,
45
+ keras,
46
+ keras_serializable,
47
+ unpack_inputs,
48
+ )
49
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
50
+ from ...utils import (
51
+ ModelOutput,
52
+ add_code_sample_docstrings,
53
+ add_start_docstrings,
54
+ add_start_docstrings_to_model_forward,
55
+ logging,
56
+ replace_return_docstrings,
57
+ )
58
+ from .configuration_funnel import FunnelConfig
59
+
60
+
61
+ logger = logging.get_logger(__name__)
62
+
63
+ _CONFIG_FOR_DOC = "FunnelConfig"
64
+
65
+
66
+ from ..deprecated._archive_maps import TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
67
+
68
+
69
+ INF = 1e6
70
+
71
+
72
+ class TFFunnelEmbeddings(keras.layers.Layer):
73
+ """Construct the embeddings from word, position and token_type embeddings."""
74
+
75
+ def __init__(self, config, **kwargs):
76
+ super().__init__(**kwargs)
77
+
78
+ self.config = config
79
+ self.hidden_size = config.hidden_size
80
+ self.initializer_std = 1.0 if config.initializer_std is None else config.initializer_std
81
+
82
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
83
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout)
84
+
85
+ def build(self, input_shape=None):
86
+ with tf.name_scope("word_embeddings"):
87
+ self.weight = self.add_weight(
88
+ name="weight",
89
+ shape=[self.config.vocab_size, self.hidden_size],
90
+ initializer=get_initializer(initializer_range=self.initializer_std),
91
+ )
92
+
93
+ if self.built:
94
+ return
95
+ self.built = True
96
+ if getattr(self, "LayerNorm", None) is not None:
97
+ with tf.name_scope(self.LayerNorm.name):
98
+ self.LayerNorm.build([None, None, self.config.d_model])
99
+
100
+ def call(self, input_ids=None, inputs_embeds=None, training=False):
101
+ """
102
+ Applies embedding based on inputs tensor.
103
+
104
+ Returns:
105
+ final_embeddings (`tf.Tensor`): output embedding tensor.
106
+ """
107
+ assert not (input_ids is None and inputs_embeds is None)
108
+ assert not (input_ids is not None and inputs_embeds is not None)
109
+
110
+ if input_ids is not None:
111
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
112
+ inputs_embeds = tf.gather(self.weight, input_ids)
113
+
114
+ final_embeddings = self.LayerNorm(inputs=inputs_embeds)
115
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
116
+
117
+ return final_embeddings
118
+
119
+
120
+ class TFFunnelAttentionStructure:
121
+ """
122
+ Contains helpers for `TFFunnelRelMultiheadAttention`.
123
+ """
124
+
125
+ cls_token_type_id: int = 2
126
+
127
+ def __init__(self, config):
128
+ self.d_model = config.d_model
129
+ self.attention_type = config.attention_type
130
+ self.num_blocks = config.num_blocks
131
+ self.separate_cls = config.separate_cls
132
+ self.truncate_seq = config.truncate_seq
133
+ self.pool_q_only = config.pool_q_only
134
+ self.pooling_type = config.pooling_type
135
+
136
+ self.sin_dropout = keras.layers.Dropout(config.hidden_dropout)
137
+ self.cos_dropout = keras.layers.Dropout(config.hidden_dropout)
138
+ # Track where we are at in terms of pooling from the original input, e.g., by how much the sequence length was
139
+ # divided.
140
+ self.pooling_mult = None
141
+
142
+ def init_attention_inputs(self, inputs_embeds, attention_mask=None, token_type_ids=None, training=False):
143
+ """Returns the attention inputs associated to the inputs of the model."""
144
+ # inputs_embeds has shape batch_size x seq_len x d_model
145
+ # attention_mask and token_type_ids have shape batch_size x seq_len
146
+ self.pooling_mult = 1
147
+ self.seq_len = seq_len = shape_list(inputs_embeds)[1]
148
+ position_embeds = self.get_position_embeds(seq_len, training=training)
149
+ token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None
150
+ cls_mask = (
151
+ tf.pad(tf.ones([seq_len - 1, seq_len - 1], dtype=inputs_embeds.dtype), [[1, 0], [1, 0]])
152
+ if self.separate_cls
153
+ else None
154
+ )
155
+ return (position_embeds, token_type_mat, attention_mask, cls_mask)
156
+
157
+ def token_type_ids_to_mat(self, token_type_ids):
158
+ """Convert `token_type_ids` to `token_type_mat`."""
159
+ token_type_mat = tf.equal(tf.expand_dims(token_type_ids, -1), tf.expand_dims(token_type_ids, -2))
160
+ # Treat <cls> as in the same segment as both A & B
161
+ cls_ids = tf.equal(token_type_ids, tf.constant([self.cls_token_type_id], dtype=token_type_ids.dtype))
162
+ cls_mat = tf.logical_or(tf.expand_dims(cls_ids, -1), tf.expand_dims(cls_ids, -2))
163
+ return tf.logical_or(cls_mat, token_type_mat)
164
+
165
+ def get_position_embeds(self, seq_len, training=False):
166
+ """
167
+ Create and cache inputs related to relative position encoding. Those are very different depending on whether we
168
+ are using the factorized or the relative shift attention:
169
+
170
+ For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2,
171
+ final formula.
172
+
173
+ For the relative shift attention, it returns all possible vectors R used in the paper, appendix A.2.1, final
174
+ formula.
175
+
176
+ Paper link: https://arxiv.org/abs/2006.03236
177
+ """
178
+ if self.attention_type == "factorized":
179
+ # Notations from the paper, appendix A.2.2, final formula.
180
+ # We need to create and return the matrices phi, psi, pi and omega.
181
+ pos_seq = tf.range(0, seq_len, 1.0)
182
+ freq_seq = tf.range(0, self.d_model // 2, 1.0)
183
+ inv_freq = 1 / (10000 ** (freq_seq / (self.d_model // 2)))
184
+ sinusoid = tf.einsum("i,d->id", pos_seq, inv_freq)
185
+
186
+ sin_embed = tf.sin(sinusoid)
187
+ sin_embed_d = self.sin_dropout(sin_embed, training=training)
188
+ cos_embed = tf.cos(sinusoid)
189
+ cos_embed_d = self.cos_dropout(cos_embed, training=training)
190
+ # This is different from the formula in the paper...
191
+ phi = tf.concat([sin_embed_d, sin_embed_d], axis=-1)
192
+ psi = tf.concat([cos_embed, sin_embed], axis=-1)
193
+ pi = tf.concat([cos_embed_d, cos_embed_d], axis=-1)
194
+ omega = tf.concat([-sin_embed, cos_embed], axis=-1)
195
+ return (phi, pi, psi, omega)
196
+ else:
197
+ # Notations from the paper, appendix A.2.1, final formula.
198
+ # We need to create and return all the possible vectors R for all blocks and shifts.
199
+ freq_seq = tf.range(0, self.d_model // 2, 1.0)
200
+ inv_freq = 1 / (10000 ** (freq_seq / (self.d_model // 2)))
201
+ # Maximum relative positions for the first input
202
+ rel_pos_id = tf.range(-seq_len * 2, seq_len * 2, 1.0)
203
+ zero_offset = seq_len * tf.constant(2)
204
+ sinusoid = tf.einsum("i,d->id", rel_pos_id, inv_freq)
205
+ sin_embed = self.sin_dropout(tf.sin(sinusoid), training=training)
206
+ cos_embed = self.cos_dropout(tf.cos(sinusoid), training=training)
207
+ pos_embed = tf.concat([sin_embed, cos_embed], axis=-1)
208
+
209
+ pos = tf.range(0, seq_len)
210
+ pooled_pos = pos
211
+ position_embeds_list = []
212
+ for block_index in range(0, self.num_blocks):
213
+ # For each block with block_index > 0, we need two types of position embeddings:
214
+ # - Attention(pooled-q, unpooled-kv)
215
+ # - Attention(pooled-q, pooled-kv)
216
+ # For block_index = 0 we only need the second one and leave the first one as None.
217
+
218
+ # First type
219
+ position_embeds_pooling = tf.fill([1], value=-1.0)
220
+
221
+ if block_index != 0:
222
+ pooled_pos = self.stride_pool_pos(pos, block_index)
223
+
224
+ # construct rel_pos_id
225
+ stride = 2 ** (block_index - 1)
226
+ rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2)
227
+ # rel_pos = tf.expand_dims(rel_pos,1) + zero_offset
228
+ # rel_pos = tf.broadcast_to(rel_pos, (rel_pos.shape[0], self.d_model))
229
+ rel_pos = tf.cast(rel_pos, dtype=zero_offset.dtype)
230
+ rel_pos = rel_pos + zero_offset
231
+ position_embeds_pooling = tf.gather(pos_embed, rel_pos, axis=0)
232
+
233
+ # Second type
234
+ pos = pooled_pos
235
+ stride = 2**block_index
236
+ rel_pos = self.relative_pos(pos, stride)
237
+
238
+ # rel_pos = tf.expand_dims(rel_pos,1) + zero_offset
239
+ # rel_pos = tf.broadcast_to(rel_pos, (rel_pos.shape[0], self.d_model))
240
+ rel_pos = tf.cast(rel_pos, dtype=zero_offset.dtype)
241
+ rel_pos = rel_pos + zero_offset
242
+ tf.debugging.assert_less(rel_pos, tf.shape(pos_embed)[0])
243
+ position_embeds_no_pooling = tf.gather(pos_embed, rel_pos, axis=0)
244
+
245
+ position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling])
246
+ return position_embeds_list
247
+
248
+ def stride_pool_pos(self, pos_id, block_index):
249
+ """
250
+ Pool `pos_id` while keeping the cls token separate (if `self.separate_cls=True`).
251
+ """
252
+ if self.separate_cls:
253
+ # Under separate <cls>, we treat the <cls> as the first token in
254
+ # the previous block of the 1st real block. Since the 1st real
255
+ # block always has position 1, the position of the previous block
256
+ # will be at `1 - 2 ** block_index`.
257
+ cls_pos = tf.constant([-(2**block_index) + 1], dtype=pos_id.dtype)
258
+ pooled_pos_id = pos_id[1:-1] if self.truncate_seq else pos_id[1:]
259
+ return tf.concat([cls_pos, pooled_pos_id[::2]], 0)
260
+ else:
261
+ return pos_id[::2]
262
+
263
+ def relative_pos(self, pos, stride, pooled_pos=None, shift=1):
264
+ """
265
+ Build the relative positional vector between `pos` and `pooled_pos`.
266
+ """
267
+ if pooled_pos is None:
268
+ pooled_pos = pos
269
+
270
+ ref_point = pooled_pos[0] - pos[0]
271
+ num_remove = shift * shape_list(pooled_pos)[0]
272
+ max_dist = ref_point + num_remove * stride
273
+ min_dist = pooled_pos[0] - pos[-1]
274
+
275
+ return tf.range(max_dist, min_dist - 1, -stride)
276
+
277
+ def stride_pool(self, tensor, axis):
278
+ """
279
+ Perform pooling by stride slicing the tensor along the given axis.
280
+ """
281
+ if tensor is None:
282
+ return None
283
+
284
+ # Do the stride pool recursively if axis is a list or a tuple of ints.
285
+ if isinstance(axis, (list, tuple)):
286
+ for ax in axis:
287
+ tensor = self.stride_pool(tensor, ax)
288
+ return tensor
289
+
290
+ # Do the stride pool recursively if tensor is a list or tuple of tensors.
291
+ if isinstance(tensor, (tuple, list)):
292
+ return type(tensor)(self.stride_pool(x, axis) for x in tensor)
293
+
294
+ # Deal with negative axis
295
+ axis %= len(shape_list(tensor))
296
+
297
+ axis_slice = slice(None, -1, 2) if self.separate_cls and self.truncate_seq else slice(None, None, 2)
298
+ enc_slice = [slice(None)] * axis + [axis_slice]
299
+ if self.separate_cls:
300
+ cls_slice = [slice(None)] * axis + [slice(None, 1)]
301
+ tensor = tf.concat([tensor[cls_slice], tensor], axis)
302
+ return tensor[enc_slice]
303
+
304
+ def pool_tensor(self, tensor, mode="mean", stride=2):
305
+ """Apply 1D pooling to a tensor of size [B x T (x H)]."""
306
+ if tensor is None:
307
+ return None
308
+
309
+ # Do the pool recursively if tensor is a list or tuple of tensors.
310
+ if isinstance(tensor, (tuple, list)):
311
+ return type(tensor)(self.pool_tensor(x, mode=mode, stride=stride) for x in tensor)
312
+
313
+ if self.separate_cls:
314
+ suffix = tensor[:, :-1] if self.truncate_seq else tensor
315
+ tensor = tf.concat([tensor[:, :1], suffix], axis=1)
316
+
317
+ ndim = len(shape_list(tensor))
318
+ if ndim == 2:
319
+ tensor = tensor[:, :, None]
320
+
321
+ if mode == "mean":
322
+ tensor = tf.nn.avg_pool1d(tensor, stride, strides=stride, data_format="NWC", padding="SAME")
323
+ elif mode == "max":
324
+ tensor = tf.nn.max_pool1d(tensor, stride, strides=stride, data_format="NWC", padding="SAME")
325
+ elif mode == "min":
326
+ tensor = -tf.nn.max_pool1d(-tensor, stride, strides=stride, data_format="NWC", padding="SAME")
327
+ else:
328
+ raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.")
329
+
330
+ return tf.squeeze(tensor, 2) if ndim == 2 else tensor
331
+
332
+ def pre_attention_pooling(self, output, attention_inputs):
333
+ """Pool `output` and the proper parts of `attention_inputs` before the attention layer."""
334
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
335
+ if self.pool_q_only:
336
+ if self.attention_type == "factorized":
337
+ position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:]
338
+ token_type_mat = self.stride_pool(token_type_mat, 1)
339
+ cls_mask = self.stride_pool(cls_mask, 0)
340
+ output = self.pool_tensor(output, mode=self.pooling_type)
341
+ else:
342
+ self.pooling_mult *= 2
343
+ if self.attention_type == "factorized":
344
+ position_embeds = self.stride_pool(position_embeds, 0)
345
+ token_type_mat = self.stride_pool(token_type_mat, [1, 2])
346
+ cls_mask = self.stride_pool(cls_mask, [1, 2])
347
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
348
+ output = self.pool_tensor(output, mode=self.pooling_type)
349
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
350
+ return output, attention_inputs
351
+
352
+ def post_attention_pooling(self, attention_inputs):
353
+ """Pool the proper parts of `attention_inputs` after the attention layer."""
354
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
355
+ if self.pool_q_only:
356
+ self.pooling_mult *= 2
357
+ if self.attention_type == "factorized":
358
+ position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0)
359
+ token_type_mat = self.stride_pool(token_type_mat, 2)
360
+ cls_mask = self.stride_pool(cls_mask, 1)
361
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
362
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
363
+ return attention_inputs
364
+
365
+
366
+ def _relative_shift_gather(positional_attn, context_len, shift):
367
+ batch_size, n_head, seq_len, max_rel_len = shape_list(positional_attn)
368
+ # max_rel_len = 2 * context_len + shift - 1 is the number of possible relative positions i-j
369
+
370
+ # What's next is the same as doing the following gather in PyTorch, which might be clearer code but less efficient.
371
+ # idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1)
372
+ # # matrix of context_len + i-j
373
+ # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len]))
374
+
375
+ positional_attn = tf.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len])
376
+ positional_attn = positional_attn[:, :, shift:, :]
377
+ positional_attn = tf.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift])
378
+ positional_attn = positional_attn[..., :context_len]
379
+ return positional_attn
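The comment above spells out the gather that the reshape/shift/slice sequence emulates; a small NumPy check of that equivalence (toy sizes and a shift of 1, chosen purely for illustration) is sketched below.

import numpy as np

# Toy check of the reshape/shift/slice trick used by `_relative_shift_gather` above
# (batch, head and length sizes here are illustrative assumptions).
batch_size, n_head, context_len, shift = 1, 2, 4, 1
seq_len = context_len
max_rel_len = 2 * context_len + shift - 1
x = np.arange(batch_size * n_head * seq_len * max_rel_len, dtype=np.float32)
x = x.reshape(batch_size, n_head, seq_len, max_rel_len)

# Same sequence of operations as the TF code path.
y = x.reshape(batch_size, n_head, max_rel_len, seq_len)[:, :, shift:, :]
y = y.reshape(batch_size, n_head, seq_len, max_rel_len - shift)[..., :context_len]

# Explicit gather at index context_len + j - i, as in the PyTorch comment.
idxs = context_len + np.arange(context_len)[None, :] - np.arange(seq_len)[:, None]
expected = np.take_along_axis(x, np.broadcast_to(idxs, y.shape), axis=-1)
assert np.array_equal(y, expected)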
380
+
381
+
382
+ class TFFunnelRelMultiheadAttention(keras.layers.Layer):
383
+ def __init__(self, config, block_index, **kwargs):
384
+ super().__init__(**kwargs)
385
+ self.attention_type = config.attention_type
386
+ self.n_head = n_head = config.n_head
387
+ self.d_head = d_head = config.d_head
388
+ self.d_model = d_model = config.d_model
389
+ self.initializer_range = config.initializer_range
390
+ self.block_index = block_index
391
+
392
+ self.hidden_dropout = keras.layers.Dropout(config.hidden_dropout)
393
+ self.attention_dropout = keras.layers.Dropout(config.attention_dropout)
394
+
395
+ initializer = get_initializer(config.initializer_range)
396
+
397
+ self.q_head = keras.layers.Dense(
398
+ n_head * d_head, use_bias=False, kernel_initializer=initializer, name="q_head"
399
+ )
400
+ self.k_head = keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name="k_head")
401
+ self.v_head = keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name="v_head")
402
+
403
+ self.post_proj = keras.layers.Dense(d_model, kernel_initializer=initializer, name="post_proj")
404
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
405
+ self.scale = 1.0 / (d_head**0.5)
406
+
407
+ def build(self, input_shape=None):
408
+ n_head, d_head, d_model = self.n_head, self.d_head, self.d_model
409
+ initializer = get_initializer(self.initializer_range)
410
+
411
+ self.r_w_bias = self.add_weight(
412
+ shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_w_bias"
413
+ )
414
+ self.r_r_bias = self.add_weight(
415
+ shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_r_bias"
416
+ )
417
+ self.r_kernel = self.add_weight(
418
+ shape=(d_model, n_head, d_head), initializer=initializer, trainable=True, name="r_kernel"
419
+ )
420
+ self.r_s_bias = self.add_weight(
421
+ shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_s_bias"
422
+ )
423
+ self.seg_embed = self.add_weight(
424
+ shape=(2, n_head, d_head), initializer=initializer, trainable=True, name="seg_embed"
425
+ )
426
+
427
+ if self.built:
428
+ return
429
+ self.built = True
430
+ if getattr(self, "q_head", None) is not None:
431
+ with tf.name_scope(self.q_head.name):
432
+ self.q_head.build([None, None, d_model])
433
+ if getattr(self, "k_head", None) is not None:
434
+ with tf.name_scope(self.k_head.name):
435
+ self.k_head.build([None, None, d_model])
436
+ if getattr(self, "v_head", None) is not None:
437
+ with tf.name_scope(self.v_head.name):
438
+ self.v_head.build([None, None, d_model])
439
+ if getattr(self, "post_proj", None) is not None:
440
+ with tf.name_scope(self.post_proj.name):
441
+ self.post_proj.build([None, None, n_head * d_head])
442
+ if getattr(self, "layer_norm", None) is not None:
443
+ with tf.name_scope(self.layer_norm.name):
444
+ self.layer_norm.build([None, None, d_model])
445
+
446
+ def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None):
447
+ """Relative attention score for the positional encodings"""
448
+ # q_head has shape batch_size x seq_len x n_head x d_head
449
+ if self.attention_type == "factorized":
450
+ # Notations from the paper, appendix A.2.2, final formula (https://arxiv.org/abs/2006.03236)
451
+ # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model
452
+ phi, pi, psi, omega = position_embeds
453
+ # Shape n_head x d_head
454
+ u = self.r_r_bias * self.scale
455
+ # Shape d_model x n_head x d_head
456
+ w_r = self.r_kernel
457
+
458
+ # Shape batch_size x seq_len x n_head x d_model
459
+ q_r_attention = tf.einsum("binh,dnh->bind", q_head + u, w_r)
460
+ q_r_attention_1 = q_r_attention * phi[:, None]
461
+ q_r_attention_2 = q_r_attention * pi[:, None]
462
+
463
+ # Shape batch_size x n_head x seq_len x context_len
464
+ positional_attn = tf.einsum("bind,jd->bnij", q_r_attention_1, psi) + tf.einsum(
465
+ "bind,jd->bnij", q_r_attention_2, omega
466
+ )
467
+ else:
468
+ # Notations from the paper, appendix A.2.1, final formula (https://arxiv.org/abs/2006.03236)
469
+ # Grab the proper positional encoding, shape max_rel_len x d_model
470
+ if shape_list(q_head)[1] != context_len:
471
+ shift = 2
472
+ r = position_embeds[self.block_index][1]
473
+ else:
474
+ shift = 1
475
+ r = position_embeds[self.block_index][0]
476
+ # Shape n_head x d_head
477
+ v = self.r_r_bias * self.scale
478
+ # Shape d_model x n_head x d_head
479
+ w_r = self.r_kernel
480
+
481
+ # Shape max_rel_len x n_head x d_model
482
+ r_head = tf.einsum("td,dnh->tnh", r, w_r)
483
+ # Shape batch_size x n_head x seq_len x max_rel_len
484
+ positional_attn = tf.einsum("binh,tnh->bnit", q_head + v, r_head)
485
+ # Shape batch_size x n_head x seq_len x context_len
486
+ positional_attn = _relative_shift_gather(positional_attn, context_len, shift)
487
+
488
+ if cls_mask is not None:
489
+ positional_attn *= cls_mask
490
+ return positional_attn
491
+
492
+ def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None):
493
+ """Relative attention score for the token_type_ids"""
494
+ if token_type_mat is None:
495
+ return 0
496
+ batch_size, seq_len, context_len = shape_list(token_type_mat)
497
+ # q_head has shape batch_size x seq_len x n_head x d_head
498
+ # Shape n_head x d_head
499
+ r_s_bias = self.r_s_bias * self.scale
500
+
501
+ # Shape batch_size x n_head x seq_len x 2
502
+ token_type_bias = tf.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed)
503
+ # Shape batch_size x n_head x seq_len x context_len
504
+ token_type_mat = tf.tile(token_type_mat[:, None], [1, shape_list(q_head)[2], 1, 1])
505
+ # token_type_mat = tf.broadcast_to(token_type_mat[:, None], new_shape)
506
+ # Shapes batch_size x n_head x seq_len
507
+ diff_token_type, same_token_type = tf.split(token_type_bias, 2, axis=-1)
508
+ # Shape batch_size x n_head x seq_len x context_len
509
+ token_type_attn = tf.where(
510
+ token_type_mat,
511
+ tf.tile(same_token_type, [1, 1, 1, context_len]),
512
+ tf.tile(diff_token_type, [1, 1, 1, context_len]),
513
+ )
514
+
515
+ if cls_mask is not None:
516
+ token_type_attn *= cls_mask
517
+ return token_type_attn
518
+
519
+ def call(self, query, key, value, attention_inputs, output_attentions=False, training=False):
520
+ # query has shape batch_size x seq_len x d_model
521
+ # key and value have shapes batch_size x context_len x d_model
522
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
523
+
524
+ batch_size, seq_len, _ = shape_list(query)
525
+ context_len = shape_list(key)[1]
526
+ n_head, d_head = self.n_head, self.d_head
527
+
528
+ # Shape batch_size x seq_len x n_head x d_head
529
+ q_head = tf.reshape(self.q_head(query), [batch_size, seq_len, n_head, d_head])
530
+ # Shapes batch_size x context_len x n_head x d_head
531
+ k_head = tf.reshape(self.k_head(key), [batch_size, context_len, n_head, d_head])
532
+ v_head = tf.reshape(self.v_head(value), [batch_size, context_len, n_head, d_head])
533
+
534
+ q_head = q_head * self.scale
535
+ # Shape n_head x d_head
536
+ r_w_bias = self.r_w_bias * self.scale
537
+ # Shapes batch_size x n_head x seq_len x context_len
538
+ content_score = tf.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head)
539
+ positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask)
540
+ token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask)
541
+
542
+ # merge attention scores
543
+ attn_score = content_score + positional_attn + token_type_attn
544
+
545
+ # perform masking
546
+ if attention_mask is not None:
547
+ attention_mask = tf.cast(attention_mask, dtype=attn_score.dtype)
548
+ attn_score = attn_score - (INF * (1 - attention_mask[:, None, None]))
549
+
550
+ # attention probability
551
+ attn_prob = stable_softmax(attn_score, axis=-1)
552
+ attn_prob = self.attention_dropout(attn_prob, training=training)
553
+
554
+ # attention output, shape batch_size x seq_len x n_head x d_head
555
+ attn_vec = tf.einsum("bnij,bjnd->bind", attn_prob, v_head)
556
+
557
+ # Shape batch_size x seq_len x d_model
558
+ attn_out = self.post_proj(tf.reshape(attn_vec, [batch_size, seq_len, n_head * d_head]))
559
+ attn_out = self.hidden_dropout(attn_out, training=training)
560
+
561
+ output = self.layer_norm(query + attn_out)
562
+ return (output, attn_prob) if output_attentions else (output,)
563
+
564
+
565
+ class TFFunnelPositionwiseFFN(keras.layers.Layer):
566
+ def __init__(self, config, **kwargs):
567
+ super().__init__(**kwargs)
568
+ initializer = get_initializer(config.initializer_range)
569
+ self.linear_1 = keras.layers.Dense(config.d_inner, kernel_initializer=initializer, name="linear_1")
570
+ self.activation_function = get_tf_activation(config.hidden_act)
571
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
572
+ self.linear_2 = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="linear_2")
573
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
574
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
575
+ self.config = config
576
+
577
+ def call(self, hidden, training=False):
578
+ h = self.linear_1(hidden)
579
+ h = self.activation_function(h)
580
+ h = self.activation_dropout(h, training=training)
581
+ h = self.linear_2(h)
582
+ h = self.dropout(h, training=training)
583
+ return self.layer_norm(hidden + h)
584
+
585
+ def build(self, input_shape=None):
586
+ if self.built:
587
+ return
588
+ self.built = True
589
+ if getattr(self, "linear_1", None) is not None:
590
+ with tf.name_scope(self.linear_1.name):
591
+ self.linear_1.build([None, None, self.config.d_model])
592
+ if getattr(self, "linear_2", None) is not None:
593
+ with tf.name_scope(self.linear_2.name):
594
+ self.linear_2.build([None, None, self.config.d_inner])
595
+ if getattr(self, "layer_norm", None) is not None:
596
+ with tf.name_scope(self.layer_norm.name):
597
+ self.layer_norm.build([None, None, self.config.d_model])
598
+
599
+
600
+ class TFFunnelLayer(keras.layers.Layer):
601
+ def __init__(self, config, block_index, **kwargs):
602
+ super().__init__(**kwargs)
603
+ self.attention = TFFunnelRelMultiheadAttention(config, block_index, name="attention")
604
+ self.ffn = TFFunnelPositionwiseFFN(config, name="ffn")
605
+
606
+ def call(self, query, key, value, attention_inputs, output_attentions=False, training=False):
607
+ attn = self.attention(
608
+ query, key, value, attention_inputs, output_attentions=output_attentions, training=training
609
+ )
610
+ output = self.ffn(attn[0], training=training)
611
+ return (output, attn[1]) if output_attentions else (output,)
612
+
613
+ def build(self, input_shape=None):
614
+ if self.built:
615
+ return
616
+ self.built = True
617
+ if getattr(self, "attention", None) is not None:
618
+ with tf.name_scope(self.attention.name):
619
+ self.attention.build(None)
620
+ if getattr(self, "ffn", None) is not None:
621
+ with tf.name_scope(self.ffn.name):
622
+ self.ffn.build(None)
623
+
624
+
625
+ class TFFunnelEncoder(keras.layers.Layer):
626
+ def __init__(self, config, **kwargs):
627
+ super().__init__(**kwargs)
628
+ self.separate_cls = config.separate_cls
629
+ self.pool_q_only = config.pool_q_only
630
+ self.block_repeats = config.block_repeats
631
+ self.attention_structure = TFFunnelAttentionStructure(config)
632
+ self.blocks = [
633
+ [TFFunnelLayer(config, block_index, name=f"blocks_._{block_index}_._{i}") for i in range(block_size)]
634
+ for block_index, block_size in enumerate(config.block_sizes)
635
+ ]
636
+
637
+ def call(
638
+ self,
639
+ inputs_embeds,
640
+ attention_mask=None,
641
+ token_type_ids=None,
642
+ output_attentions=False,
643
+ output_hidden_states=False,
644
+ return_dict=True,
645
+ training=False,
646
+ ):
647
+ # The pooling is not implemented on long tensors, so we convert this mask.
648
+ # attention_mask = tf.cast(attention_mask, inputs_embeds.dtype)
649
+ attention_inputs = self.attention_structure.init_attention_inputs(
650
+ inputs_embeds,
651
+ attention_mask=attention_mask,
652
+ token_type_ids=token_type_ids,
653
+ training=training,
654
+ )
655
+ hidden = inputs_embeds
656
+
657
+ all_hidden_states = (inputs_embeds,) if output_hidden_states else None
658
+ all_attentions = () if output_attentions else None
659
+
660
+ for block_index, block in enumerate(self.blocks):
661
+ pooling_flag = shape_list(hidden)[1] > (2 if self.separate_cls else 1)
662
+ pooling_flag = pooling_flag and block_index > 0
663
+ pooled_hidden = tf.zeros(shape_list(hidden))
664
+
665
+ if pooling_flag:
666
+ pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling(
667
+ hidden, attention_inputs
668
+ )
669
+
670
+ for layer_index, layer in enumerate(block):
671
+ for repeat_index in range(self.block_repeats[block_index]):
672
+ do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag
673
+ if do_pooling:
674
+ query = pooled_hidden
675
+ key = value = hidden if self.pool_q_only else pooled_hidden
676
+ else:
677
+ query = key = value = hidden
678
+ layer_output = layer(
679
+ query, key, value, attention_inputs, output_attentions=output_attentions, training=training
680
+ )
681
+ hidden = layer_output[0]
682
+ if do_pooling:
683
+ attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs)
684
+
685
+ if output_attentions:
686
+ all_attentions = all_attentions + layer_output[1:]
687
+ if output_hidden_states:
688
+ all_hidden_states = all_hidden_states + (hidden,)
689
+
690
+ if not return_dict:
691
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
692
+ return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
693
+
694
+ def build(self, input_shape=None):
695
+ if self.built:
696
+ return
697
+ self.built = True
698
+ for block in self.blocks:
699
+ for layer in block:
700
+ with tf.name_scope(layer.name):
701
+ layer.build(None)
702
+
703
+
704
+ def upsample(x, stride, target_len, separate_cls=True, truncate_seq=False):
705
+ """
706
+ Upsample tensor `x` to match `target_len` by repeating the tokens `stride` times on the sequence length dimension.
707
+ """
708
+ if stride == 1:
709
+ return x
710
+ if separate_cls:
711
+ cls = x[:, :1]
712
+ x = x[:, 1:]
713
+ output = tf.repeat(x, repeats=stride, axis=1)
714
+ if separate_cls:
715
+ if truncate_seq:
716
+ output = tf.pad(output, [[0, 0], [0, stride - 1], [0, 0]])
717
+ output = output[:, : target_len - 1]
718
+ output = tf.concat([cls, output], axis=1)
719
+ else:
720
+ output = output[:, :target_len]
721
+ return output
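A toy run of `upsample` makes the repeat-and-truncate behaviour above concrete; the tensor values, stride and target length are made up for illustration and assume the function defined just above is in scope.

import tensorflow as tf

# A pooled sequence of 4 tokens (the first being <cls>) stretched back to length 7.
x = tf.reshape(tf.constant([0.0, 1.0, 2.0, 3.0]), [1, 4, 1])
y = upsample(x, stride=2, target_len=7, separate_cls=True, truncate_seq=False)
print(tf.squeeze(y).numpy())  # [0. 1. 1. 2. 2. 3. 3.] -- <cls> kept once, the rest repeated twice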
722
+
723
+
724
+ class TFFunnelDecoder(keras.layers.Layer):
725
+ def __init__(self, config, **kwargs):
726
+ super().__init__(**kwargs)
727
+ self.separate_cls = config.separate_cls
728
+ self.truncate_seq = config.truncate_seq
729
+ self.stride = 2 ** (len(config.block_sizes) - 1)
730
+ self.attention_structure = TFFunnelAttentionStructure(config)
731
+ self.layers = [TFFunnelLayer(config, 0, name=f"layers_._{i}") for i in range(config.num_decoder_layers)]
732
+
733
+ def call(
734
+ self,
735
+ final_hidden,
736
+ first_block_hidden,
737
+ attention_mask=None,
738
+ token_type_ids=None,
739
+ output_attentions=False,
740
+ output_hidden_states=False,
741
+ return_dict=True,
742
+ training=False,
743
+ ):
744
+ upsampled_hidden = upsample(
745
+ final_hidden,
746
+ stride=self.stride,
747
+ target_len=shape_list(first_block_hidden)[1],
748
+ separate_cls=self.separate_cls,
749
+ truncate_seq=self.truncate_seq,
750
+ )
751
+
752
+ hidden = upsampled_hidden + first_block_hidden
753
+ all_hidden_states = (hidden,) if output_hidden_states else None
754
+ all_attentions = () if output_attentions else None
755
+
756
+ attention_inputs = self.attention_structure.init_attention_inputs(
757
+ hidden,
758
+ attention_mask=attention_mask,
759
+ token_type_ids=token_type_ids,
760
+ training=training,
761
+ )
762
+
763
+ for layer in self.layers:
764
+ layer_output = layer(
765
+ hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions, training=training
766
+ )
767
+ hidden = layer_output[0]
768
+
769
+ if output_attentions:
770
+ all_attentions = all_attentions + layer_output[1:]
771
+ if output_hidden_states:
772
+ all_hidden_states = all_hidden_states + (hidden,)
773
+
774
+ if not return_dict:
775
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
776
+ return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
777
+
778
+ def build(self, input_shape=None):
779
+ if self.built:
780
+ return
781
+ self.built = True
782
+ if getattr(self, "layers", None) is not None:
783
+ for layer in self.layers:
784
+ with tf.name_scope(layer.name):
785
+ layer.build(None)
786
+
787
+
788
+ @keras_serializable
789
+ class TFFunnelBaseLayer(keras.layers.Layer):
790
+ """Base model without decoder"""
791
+
792
+ config_class = FunnelConfig
793
+
794
+ def __init__(self, config, **kwargs):
795
+ super().__init__(**kwargs)
796
+
797
+ self.config = config
798
+ self.output_attentions = config.output_attentions
799
+ self.output_hidden_states = config.output_hidden_states
800
+ self.return_dict = config.use_return_dict
801
+
802
+ self.embeddings = TFFunnelEmbeddings(config, name="embeddings")
803
+ self.encoder = TFFunnelEncoder(config, name="encoder")
804
+
805
+ def get_input_embeddings(self):
806
+ return self.embeddings
807
+
808
+ def set_input_embeddings(self, value):
809
+ self.embeddings.weight = value
810
+ self.embeddings.vocab_size = shape_list(value)[0]
811
+
812
+ def _prune_heads(self, heads_to_prune):
813
+ raise NotImplementedError # Not implemented yet in the library for TF 2.0 models
814
+
815
+ @unpack_inputs
816
+ def call(
817
+ self,
818
+ input_ids=None,
819
+ attention_mask=None,
820
+ token_type_ids=None,
821
+ inputs_embeds=None,
822
+ output_attentions=None,
823
+ output_hidden_states=None,
824
+ return_dict=None,
825
+ training=False,
826
+ ):
827
+ if input_ids is not None and inputs_embeds is not None:
828
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
829
+ elif input_ids is not None:
830
+ input_shape = shape_list(input_ids)
831
+ elif inputs_embeds is not None:
832
+ input_shape = shape_list(inputs_embeds)[:-1]
833
+ else:
834
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
835
+
836
+ if attention_mask is None:
837
+ attention_mask = tf.fill(input_shape, 1)
838
+
839
+ if token_type_ids is None:
840
+ token_type_ids = tf.fill(input_shape, 0)
841
+
842
+ if inputs_embeds is None:
843
+ inputs_embeds = self.embeddings(input_ids, training=training)
844
+
845
+ encoder_outputs = self.encoder(
846
+ inputs_embeds,
847
+ attention_mask=attention_mask,
848
+ token_type_ids=token_type_ids,
849
+ output_attentions=output_attentions,
850
+ output_hidden_states=output_hidden_states,
851
+ return_dict=return_dict,
852
+ training=training,
853
+ )
854
+
855
+ return encoder_outputs
856
+
857
+ def build(self, input_shape=None):
858
+ if self.built:
859
+ return
860
+ self.built = True
861
+ if getattr(self, "embeddings", None) is not None:
862
+ with tf.name_scope(self.embeddings.name):
863
+ self.embeddings.build(None)
864
+ if getattr(self, "encoder", None) is not None:
865
+ with tf.name_scope(self.encoder.name):
866
+ self.encoder.build(None)
867
+
868
+
869
+ @keras_serializable
870
+ class TFFunnelMainLayer(keras.layers.Layer):
871
+ """Base model with decoder"""
872
+
873
+ config_class = FunnelConfig
874
+
875
+ def __init__(self, config, **kwargs):
876
+ super().__init__(**kwargs)
877
+
878
+ self.config = config
879
+ self.block_sizes = config.block_sizes
880
+ self.output_attentions = config.output_attentions
881
+ self.output_hidden_states = config.output_hidden_states
882
+ self.return_dict = config.use_return_dict
883
+
884
+ self.embeddings = TFFunnelEmbeddings(config, name="embeddings")
885
+ self.encoder = TFFunnelEncoder(config, name="encoder")
886
+ self.decoder = TFFunnelDecoder(config, name="decoder")
887
+
888
+ def get_input_embeddings(self):
889
+ return self.embeddings
890
+
891
+ def set_input_embeddings(self, value):
892
+ self.embeddings.weight = value
893
+ self.embeddings.vocab_size = shape_list(value)[0]
894
+
895
+ def _prune_heads(self, heads_to_prune):
896
+ raise NotImplementedError # Not implemented yet in the library for TF 2.0 models
897
+
898
+ @unpack_inputs
899
+ def call(
900
+ self,
901
+ input_ids=None,
902
+ attention_mask=None,
903
+ token_type_ids=None,
904
+ inputs_embeds=None,
905
+ output_attentions=None,
906
+ output_hidden_states=None,
907
+ return_dict=None,
908
+ training=False,
909
+ ):
910
+ if input_ids is not None and inputs_embeds is not None:
911
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
912
+ elif input_ids is not None:
913
+ input_shape = shape_list(input_ids)
914
+ elif inputs_embeds is not None:
915
+ input_shape = shape_list(inputs_embeds)[:-1]
916
+ else:
917
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
918
+
919
+ if attention_mask is None:
920
+ attention_mask = tf.fill(input_shape, 1)
921
+
922
+ if token_type_ids is None:
923
+ token_type_ids = tf.fill(input_shape, 0)
924
+
925
+ if inputs_embeds is None:
926
+ inputs_embeds = self.embeddings(input_ids, training=training)
927
+
928
+ encoder_outputs = self.encoder(
929
+ inputs_embeds,
930
+ attention_mask=attention_mask,
931
+ token_type_ids=token_type_ids,
932
+ output_attentions=output_attentions,
933
+ output_hidden_states=True,
934
+ return_dict=return_dict,
935
+ training=training,
936
+ )
937
+
938
+ decoder_outputs = self.decoder(
939
+ final_hidden=encoder_outputs[0],
940
+ first_block_hidden=encoder_outputs[1][self.block_sizes[0]],
941
+ attention_mask=attention_mask,
942
+ token_type_ids=token_type_ids,
943
+ output_attentions=output_attentions,
944
+ output_hidden_states=output_hidden_states,
945
+ return_dict=return_dict,
946
+ training=training,
947
+ )
948
+
949
+ if not return_dict:
950
+ idx = 0
951
+ outputs = (decoder_outputs[0],)
952
+ if output_hidden_states:
953
+ idx += 1
954
+ outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],)
955
+ if output_attentions:
956
+ idx += 1
957
+ outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],)
958
+ return outputs
959
+
960
+ return TFBaseModelOutput(
961
+ last_hidden_state=decoder_outputs[0],
962
+ hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states)
963
+ if output_hidden_states
964
+ else None,
965
+ attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None,
966
+ )
967
+
968
+ def build(self, input_shape=None):
969
+ if self.built:
970
+ return
971
+ self.built = True
972
+ if getattr(self, "embeddings", None) is not None:
973
+ with tf.name_scope(self.embeddings.name):
974
+ self.embeddings.build(None)
975
+ if getattr(self, "encoder", None) is not None:
976
+ with tf.name_scope(self.encoder.name):
977
+ self.encoder.build(None)
978
+ if getattr(self, "decoder", None) is not None:
979
+ with tf.name_scope(self.decoder.name):
980
+ self.decoder.build(None)
981
+
982
+
983
+ class TFFunnelDiscriminatorPredictions(keras.layers.Layer):
984
+ """Prediction module for the discriminator, made up of two dense layers."""
985
+
986
+ def __init__(self, config, **kwargs):
987
+ super().__init__(**kwargs)
988
+ initializer = get_initializer(config.initializer_range)
989
+ self.dense = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="dense")
990
+ self.activation_function = get_tf_activation(config.hidden_act)
991
+ self.dense_prediction = keras.layers.Dense(1, kernel_initializer=initializer, name="dense_prediction")
992
+ self.config = config
993
+
994
+ def call(self, discriminator_hidden_states):
995
+ hidden_states = self.dense(discriminator_hidden_states)
996
+ hidden_states = self.activation_function(hidden_states)
997
+ logits = tf.squeeze(self.dense_prediction(hidden_states))
998
+ return logits
999
+
1000
+ def build(self, input_shape=None):
1001
+ if self.built:
1002
+ return
1003
+ self.built = True
1004
+ if getattr(self, "dense", None) is not None:
1005
+ with tf.name_scope(self.dense.name):
1006
+ self.dense.build([None, None, self.config.d_model])
1007
+ if getattr(self, "dense_prediction", None) is not None:
1008
+ with tf.name_scope(self.dense_prediction.name):
1009
+ self.dense_prediction.build([None, None, self.config.d_model])
1010
+
1011
+
1012
+ class TFFunnelMaskedLMHead(keras.layers.Layer):
1013
+ def __init__(self, config, input_embeddings, **kwargs):
1014
+ super().__init__(**kwargs)
1015
+ self.config = config
1016
+ self.hidden_size = config.hidden_size
1017
+ self.input_embeddings = input_embeddings
1018
+
1019
+ def build(self, input_shape):
1020
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1021
+
1022
+ super().build(input_shape)
1023
+
1024
+ def get_output_embeddings(self):
1025
+ return self.input_embeddings
1026
+
1027
+ def set_output_embeddings(self, value):
1028
+ self.input_embeddings.weight = value
1029
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1030
+
1031
+ def get_bias(self):
1032
+ return {"bias": self.bias}
1033
+
1034
+ def set_bias(self, value):
1035
+ self.bias = value["bias"]
1036
+ self.config.vocab_size = shape_list(value["bias"])[0]
1037
+
1038
+ def call(self, hidden_states, training=False):
1039
+ seq_length = shape_list(tensor=hidden_states)[1]
1040
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
1041
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1042
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1043
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1044
+
1045
+ return hidden_states
1046
+
1047
+
1048
+ class TFFunnelClassificationHead(keras.layers.Layer):
1049
+ def __init__(self, config, n_labels, **kwargs):
1050
+ super().__init__(**kwargs)
1051
+ initializer = get_initializer(config.initializer_range)
1052
+ self.linear_hidden = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="linear_hidden")
1053
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
1054
+ self.linear_out = keras.layers.Dense(n_labels, kernel_initializer=initializer, name="linear_out")
1055
+ self.config = config
1056
+
1057
+ def call(self, hidden, training=False):
1058
+ hidden = self.linear_hidden(hidden)
1059
+ hidden = keras.activations.tanh(hidden)
1060
+ hidden = self.dropout(hidden, training=training)
1061
+ return self.linear_out(hidden)
1062
+
1063
+ def build(self, input_shape=None):
1064
+ if self.built:
1065
+ return
1066
+ self.built = True
1067
+ if getattr(self, "linear_hidden", None) is not None:
1068
+ with tf.name_scope(self.linear_hidden.name):
1069
+ self.linear_hidden.build([None, None, self.config.d_model])
1070
+ if getattr(self, "linear_out", None) is not None:
1071
+ with tf.name_scope(self.linear_out.name):
1072
+ self.linear_out.build([None, None, self.config.d_model])
1073
+
1074
+
1075
+ class TFFunnelPreTrainedModel(TFPreTrainedModel):
1076
+ """
1077
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1078
+ models.
1079
+ """
1080
+
1081
+ config_class = FunnelConfig
1082
+ base_model_prefix = "funnel"
1083
+
1084
+ @property
1085
+ def dummy_inputs(self):
1086
+ # Funnel misbehaves with very small inputs, so we override and make them a bit bigger
1087
+ return {"input_ids": tf.ones((1, 3), dtype=tf.int32)}
1088
+
1089
+
1090
+ @dataclass
1091
+ class TFFunnelForPreTrainingOutput(ModelOutput):
1092
+ """
1093
+ Output type of [`FunnelForPreTraining`].
1094
+
1095
+ Args:
1096
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
1097
+ Prediction scores of the head (scores for each token before SoftMax).
1098
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
1099
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
1100
+ `(batch_size, sequence_length, hidden_size)`.
1101
+
1102
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1103
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
1104
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1105
+ sequence_length)`.
1106
+
1107
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1108
+ heads.
1109
+ """
1110
+
1111
+ logits: tf.Tensor = None
1112
+ hidden_states: Tuple[tf.Tensor] | None = None
1113
+ attentions: Tuple[tf.Tensor] | None = None
1114
+
1115
+
1116
+ FUNNEL_START_DOCSTRING = r"""
1117
+
1118
+ The Funnel Transformer model was proposed in [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient
1119
+ Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
1120
+
1121
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1122
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1123
+ etc.)
1124
+
1125
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1126
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1127
+ behavior.
1128
+
1129
+ <Tip>
1130
+
1131
+ TensorFlow models and layers in `transformers` accept two formats as input:
1132
+
1133
+ - having all inputs as keyword arguments (like PyTorch models), or
1134
+ - having all inputs as a list, tuple or dict in the first positional argument.
1135
+
1136
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1137
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1138
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1139
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1140
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1141
+ positional argument:
1142
+
1143
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1144
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1145
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1146
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1147
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1148
+
1149
+ Note that when creating models and layers with
1150
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1151
+ about any of this, as you can just pass inputs like you would to any other Python function!
1152
+
1153
+ </Tip>
1154
+
1155
+ Parameters:
1156
+ config ([`XxxConfig`]): Model configuration class with all the parameters of the model.
1157
+ Initializing with a config file does not load the weights associated with the model, only the
1158
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1159
+ """
1160
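A minimal usage sketch (editorial, not part of the committed file) of the three input formats described in the docstring above; it assumes the public `funnel-transformer/small` checkpoint that the code-sample docstrings in this file reference:

```python
from transformers import AutoTokenizer, TFFunnelModel

tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
model = TFFunnelModel.from_pretrained("funnel-transformer/small")
encoding = tokenizer("Hello, my dog is cute", return_tensors="tf")

# 1) a single tensor with input_ids only
outputs = model(encoding["input_ids"])
# 2) a list of tensors, in the order given in the inputs docstring
outputs = model([encoding["input_ids"], encoding["attention_mask"]])
# 3) a dictionary keyed by the documented input names
outputs = model({"input_ids": encoding["input_ids"], "attention_mask": encoding["attention_mask"]})
```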
+
1161
+ FUNNEL_INPUTS_DOCSTRING = r"""
1162
+ Args:
1163
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
1164
+ Indices of input sequence tokens in the vocabulary.
1165
+
1166
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1167
+ [`PreTrainedTokenizer.encode`] for details.
1168
+
1169
+ [What are input IDs?](../glossary#input-ids)
1170
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1171
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1172
+
1173
+ - 1 for tokens that are **not masked**,
1174
+ - 0 for tokens that are **masked**.
1175
+
1176
+ [What are attention masks?](../glossary#attention-mask)
1177
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1178
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1179
+ 1]`:
1180
+
1181
+ - 0 corresponds to a *sentence A* token,
1182
+ - 1 corresponds to a *sentence B* token.
1183
+
1184
+ [What are token type IDs?](../glossary#token-type-ids)
1185
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1186
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1187
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1188
+ model's internal embedding lookup matrix.
1189
+ output_attentions (`bool`, *optional*):
1190
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1191
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1192
+ config will be used instead.
1193
+ output_hidden_states (`bool`, *optional*):
1194
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1195
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1196
+ used instead.
1197
+ return_dict (`bool`, *optional*):
1198
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1199
+ eager mode, in graph mode the value will always be set to True.
1200
+ training (`bool`, *optional*, defaults to `False`):
1201
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1202
+ behaviors between training and evaluation).
1203
+ """
1204
+
1205
+
1206
+ @add_start_docstrings(
1207
+ """
1208
+ The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called
1209
+ decoder) or any task-specific head on top.
1210
+ """,
1211
+ FUNNEL_START_DOCSTRING,
1212
+ )
1213
+ class TFFunnelBaseModel(TFFunnelPreTrainedModel):
1214
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1215
+ super().__init__(config, *inputs, **kwargs)
1216
+ self.funnel = TFFunnelBaseLayer(config, name="funnel")
1217
+
1218
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1219
+ @add_code_sample_docstrings(
1220
+ checkpoint="funnel-transformer/small-base",
1221
+ output_type=TFBaseModelOutput,
1222
+ config_class=_CONFIG_FOR_DOC,
1223
+ )
1224
+ @unpack_inputs
1225
+ def call(
1226
+ self,
1227
+ input_ids: TFModelInputType | None = None,
1228
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1229
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1230
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1231
+ output_attentions: Optional[bool] = None,
1232
+ output_hidden_states: Optional[bool] = None,
1233
+ return_dict: Optional[bool] = None,
1234
+ training: bool = False,
1235
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutput]:
1236
+ return self.funnel(
1237
+ input_ids=input_ids,
1238
+ attention_mask=attention_mask,
1239
+ token_type_ids=token_type_ids,
1240
+ inputs_embeds=inputs_embeds,
1241
+ output_attentions=output_attentions,
1242
+ output_hidden_states=output_hidden_states,
1243
+ return_dict=return_dict,
1244
+ training=training,
1245
+ )
1246
+
1247
+ def serving_output(self, output):
1248
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1249
+ # different dimensions
1250
+ return TFBaseModelOutput(
1251
+ last_hidden_state=output.last_hidden_state,
1252
+ hidden_states=output.hidden_states,
1253
+ attentions=output.attentions,
1254
+ )
1255
+
1256
+ def build(self, input_shape=None):
1257
+ if self.built:
1258
+ return
1259
+ self.built = True
1260
+ if getattr(self, "funnel", None) is not None:
1261
+ with tf.name_scope(self.funnel.name):
1262
+ self.funnel.build(None)
1263
+
1264
+
1265
+ @add_start_docstrings(
1266
+ "The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.",
1267
+ FUNNEL_START_DOCSTRING,
1268
+ )
1269
+ class TFFunnelModel(TFFunnelPreTrainedModel):
1270
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1271
+ super().__init__(config, *inputs, **kwargs)
1272
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1273
+
1274
+ @unpack_inputs
1275
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1276
+ @add_code_sample_docstrings(
1277
+ checkpoint="funnel-transformer/small",
1278
+ output_type=TFBaseModelOutput,
1279
+ config_class=_CONFIG_FOR_DOC,
1280
+ )
1281
+ def call(
1282
+ self,
1283
+ input_ids: TFModelInputType | None = None,
1284
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1285
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1286
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1287
+ output_attentions: Optional[bool] = None,
1288
+ output_hidden_states: Optional[bool] = None,
1289
+ return_dict: Optional[bool] = None,
1290
+ training: bool = False,
1291
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutput]:
1292
+ return self.funnel(
1293
+ input_ids=input_ids,
1294
+ attention_mask=attention_mask,
1295
+ token_type_ids=token_type_ids,
1296
+ inputs_embeds=inputs_embeds,
1297
+ output_attentions=output_attentions,
1298
+ output_hidden_states=output_hidden_states,
1299
+ return_dict=return_dict,
1300
+ training=training,
1301
+ )
1302
+
1303
+ def serving_output(self, output):
1304
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1305
+ # different dimensions
1306
+ return TFBaseModelOutput(
1307
+ last_hidden_state=output.last_hidden_state,
1308
+ hidden_states=output.hidden_states,
1309
+ attentions=output.attentions,
1310
+ )
1311
+
1312
+ def build(self, input_shape=None):
1313
+ if self.built:
1314
+ return
1315
+ self.built = True
1316
+ if getattr(self, "funnel", None) is not None:
1317
+ with tf.name_scope(self.funnel.name):
1318
+ self.funnel.build(None)
1319
+
1320
+
1321
+ @add_start_docstrings(
1322
+ """
1323
+ Funnel model with a binary classification head on top as used during pretraining for identifying generated tokens.
1324
+ """,
1325
+ FUNNEL_START_DOCSTRING,
1326
+ )
1327
+ class TFFunnelForPreTraining(TFFunnelPreTrainedModel):
1328
+ def __init__(self, config: FunnelConfig, **kwargs) -> None:
1329
+ super().__init__(config, **kwargs)
1330
+
1331
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1332
+ self.discriminator_predictions = TFFunnelDiscriminatorPredictions(config, name="discriminator_predictions")
1333
+
1334
+ @unpack_inputs
1335
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1336
+ @replace_return_docstrings(output_type=TFFunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1337
+ def call(
1338
+ self,
1339
+ input_ids: TFModelInputType | None = None,
1340
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1341
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1342
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1343
+ output_attentions: Optional[bool] = None,
1344
+ output_hidden_states: Optional[bool] = None,
1345
+ return_dict: Optional[bool] = None,
1346
+ training: bool = False,
1347
+ **kwargs,
1348
+ ) -> Union[Tuple[tf.Tensor], TFFunnelForPreTrainingOutput]:
1349
+ r"""
1350
+ Returns:
1351
+
1352
+ Examples:
1353
+
1354
+ ```python
1355
+ >>> from transformers import AutoTokenizer, TFFunnelForPreTraining
1356
+ >>> import tensorflow as tf
1357
+
1358
+ >>> tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
1359
+ >>> model = TFFunnelForPreTraining.from_pretrained("funnel-transformer/small")
1360
+
1361
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
1362
+ >>> logits = model(inputs).logits
1363
+ ```"""
1364
+ discriminator_hidden_states = self.funnel(
1365
+ input_ids,
1366
+ attention_mask,
1367
+ token_type_ids,
1368
+ inputs_embeds,
1369
+ output_attentions,
1370
+ output_hidden_states,
1371
+ return_dict=return_dict,
1372
+ training=training,
1373
+ )
1374
+ discriminator_sequence_output = discriminator_hidden_states[0]
1375
+ logits = self.discriminator_predictions(discriminator_sequence_output)
1376
+
1377
+ if not return_dict:
1378
+ return (logits,) + discriminator_hidden_states[1:]
1379
+
1380
+ return TFFunnelForPreTrainingOutput(
1381
+ logits=logits,
1382
+ hidden_states=discriminator_hidden_states.hidden_states,
1383
+ attentions=discriminator_hidden_states.attentions,
1384
+ )
1385
+
1386
+ def serving_output(self, output):
1387
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1388
+ # different dimensions
1389
+ return TFFunnelForPreTrainingOutput(
1390
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
1391
+ )
1392
+
1393
+ def build(self, input_shape=None):
1394
+ if self.built:
1395
+ return
1396
+ self.built = True
1397
+ if getattr(self, "funnel", None) is not None:
1398
+ with tf.name_scope(self.funnel.name):
1399
+ self.funnel.build(None)
1400
+ if getattr(self, "discriminator_predictions", None) is not None:
1401
+ with tf.name_scope(self.discriminator_predictions.name):
1402
+ self.discriminator_predictions.build(None)
1403
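Continuing the `TFFunnelForPreTraining` docstring example above, one plausible way to read the per-token discriminator logits (an assumed convention mirroring ELECTRA-style replaced-token detection, not something this file prescribes):

```python
import tensorflow as tf

# `logits` comes from the docstring example above: one score per input token.
probs = tf.sigmoid(logits)
is_replaced = tf.cast(probs > 0.5, tf.int32)  # 1 where the discriminator flags a replaced token
```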
+
1404
+
1405
+ @add_start_docstrings("""Funnel Model with a `language modeling` head on top.""", FUNNEL_START_DOCSTRING)
1406
+ class TFFunnelForMaskedLM(TFFunnelPreTrainedModel, TFMaskedLanguageModelingLoss):
1407
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1408
+ super().__init__(config, *inputs, **kwargs)
1409
+
1410
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1411
+ self.lm_head = TFFunnelMaskedLMHead(config, self.funnel.embeddings, name="lm_head")
1412
+
1413
+ def get_lm_head(self) -> TFFunnelMaskedLMHead:
1414
+ return self.lm_head
1415
+
1416
+ def get_prefix_bias_name(self) -> str:
1417
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1418
+ return self.name + "/" + self.lm_head.name
1419
+
1420
+ @unpack_inputs
1421
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1422
+ @add_code_sample_docstrings(
1423
+ checkpoint="funnel-transformer/small",
1424
+ output_type=TFMaskedLMOutput,
1425
+ config_class=_CONFIG_FOR_DOC,
1426
+ )
1427
+ def call(
1428
+ self,
1429
+ input_ids: TFModelInputType | None = None,
1430
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1431
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1432
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1433
+ output_attentions: Optional[bool] = None,
1434
+ output_hidden_states: Optional[bool] = None,
1435
+ return_dict: Optional[bool] = None,
1436
+ labels: np.ndarray | tf.Tensor | None = None,
1437
+ training: bool = False,
1438
+ ) -> Union[Tuple[tf.Tensor], TFMaskedLMOutput]:
1439
+ r"""
1440
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1441
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1442
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1443
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1444
+ """
1445
+ outputs = self.funnel(
1446
+ input_ids,
1447
+ attention_mask,
1448
+ token_type_ids,
1449
+ inputs_embeds,
1450
+ output_attentions,
1451
+ output_hidden_states,
1452
+ return_dict=return_dict,
1453
+ training=training,
1454
+ )
1455
+ sequence_output = outputs[0]
1456
+ prediction_scores = self.lm_head(sequence_output, training=training)
1457
+
1458
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1459
+
1460
+ if not return_dict:
1461
+ output = (prediction_scores,) + outputs[1:]
1462
+ return ((loss,) + output) if loss is not None else output
1463
+
1464
+ return TFMaskedLMOutput(
1465
+ loss=loss,
1466
+ logits=prediction_scores,
1467
+ hidden_states=outputs.hidden_states,
1468
+ attentions=outputs.attentions,
1469
+ )
1470
+
1471
+ def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
1472
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1473
+ # different dimensions
1474
+ return TFMaskedLMOutput(logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions)
1475
+
1476
+ def build(self, input_shape=None):
1477
+ if self.built:
1478
+ return
1479
+ self.built = True
1480
+ if getattr(self, "funnel", None) is not None:
1481
+ with tf.name_scope(self.funnel.name):
1482
+ self.funnel.build(None)
1483
+ if getattr(self, "lm_head", None) is not None:
1484
+ with tf.name_scope(self.lm_head.name):
1485
+ self.lm_head.build(None)
1486
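A hedged usage sketch for `TFFunnelForMaskedLM` as defined above, using the `funnel-transformer/small` checkpoint named in its code-sample docstring; the masked sentence and the quality of the prediction are illustrative only:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFFunnelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
model = TFFunnelForMaskedLM.from_pretrained("funnel-transformer/small")

inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="tf")
logits = model(**inputs).logits

# locate the masked position and take the highest-scoring vocabulary entry there
mask_position = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0]
predicted_id = int(tf.argmax(logits[0, mask_position]))
print(tokenizer.decode([predicted_id]))
```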
+
1487
+
1488
+ @add_start_docstrings(
1489
+ """
1490
+ Funnel Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1491
+ output) e.g. for GLUE tasks.
1492
+ """,
1493
+ FUNNEL_START_DOCSTRING,
1494
+ )
1495
+ class TFFunnelForSequenceClassification(TFFunnelPreTrainedModel, TFSequenceClassificationLoss):
1496
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1497
+ super().__init__(config, *inputs, **kwargs)
1498
+ self.num_labels = config.num_labels
1499
+
1500
+ self.funnel = TFFunnelBaseLayer(config, name="funnel")
1501
+ self.classifier = TFFunnelClassificationHead(config, config.num_labels, name="classifier")
1502
+
1503
+ @unpack_inputs
1504
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1505
+ @add_code_sample_docstrings(
1506
+ checkpoint="funnel-transformer/small-base",
1507
+ output_type=TFSequenceClassifierOutput,
1508
+ config_class=_CONFIG_FOR_DOC,
1509
+ )
1510
+ def call(
1511
+ self,
1512
+ input_ids: TFModelInputType | None = None,
1513
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1514
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1515
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1516
+ output_attentions: Optional[bool] = None,
1517
+ output_hidden_states: Optional[bool] = None,
1518
+ return_dict: Optional[bool] = None,
1519
+ labels: np.ndarray | tf.Tensor | None = None,
1520
+ training: bool = False,
1521
+ ) -> Union[Tuple[tf.Tensor], TFSequenceClassifierOutput]:
1522
+ r"""
1523
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1524
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1525
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1526
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1527
+ """
1528
+ outputs = self.funnel(
1529
+ input_ids,
1530
+ attention_mask,
1531
+ token_type_ids,
1532
+ inputs_embeds,
1533
+ output_attentions,
1534
+ output_hidden_states,
1535
+ return_dict=return_dict,
1536
+ training=training,
1537
+ )
1538
+ last_hidden_state = outputs[0]
1539
+ pooled_output = last_hidden_state[:, 0]
1540
+ logits = self.classifier(pooled_output, training=training)
1541
+
1542
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1543
+
1544
+ if not return_dict:
1545
+ output = (logits,) + outputs[1:]
1546
+ return ((loss,) + output) if loss is not None else output
1547
+
1548
+ return TFSequenceClassifierOutput(
1549
+ loss=loss,
1550
+ logits=logits,
1551
+ hidden_states=outputs.hidden_states,
1552
+ attentions=outputs.attentions,
1553
+ )
1554
+
1555
+ def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
1556
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1557
+ # different dimensions
1558
+ return TFSequenceClassifierOutput(
1559
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
1560
+ )
1561
+
1562
+ def build(self, input_shape=None):
1563
+ if self.built:
1564
+ return
1565
+ self.built = True
1566
+ if getattr(self, "funnel", None) is not None:
1567
+ with tf.name_scope(self.funnel.name):
1568
+ self.funnel.build(None)
1569
+ if getattr(self, "classifier", None) is not None:
1570
+ with tf.name_scope(self.classifier.name):
1571
+ self.classifier.build(None)
1572
+
1573
+
1574
+ @add_start_docstrings(
1575
+ """
1576
+ Funnel Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1577
+ softmax) e.g. for RocStories/SWAG tasks.
1578
+ """,
1579
+ FUNNEL_START_DOCSTRING,
1580
+ )
1581
+ class TFFunnelForMultipleChoice(TFFunnelPreTrainedModel, TFMultipleChoiceLoss):
1582
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1583
+ super().__init__(config, *inputs, **kwargs)
1584
+
1585
+ self.funnel = TFFunnelBaseLayer(config, name="funnel")
1586
+ self.classifier = TFFunnelClassificationHead(config, 1, name="classifier")
1587
+
1588
+ @property
1589
+ def dummy_inputs(self):
1590
+ return {"input_ids": tf.ones((3, 3, 4), dtype=tf.int32)}
1591
+
1592
+ @unpack_inputs
1593
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1594
+ @add_code_sample_docstrings(
1595
+ checkpoint="funnel-transformer/small-base",
1596
+ output_type=TFMultipleChoiceModelOutput,
1597
+ config_class=_CONFIG_FOR_DOC,
1598
+ )
1599
+ def call(
1600
+ self,
1601
+ input_ids: TFModelInputType | None = None,
1602
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1603
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1604
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1605
+ output_attentions: Optional[bool] = None,
1606
+ output_hidden_states: Optional[bool] = None,
1607
+ return_dict: Optional[bool] = None,
1608
+ labels: np.ndarray | tf.Tensor | None = None,
1609
+ training: bool = False,
1610
+ ) -> Union[Tuple[tf.Tensor], TFMultipleChoiceModelOutput]:
1611
+ r"""
1612
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1613
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1614
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1615
+ """
1616
+ if input_ids is not None:
1617
+ num_choices = shape_list(input_ids)[1]
1618
+ seq_length = shape_list(input_ids)[2]
1619
+ else:
1620
+ num_choices = shape_list(inputs_embeds)[1]
1621
+ seq_length = shape_list(inputs_embeds)[2]
1622
+
1623
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1624
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1625
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1626
+ flat_inputs_embeds = (
1627
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1628
+ if inputs_embeds is not None
1629
+ else None
1630
+ )
1631
+
1632
+ outputs = self.funnel(
1633
+ flat_input_ids,
1634
+ attention_mask=flat_attention_mask,
1635
+ token_type_ids=flat_token_type_ids,
1636
+ inputs_embeds=flat_inputs_embeds,
1637
+ output_attentions=output_attentions,
1638
+ output_hidden_states=output_hidden_states,
1639
+ return_dict=return_dict,
1640
+ training=training,
1641
+ )
1642
+
1643
+ last_hidden_state = outputs[0]
1644
+ pooled_output = last_hidden_state[:, 0]
1645
+ logits = self.classifier(pooled_output, training=training)
1646
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1647
+
1648
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1649
+
1650
+ if not return_dict:
1651
+ output = (reshaped_logits,) + outputs[1:]
1652
+ return ((loss,) + output) if loss is not None else output
1653
+
1654
+ return TFMultipleChoiceModelOutput(
1655
+ loss=loss,
1656
+ logits=reshaped_logits,
1657
+ hidden_states=outputs.hidden_states,
1658
+ attentions=outputs.attentions,
1659
+ )
1660
+
1661
+ def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
1662
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1663
+ # different dimensions
1664
+ return TFMultipleChoiceModelOutput(
1665
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
1666
+ )
1667
+
1668
+ def build(self, input_shape=None):
1669
+ if self.built:
1670
+ return
1671
+ self.built = True
1672
+ if getattr(self, "funnel", None) is not None:
1673
+ with tf.name_scope(self.funnel.name):
1674
+ self.funnel.build(None)
1675
+ if getattr(self, "classifier", None) is not None:
1676
+ with tf.name_scope(self.classifier.name):
1677
+ self.classifier.build(None)
1678
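A small shape walk-through (illustrative numbers, editorial) of the reshaping done in `TFFunnelForMultipleChoice.call` above: choices are folded into the batch before the encoder and unfolded again for the per-choice scores:

```python
import tensorflow as tf

batch_size, num_choices, seq_length = 2, 4, 16
input_ids = tf.zeros((batch_size, num_choices, seq_length), dtype=tf.int32)

flat_input_ids = tf.reshape(input_ids, (-1, seq_length))  # (8, 16): choices folded into the batch
logits = tf.zeros((batch_size * num_choices, 1))          # stand-in for the classifier head output
reshaped_logits = tf.reshape(logits, (-1, num_choices))   # (2, 4): one score per choice
```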
+
1679
+
1680
+ @add_start_docstrings(
1681
+ """
1682
+ Funnel Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1683
+ Named-Entity-Recognition (NER) tasks.
1684
+ """,
1685
+ FUNNEL_START_DOCSTRING,
1686
+ )
1687
+ class TFFunnelForTokenClassification(TFFunnelPreTrainedModel, TFTokenClassificationLoss):
1688
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1689
+ super().__init__(config, *inputs, **kwargs)
1690
+ self.num_labels = config.num_labels
1691
+
1692
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1693
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
1694
+ self.classifier = keras.layers.Dense(
1695
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1696
+ )
1697
+ self.config = config
1698
+
1699
+ @unpack_inputs
1700
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1701
+ @add_code_sample_docstrings(
1702
+ checkpoint="funnel-transformer/small",
1703
+ output_type=TFTokenClassifierOutput,
1704
+ config_class=_CONFIG_FOR_DOC,
1705
+ )
1706
+ def call(
1707
+ self,
1708
+ input_ids: TFModelInputType | None = None,
1709
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1710
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1711
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1712
+ output_attentions: Optional[bool] = None,
1713
+ output_hidden_states: Optional[bool] = None,
1714
+ return_dict: Optional[bool] = None,
1715
+ labels: np.ndarray | tf.Tensor | None = None,
1716
+ training: bool = False,
1717
+ ) -> Union[Tuple[tf.Tensor], TFTokenClassifierOutput]:
1718
+ r"""
1719
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1720
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1721
+ """
1722
+ outputs = self.funnel(
1723
+ input_ids,
1724
+ attention_mask,
1725
+ token_type_ids,
1726
+ inputs_embeds,
1727
+ output_attentions,
1728
+ output_hidden_states,
1729
+ return_dict=return_dict,
1730
+ training=training,
1731
+ )
1732
+ sequence_output = outputs[0]
1733
+
1734
+ sequence_output = self.dropout(sequence_output, training=training)
1735
+ logits = self.classifier(sequence_output)
1736
+
1737
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1738
+
1739
+ if not return_dict:
1740
+ output = (logits,) + outputs[1:]
1741
+ return ((loss,) + output) if loss is not None else output
1742
+
1743
+ return TFTokenClassifierOutput(
1744
+ loss=loss,
1745
+ logits=logits,
1746
+ hidden_states=outputs.hidden_states,
1747
+ attentions=outputs.attentions,
1748
+ )
1749
+
1750
+ def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
1751
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1752
+ # different dimensions
1753
+ return TFTokenClassifierOutput(
1754
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
1755
+ )
1756
+
1757
+ def build(self, input_shape=None):
1758
+ if self.built:
1759
+ return
1760
+ self.built = True
1761
+ if getattr(self, "funnel", None) is not None:
1762
+ with tf.name_scope(self.funnel.name):
1763
+ self.funnel.build(None)
1764
+ if getattr(self, "classifier", None) is not None:
1765
+ with tf.name_scope(self.classifier.name):
1766
+ self.classifier.build([None, None, self.config.hidden_size])
1767
+
1768
+
1769
+ @add_start_docstrings(
1770
+ """
1771
+ Funnel Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1772
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1773
+ """,
1774
+ FUNNEL_START_DOCSTRING,
1775
+ )
1776
+ class TFFunnelForQuestionAnswering(TFFunnelPreTrainedModel, TFQuestionAnsweringLoss):
1777
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
1778
+ super().__init__(config, *inputs, **kwargs)
1779
+ self.num_labels = config.num_labels
1780
+
1781
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
1782
+ self.qa_outputs = keras.layers.Dense(
1783
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1784
+ )
1785
+ self.config = config
1786
+
1787
+ @unpack_inputs
1788
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1789
+ @add_code_sample_docstrings(
1790
+ checkpoint="funnel-transformer/small",
1791
+ output_type=TFQuestionAnsweringModelOutput,
1792
+ config_class=_CONFIG_FOR_DOC,
1793
+ )
1794
+ def call(
1795
+ self,
1796
+ input_ids: TFModelInputType | None = None,
1797
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1798
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1799
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1800
+ output_attentions: Optional[bool] = None,
1801
+ output_hidden_states: Optional[bool] = None,
1802
+ return_dict: Optional[bool] = None,
1803
+ start_positions: np.ndarray | tf.Tensor | None = None,
1804
+ end_positions: np.ndarray | tf.Tensor | None = None,
1805
+ training: bool = False,
1806
+ ) -> Union[Tuple[tf.Tensor], TFQuestionAnsweringModelOutput]:
1807
+ r"""
1808
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1809
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1810
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1811
+ are not taken into account for computing the loss.
1812
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1813
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1814
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1815
+ are not taken into account for computing the loss.
1816
+ """
1817
+
1818
+ outputs = self.funnel(
1819
+ input_ids,
1820
+ attention_mask,
1821
+ token_type_ids,
1822
+ inputs_embeds,
1823
+ output_attentions,
1824
+ output_hidden_states,
1825
+ return_dict=return_dict,
1826
+ training=training,
1827
+ )
1828
+ sequence_output = outputs[0]
1829
+
1830
+ logits = self.qa_outputs(sequence_output)
1831
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1832
+ start_logits = tf.squeeze(start_logits, axis=-1)
1833
+ end_logits = tf.squeeze(end_logits, axis=-1)
1834
+
1835
+ loss = None
1836
+ if start_positions is not None and end_positions is not None:
1837
+ labels = {"start_position": start_positions, "end_position": end_positions}
1838
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1839
+
1840
+ if not return_dict:
1841
+ output = (start_logits, end_logits) + outputs[1:]
1842
+ return ((loss,) + output) if loss is not None else output
1843
+
1844
+ return TFQuestionAnsweringModelOutput(
1845
+ loss=loss,
1846
+ start_logits=start_logits,
1847
+ end_logits=end_logits,
1848
+ hidden_states=outputs.hidden_states,
1849
+ attentions=outputs.attentions,
1850
+ )
1851
+
1852
+ def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
1853
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
1854
+ # different dimensions
1855
+ return TFQuestionAnsweringModelOutput(
1856
+ start_logits=output.start_logits,
1857
+ end_logits=output.end_logits,
1858
+ hidden_states=output.hidden_states,
1859
+ attentions=output.attentions,
1860
+ )
1861
+
1862
+ def build(self, input_shape=None):
1863
+ if self.built:
1864
+ return
1865
+ self.built = True
1866
+ if getattr(self, "funnel", None) is not None:
1867
+ with tf.name_scope(self.funnel.name):
1868
+ self.funnel.build(None)
1869
+ if getattr(self, "qa_outputs", None) is not None:
1870
+ with tf.name_scope(self.qa_outputs.name):
1871
+ self.qa_outputs.build([None, None, self.config.hidden_size])
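A hedged usage sketch for `TFFunnelForQuestionAnswering` above; note that `funnel-transformer/small` is a pretraining checkpoint rather than a SQuAD fine-tune, so the decoded span only demonstrates the API:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFFunnelForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
model = TFFunnelForQuestionAnswering.from_pretrained("funnel-transformer/small")

inputs = tokenizer("Who wrote Hamlet?", "Hamlet was written by Shakespeare.", return_tensors="tf")
outputs = model(**inputs)

# pick the most likely start/end positions and decode the corresponding span
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```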
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel_fast.py ADDED
@@ -0,0 +1,200 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for Funnel Transformer."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_funnel import FunnelTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+ _model_names = [
32
+ "small",
33
+ "small-base",
34
+ "medium",
35
+ "medium-base",
36
+ "intermediate",
37
+ "intermediate-base",
38
+ "large",
39
+ "large-base",
40
+ "xlarge",
41
+ "xlarge-base",
42
+ ]
43
+
44
+
45
+ class FunnelTokenizerFast(PreTrainedTokenizerFast):
46
+ r"""
47
+ Construct a "fast" Funnel Transformer tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
48
+
49
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
50
+ refer to this superclass for more information regarding those methods.
51
+
52
+ Args:
53
+ vocab_file (`str`):
54
+ File containing the vocabulary.
55
+ do_lower_case (`bool`, *optional*, defaults to `True`):
56
+ Whether or not to lowercase the input when tokenizing.
57
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
58
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
59
+ token instead.
60
+ sep_token (`str`, *optional*, defaults to `"<sep>"`):
61
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
62
+ sequence classification or for a text and a question for question answering. It is also used as the last
63
+ token of a sequence built with special tokens.
64
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
65
+ The token used for padding, for example when batching sequences of different lengths.
66
+ cls_token (`str`, *optional*, defaults to `"<cls>"`):
67
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
68
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
69
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
70
+ The token used for masking values. This is the token used when training this model with masked language
71
+ modeling. This is the token which the model will try to predict.
72
+ clean_text (`bool`, *optional*, defaults to `True`):
73
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
74
+ whitespaces by the classic one.
75
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
76
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
77
+ issue](https://github.com/huggingface/transformers/issues/328)).
78
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
79
+ The beginning of sentence token.
80
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
81
+ The end of sentence token.
82
+ strip_accents (`bool`, *optional*):
83
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
84
+ value for `lowercase` (as in the original BERT).
85
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
86
+ The prefix for subwords.
87
+ """
88
+
89
+ vocab_files_names = VOCAB_FILES_NAMES
90
+ slow_tokenizer_class = FunnelTokenizer
91
+ cls_token_type_id: int = 2
92
+
93
+ def __init__(
94
+ self,
95
+ vocab_file=None,
96
+ tokenizer_file=None,
97
+ do_lower_case=True,
98
+ unk_token="<unk>",
99
+ sep_token="<sep>",
100
+ pad_token="<pad>",
101
+ cls_token="<cls>",
102
+ mask_token="<mask>",
103
+ bos_token="<s>",
104
+ eos_token="</s>",
105
+ clean_text=True,
106
+ tokenize_chinese_chars=True,
107
+ strip_accents=None,
108
+ wordpieces_prefix="##",
109
+ **kwargs,
110
+ ):
111
+ super().__init__(
112
+ vocab_file,
113
+ tokenizer_file=tokenizer_file,
114
+ do_lower_case=do_lower_case,
115
+ unk_token=unk_token,
116
+ sep_token=sep_token,
117
+ pad_token=pad_token,
118
+ cls_token=cls_token,
119
+ mask_token=mask_token,
120
+ bos_token=bos_token,
121
+ eos_token=eos_token,
122
+ clean_text=clean_text,
123
+ tokenize_chinese_chars=tokenize_chinese_chars,
124
+ strip_accents=strip_accents,
125
+ wordpieces_prefix=wordpieces_prefix,
126
+ **kwargs,
127
+ )
128
+
129
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
130
+ if (
131
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
132
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
133
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
134
+ ):
135
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
136
+ normalizer_state["lowercase"] = do_lower_case
137
+ normalizer_state["strip_accents"] = strip_accents
138
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
139
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
140
+
141
+ self.do_lower_case = do_lower_case
142
+
143
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens with BERT->Funnel
144
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
145
+ """
146
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
147
+ adding special tokens. A Funnel sequence has the following format:
148
+
149
+ - single sequence: `[CLS] X [SEP]`
150
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
151
+
152
+ Args:
153
+ token_ids_0 (`List[int]`):
154
+ List of IDs to which the special tokens will be added.
155
+ token_ids_1 (`List[int]`, *optional*):
156
+ Optional second list of IDs for sequence pairs.
157
+
158
+ Returns:
159
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
160
+ """
161
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
162
+
163
+ if token_ids_1 is not None:
164
+ output += token_ids_1 + [self.sep_token_id]
165
+
166
+ return output
167
+
168
+ def create_token_type_ids_from_sequences(
169
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
170
+ ) -> List[int]:
171
+ """
172
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel
173
+ Transformer sequence pair mask has the following format:
174
+
175
+ ```
176
+ 2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
177
+ | first sequence | second sequence |
178
+ ```
179
+
180
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
181
+
182
+ Args:
183
+ token_ids_0 (`List[int]`):
184
+ List of IDs.
185
+ token_ids_1 (`List[int]`, *optional*):
186
+ Optional second list of IDs for sequence pairs.
187
+
188
+ Returns:
189
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
190
+ """
191
+ sep = [self.sep_token_id]
192
+ cls = [self.cls_token_id]
193
+ if token_ids_1 is None:
194
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
195
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
196
+
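A quick editorial illustration of the token-type layout documented above, where `<cls>` receives type id 2 (`cls_token_type_id`) instead of the usual 0; the example tokens are arbitrary:

```python
from transformers import FunnelTokenizerFast

tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
ids_a = tokenizer.convert_tokens_to_ids(["hello", "world"])
ids_b = tokenizer.convert_tokens_to_ids(["how", "are", "you"])

print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))
# [2, 0, 0, 0, 1, 1, 1, 1] -> <cls> is type 2, first sequence + <sep> are 0, second sequence + <sep> are 1
```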
197
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
198
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
199
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
200
+ return tuple(files)
llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.19 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.84 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc ADDED
Binary file (45.4 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc ADDED
Binary file (20.1 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__init__.py ADDED
@@ -0,0 +1,119 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_openai": ["OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenAIGPTConfig"],
28
+ "tokenization_openai": ["OpenAIGPTTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_tokenizers_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_openai_fast"] = ["OpenAIGPTTokenizerFast"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_openai"] = [
46
+ "OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "OpenAIGPTDoubleHeadsModel",
48
+ "OpenAIGPTForSequenceClassification",
49
+ "OpenAIGPTLMHeadModel",
50
+ "OpenAIGPTModel",
51
+ "OpenAIGPTPreTrainedModel",
52
+ "load_tf_weights_in_openai_gpt",
53
+ ]
54
+
55
+ try:
56
+ if not is_tf_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ _import_structure["modeling_tf_openai"] = [
62
+ "TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
63
+ "TFOpenAIGPTDoubleHeadsModel",
64
+ "TFOpenAIGPTForSequenceClassification",
65
+ "TFOpenAIGPTLMHeadModel",
66
+ "TFOpenAIGPTMainLayer",
67
+ "TFOpenAIGPTModel",
68
+ "TFOpenAIGPTPreTrainedModel",
69
+ ]
70
+
71
+
72
+ if TYPE_CHECKING:
73
+ from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig
74
+ from .tokenization_openai import OpenAIGPTTokenizer
75
+
76
+ try:
77
+ if not is_tokenizers_available():
78
+ raise OptionalDependencyNotAvailable()
79
+ except OptionalDependencyNotAvailable:
80
+ pass
81
+ else:
82
+ from .tokenization_openai_fast import OpenAIGPTTokenizerFast
83
+
84
+ try:
85
+ if not is_torch_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ from .modeling_openai import (
91
+ OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
92
+ OpenAIGPTDoubleHeadsModel,
93
+ OpenAIGPTForSequenceClassification,
94
+ OpenAIGPTLMHeadModel,
95
+ OpenAIGPTModel,
96
+ OpenAIGPTPreTrainedModel,
97
+ load_tf_weights_in_openai_gpt,
98
+ )
99
+
100
+ try:
101
+ if not is_tf_available():
102
+ raise OptionalDependencyNotAvailable()
103
+ except OptionalDependencyNotAvailable:
104
+ pass
105
+ else:
106
+ from .modeling_tf_openai import (
107
+ TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
108
+ TFOpenAIGPTDoubleHeadsModel,
109
+ TFOpenAIGPTForSequenceClassification,
110
+ TFOpenAIGPTLMHeadModel,
111
+ TFOpenAIGPTMainLayer,
112
+ TFOpenAIGPTModel,
113
+ TFOpenAIGPTPreTrainedModel,
114
+ )
115
+
116
+ else:
117
+ import sys
118
+
119
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
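A short editorial sketch of how the lazy-module wiring above behaves: nothing heavy is imported until an attribute listed in `_import_structure` is first accessed:

```python
from transformers.models import openai  # cheap: the module object is a _LazyModule

config_cls = openai.OpenAIGPTConfig     # first access triggers the import of configuration_openai
print(config_cls.model_type)            # "openai-gpt"
```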
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.81 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/configuration_openai.cpython-310.pyc ADDED
Binary file (6.42 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/convert_openai_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.8 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_openai.cpython-310.pyc ADDED
Binary file (28.6 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_tf_openai.cpython-310.pyc ADDED
Binary file (30.9 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai.cpython-310.pyc ADDED
Binary file (12.5 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai_fast.cpython-310.pyc ADDED
Binary file (2.49 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/configuration_openai.py ADDED
@@ -0,0 +1,156 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ OpenAI GPT configuration"""
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class OpenAIGPTConfig(PretrainedConfig):
29
+ """
30
+ This is the configuration class to store the configuration of a [`OpenAIGPTModel`] or a [`TFOpenAIGPTModel`]. It is
31
+ used to instantiate a GPT model according to the specified arguments, defining the model architecture.
32
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT
33
+ [openai-community/openai-gpt](https://huggingface.co/openai-community/openai-gpt) architecture from OpenAI.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 40478):
40
+ Vocabulary size of the GPT model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`OpenAIGPTModel`] or [`TFOpenAIGPTModel`].
42
+ n_positions (`int`, *optional*, defaults to 512):
43
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
44
+ just in case (e.g., 512 or 1024 or 2048).
45
+ n_embd (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the embeddings and hidden states.
47
+ n_layer (`int`, *optional*, defaults to 12):
48
+ Number of hidden layers in the Transformer encoder.
49
+ n_head (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ afn (`str` or `Callable`, *optional*, defaults to `"gelu"`):
52
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
53
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
54
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
55
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
56
+ embd_pdrop (`float`, *optional*, defaults to 0.1):
57
+ The dropout ratio for the embeddings.
58
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
59
+ The dropout ratio for the attention.
60
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
61
+ The epsilon to use in the layer normalization layers.
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ summary_type (`str`, *optional*, defaults to `"cls_index"`):
65
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
66
+ [`TFOpenAIGPTDoubleHeadsModel`].
67
+
68
+ Has to be one of the following options:
69
+
70
+ - `"last"`: Take the last token hidden state (like XLNet).
71
+ - `"first"`: Take the first token hidden state (like BERT).
72
+ - `"mean"`: Take the mean of all tokens hidden states.
73
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
74
+ - `"attn"`: Not implemented now, use multi-head attention.
75
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
76
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
77
+ [`TFOpenAIGPTDoubleHeadsModel`].
78
+
79
+ Whether or not to add a projection after the vector extraction.
80
+ summary_activation (`str`, *optional*):
81
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
82
+ [`TFOpenAIGPTDoubleHeadsModel`].
83
+
84
+ Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
85
+ summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
86
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
87
+ [`TFOpenAIGPTDoubleHeadsModel`].
88
+
89
+ Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
90
+ summary_first_dropout (`float`, *optional*, defaults to 0.1):
91
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
92
+ [`TFOpenAIGPTDoubleHeadsModel`].
93
+
94
+ The dropout ratio to be used after the projection and activation.
95
+
96
+
97
+ Examples:
98
+
99
+ ```python
100
+ >>> from transformers import OpenAIGPTConfig, OpenAIGPTModel
101
+
102
+ >>> # Initializing a GPT configuration
103
+ >>> configuration = OpenAIGPTConfig()
104
+
105
+ >>> # Initializing a model (with random weights) from the configuration
106
+ >>> model = OpenAIGPTModel(configuration)
107
+
108
+ >>> # Accessing the model configuration
109
+ >>> configuration = model.config
110
+ ```"""
111
+
112
+ model_type = "openai-gpt"
113
+ attribute_map = {
114
+ "max_position_embeddings": "n_positions",
115
+ "hidden_size": "n_embd",
116
+ "num_attention_heads": "n_head",
117
+ "num_hidden_layers": "n_layer",
118
+ }
119
+
120
+ def __init__(
121
+ self,
122
+ vocab_size=40478,
123
+ n_positions=512,
124
+ n_embd=768,
125
+ n_layer=12,
126
+ n_head=12,
127
+ afn="gelu",
128
+ resid_pdrop=0.1,
129
+ embd_pdrop=0.1,
130
+ attn_pdrop=0.1,
131
+ layer_norm_epsilon=1e-5,
132
+ initializer_range=0.02,
133
+ summary_type="cls_index",
134
+ summary_use_proj=True,
135
+ summary_activation=None,
136
+ summary_proj_to_labels=True,
137
+ summary_first_dropout=0.1,
138
+ **kwargs,
139
+ ):
140
+ self.vocab_size = vocab_size
141
+ self.n_positions = n_positions
142
+ self.n_embd = n_embd
143
+ self.n_layer = n_layer
144
+ self.n_head = n_head
145
+ self.afn = afn
146
+ self.resid_pdrop = resid_pdrop
147
+ self.embd_pdrop = embd_pdrop
148
+ self.attn_pdrop = attn_pdrop
149
+ self.layer_norm_epsilon = layer_norm_epsilon
150
+ self.initializer_range = initializer_range
151
+ self.summary_type = summary_type
152
+ self.summary_use_proj = summary_use_proj
153
+ self.summary_activation = summary_activation
154
+ self.summary_first_dropout = summary_first_dropout
155
+ self.summary_proj_to_labels = summary_proj_to_labels
156
+ super().__init__(**kwargs)
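The `attribute_map` above aliases the library-wide attribute names onto the GPT-specific fields, so callers can use either spelling. A minimal sketch of that aliasing, assuming only the default values shown in `__init__`:

```python
from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig()

# The generic names resolve to the GPT-specific attributes via `attribute_map`.
assert config.hidden_size == config.n_embd == 768
assert config.num_hidden_layers == config.n_layer == 12
assert config.max_position_embeddings == config.n_positions == 512

# Writing through either name updates the same underlying field.
config.hidden_size = 1024
assert config.n_embd == 1024
```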
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,75 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert OpenAI GPT checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
23
+ from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
30
+ # Construct model
31
+ if openai_config_file == "":
32
+ config = OpenAIGPTConfig()
33
+ else:
34
+ config = OpenAIGPTConfig.from_json_file(openai_config_file)
35
+ model = OpenAIGPTModel(config)
36
+
37
+ # Load weights from numpy
38
+ load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
39
+
40
+ # Save pytorch-model
41
+ pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
42
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
43
+ print(f"Save PyTorch model to {pytorch_weights_dump_path}")
44
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
45
+ print(f"Save configuration file to {pytorch_config_dump_path}")
46
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
47
+ f.write(config.to_json_string())
48
+
49
+
50
+ if __name__ == "__main__":
51
+ parser = argparse.ArgumentParser()
52
+ # Required parameters
53
+ parser.add_argument(
54
+ "--openai_checkpoint_folder_path",
55
+ default=None,
56
+ type=str,
57
+ required=True,
58
+ help="Path to the TensorFlow checkpoint path.",
59
+ )
60
+ parser.add_argument(
61
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
62
+ )
63
+ parser.add_argument(
64
+ "--openai_config_file",
65
+ default="",
66
+ type=str,
67
+ help=(
68
+ "An optional config json file corresponding to the pre-trained OpenAI model. \n"
69
+ "This specifies the model architecture."
70
+ ),
71
+ )
72
+ args = parser.parse_args()
73
+ convert_openai_checkpoint_to_pytorch(
74
+ args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
75
+ )
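The same conversion can be driven from Python instead of the command line; a minimal sketch, where the checkpoint and output paths are placeholders to substitute with real directories:

```python
from transformers.models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
    convert_openai_checkpoint_to_pytorch,
)

# Hypothetical paths -- point these at a real OpenAI GPT TF checkpoint folder
# and an existing output directory before running.
checkpoint_folder = "./openai_gpt_tf_checkpoint"
output_folder = "./openai_gpt_pytorch"

# An empty config path falls back to the default OpenAIGPTConfig, mirroring the argparse default.
convert_openai_checkpoint_to_pytorch(checkpoint_folder, "", output_folder)
```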
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_openai.py ADDED
@@ -0,0 +1,859 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch OpenAI GPT model."""
17
+
18
+
19
+ import json
20
+ import math
21
+ import os
22
+ from dataclasses import dataclass
23
+ from typing import Any, Dict, Optional, Tuple, Union
24
+
25
+ import torch
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import gelu_new, silu
30
+ from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
31
+ from ...modeling_utils import PreTrainedModel, SequenceSummary
32
+ from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
33
+ from ...utils import (
34
+ ModelOutput,
35
+ add_code_sample_docstrings,
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ logging,
39
+ replace_return_docstrings,
40
+ )
41
+ from .configuration_openai import OpenAIGPTConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _CHECKPOINT_FOR_DOC = "openai-community/openai-gpt"
47
+ _CONFIG_FOR_DOC = "OpenAIGPTConfig"
48
+
49
+
50
+ from ..deprecated._archive_maps import OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
51
+
52
+
53
+ def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
54
+ """Load tf pre-trained weights in a pytorch model (from NumPy arrays here)"""
55
+ import re
56
+
57
+ import numpy as np
58
+
59
+ if ".ckpt" in openai_checkpoint_folder_path:
60
+ openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
61
+
62
+ logger.info(f"Loading weights from {openai_checkpoint_folder_path}")
63
+
64
+ with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
65
+ names = json.load(names_handle)
66
+ with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
67
+ shapes = json.load(shapes_handle)
68
+ offsets = np.cumsum([np.prod(shape) for shape in shapes])
69
+ init_params = [np.load(openai_checkpoint_folder_path + f"/params_{n}.npy") for n in range(10)]
70
+ init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
71
+ init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
72
+
73
+ # This was used when we had a single embedding matrix for positions and tokens
74
+ # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
75
+ # del init_params[1]
76
+ init_params = [arr.squeeze() for arr in init_params]
77
+
78
+ # Check that the token and position embeddings weight dimensions match those of the init parameters.
79
+ if model.tokens_embed.weight.shape != init_params[1].shape:
80
+ raise ValueError(
81
+ f"tokens_embed.weight.shape: {model.tokens_embed.weight.shape} does not match init_param[1].shape:"
82
+ f" {init_params[1].shape}"
83
+ )
84
+
85
+ if model.positions_embed.weight.shape != init_params[0].shape:
86
+ raise ValueError(
87
+ f"positions_embed.weight.shape: {model.positions_embed.weight.shape} does not match init_param[0].shape:"
88
+ f" {init_params[0].shape}"
89
+ )
90
+
91
+ model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
92
+ model.positions_embed.weight.data = torch.from_numpy(init_params[0])
93
+ names.pop(0)
94
+ # Pop position and token embedding arrays
95
+ init_params.pop(0)
96
+ init_params.pop(0)
97
+
98
+ for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
99
+ name = name[6:] # skip "model/"
100
+ if name[-2:] != ":0":
101
+ raise ValueError(f"Layer {name} does not end with :0")
102
+ name = name[:-2]
103
+ name = name.split("/")
104
+ pointer = model
105
+ for m_name in name:
106
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
107
+ scope_names = re.split(r"(\d+)", m_name)
108
+ else:
109
+ scope_names = [m_name]
110
+ if scope_names[0] == "g":
111
+ pointer = getattr(pointer, "weight")
112
+ elif scope_names[0] == "b":
113
+ pointer = getattr(pointer, "bias")
114
+ elif scope_names[0] == "w":
115
+ pointer = getattr(pointer, "weight")
116
+ else:
117
+ pointer = getattr(pointer, scope_names[0])
118
+ if len(scope_names) >= 2:
119
+ num = int(scope_names[1])
120
+ pointer = pointer[num]
121
+
122
+ # Ensure that the pointer and array have compatible shapes.
123
+ if pointer.shape != array.shape:
124
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
125
+
126
+ logger.info(f"Initialize PyTorch weight {name}")
127
+ pointer.data = torch.from_numpy(array)
128
+ return model
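The weight-assignment loop above walks the model by splitting each TF scope segment into a prefix and an optional layer index. A small standalone sketch of just that parsing step (the variable name is hypothetical but follows the `model/<scope>/...:0` pattern the loop expects):

```python
import re

name = "model/h0/attn/c_attn/w:0"  # hypothetical TF variable name
name = name[6:]                    # drop the "model/" prefix -> "h0/attn/c_attn/w:0"
assert name.endswith(":0")
segments = name[:-2].split("/")    # -> ["h0", "attn", "c_attn", "w"]

for m_name in segments:
    if re.fullmatch(r"[A-Za-z]+\d+", m_name):
        scope_names = re.split(r"(\d+)", m_name)  # "h0" -> ["h", "0", ""]
    else:
        scope_names = [m_name]
    print(scope_names[0], scope_names[1] if len(scope_names) >= 2 else "-")

# Prints "h 0", "attn -", "c_attn -", "w -": the loop descends into
# model.h[0].attn.c_attn and the trailing "w" selects its weight.
```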
129
+
130
+
131
+ ACT_FNS = {"relu": nn.ReLU(), "silu": silu, "gelu": gelu_new, "swish": silu}
132
+
133
+
134
+ class Attention(nn.Module):
135
+ def __init__(self, nx, n_positions, config, scale=False):
136
+ super().__init__()
137
+ n_state = nx # in Attention: n_state=768 (nx=n_embd)
138
+ # [switch nx => n_state from Block to Attention to keep identical to TF implementation]
139
+ if n_state % config.n_head != 0:
140
+ raise ValueError(f"Attention n_state shape: {n_state} must be divisible by config.n_head {config.n_head}")
141
+ self.register_buffer(
142
+ "bias",
143
+ torch.tril(torch.ones(n_positions, n_positions)).view(1, 1, n_positions, n_positions),
144
+ persistent=False,
145
+ )
146
+ self.n_head = config.n_head
147
+ self.split_size = n_state
148
+ self.scale = scale
149
+
150
+ self.c_attn = Conv1D(n_state * 3, nx)
151
+ self.c_proj = Conv1D(n_state, nx)
152
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
153
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
154
+ self.pruned_heads = set()
155
+
156
+ def prune_heads(self, heads):
157
+ if len(heads) == 0:
158
+ return
159
+ heads, index = find_pruneable_heads_and_indices(
160
+ heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
161
+ )
162
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
163
+ # Prune conv1d layers
164
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
165
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
166
+ # Update hyper params
167
+ self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
168
+ self.n_head = self.n_head - len(heads)
169
+ self.pruned_heads = self.pruned_heads.union(heads)
170
+
171
+ def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
172
+ w = torch.matmul(q, k)
173
+ if self.scale:
174
+ w = w / math.sqrt(v.size(-1))
175
+ # w = w * self.bias + -1e9 * (1 - self.bias) # TF implementation method: mask_attn_weights
176
+ # XD: self.b may be larger than w, so we need to crop it
177
+ b = self.bias[:, :, : w.size(-2), : w.size(-1)]
178
+ w = w * b + -1e4 * (1 - b)
179
+
180
+ if attention_mask is not None:
181
+ # Apply the attention mask
182
+ w = w + attention_mask
183
+
184
+ w = nn.functional.softmax(w, dim=-1)
185
+ w = self.attn_dropout(w)
186
+
187
+ # Mask heads if we want to
188
+ if head_mask is not None:
189
+ w = w * head_mask
190
+
191
+ outputs = [torch.matmul(w, v)]
192
+ if output_attentions:
193
+ outputs.append(w)
194
+ return outputs
195
+
196
+ def merge_heads(self, x):
197
+ x = x.permute(0, 2, 1, 3).contiguous()
198
+ new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
199
+ return x.view(*new_x_shape) # in Tensorflow implementation: fct merge_states
200
+
201
+ def split_heads(self, x, k=False):
202
+ new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
203
+ x = x.view(*new_x_shape) # in Tensorflow implementation: fct split_states
204
+ if k:
205
+ return x.permute(0, 2, 3, 1)
206
+ else:
207
+ return x.permute(0, 2, 1, 3)
208
+
209
+ def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
210
+ x = self.c_attn(x)
211
+ query, key, value = x.split(self.split_size, dim=2)
212
+ query = self.split_heads(query)
213
+ key = self.split_heads(key, k=True)
214
+ value = self.split_heads(value)
215
+
216
+ attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
217
+ a = attn_outputs[0]
218
+
219
+ a = self.merge_heads(a)
220
+ a = self.c_proj(a)
221
+ a = self.resid_dropout(a)
222
+
223
+ outputs = [a] + attn_outputs[1:]
224
+ return outputs # a, (attentions)
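The `_attn` step above enforces causality additively: the lower-triangular `bias` buffer keeps past positions and pushes future positions toward a large negative score before the softmax. A minimal numeric sketch of that masking on a toy 4x4 score matrix (values are illustrative only):

```python
import torch

n = 4
scores = torch.zeros(1, 1, n, n)                   # toy attention scores
b = torch.tril(torch.ones(n, n)).view(1, 1, n, n)  # same layout as the registered `bias` buffer

masked = scores * b + -1e4 * (1 - b)               # future positions drop to ~-1e4
probs = torch.softmax(masked, dim=-1)

print(probs[0, 0])
# Row i spreads its weight uniformly over positions 0..i and gives ~0 weight to
# positions > i, which is the causal pattern Attention._attn produces.
```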
225
+
226
+
227
+ class MLP(nn.Module):
228
+ def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
229
+ super().__init__()
230
+ nx = config.n_embd
231
+ self.c_fc = Conv1D(n_state, nx)
232
+ self.c_proj = Conv1D(nx, n_state)
233
+ self.act = ACT_FNS[config.afn]
234
+ self.dropout = nn.Dropout(config.resid_pdrop)
235
+
236
+ def forward(self, x):
237
+ h = self.act(self.c_fc(x))
238
+ h2 = self.c_proj(h)
239
+ return self.dropout(h2)
240
+
241
+
242
+ class Block(nn.Module):
243
+ def __init__(self, n_positions, config, scale=False):
244
+ super().__init__()
245
+ nx = config.n_embd
246
+ self.attn = Attention(nx, n_positions, config, scale)
247
+ self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
248
+ self.mlp = MLP(4 * nx, config)
249
+ self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
250
+
251
+ def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
252
+ attn_outputs = self.attn(
253
+ x,
254
+ attention_mask=attention_mask,
255
+ head_mask=head_mask,
256
+ output_attentions=output_attentions,
257
+ )
258
+ a = attn_outputs[0]
259
+
260
+ n = self.ln_1(x + a)
261
+ m = self.mlp(n)
262
+ h = self.ln_2(n + m)
263
+
264
+ outputs = [h] + attn_outputs[1:]
265
+ return outputs
266
+
267
+
268
+ class OpenAIGPTPreTrainedModel(PreTrainedModel):
269
+ """
270
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
271
+ models.
272
+ """
273
+
274
+ config_class = OpenAIGPTConfig
275
+ load_tf_weights = load_tf_weights_in_openai_gpt
276
+ base_model_prefix = "transformer"
277
+
278
+ def _init_weights(self, module):
279
+ """Initialize the weights."""
280
+ if isinstance(module, (nn.Linear, Conv1D)):
281
+ # Slightly different from the TF version which uses truncated_normal for initialization
282
+ # cf https://github.com/pytorch/pytorch/pull/5617
283
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
284
+ if module.bias is not None:
285
+ module.bias.data.zero_()
286
+ elif isinstance(module, nn.Embedding):
287
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
288
+ if module.padding_idx is not None:
289
+ module.weight.data[module.padding_idx].zero_()
290
+ elif isinstance(module, nn.LayerNorm):
291
+ module.bias.data.zero_()
292
+ module.weight.data.fill_(1.0)
293
+
294
+
295
+ @dataclass
296
+ class OpenAIGPTDoubleHeadsModelOutput(ModelOutput):
297
+ """
298
+ Base class for outputs of models predicting if two sentences are consecutive or not.
299
+
300
+ Args:
301
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
302
+ Language modeling loss.
303
+ mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
304
+ Multiple choice classification loss.
305
+ logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
306
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
307
+ mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
308
+ Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
309
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
310
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
311
+ shape `(batch_size, sequence_length, hidden_size)`.
312
+
313
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
314
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
315
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
316
+ sequence_length)`.
317
+
318
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
319
+ heads.
320
+ """
321
+
322
+ loss: Optional[torch.FloatTensor] = None
323
+ mc_loss: Optional[torch.FloatTensor] = None
324
+ logits: torch.FloatTensor = None
325
+ mc_logits: torch.FloatTensor = None
326
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
327
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
328
+
329
+
330
+ OPENAI_GPT_START_DOCSTRING = r"""
331
+
332
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
333
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
334
+ etc.)
335
+
336
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
337
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
338
+ and behavior.
339
+
340
+ Parameters:
341
+ config ([`OpenAIGPTConfig`]): Model configuration class with all the parameters of the model.
342
+ Initializing with a config file does not load the weights associated with the model, only the
343
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
344
+ """
345
+
346
+ OPENAI_GPT_INPUTS_DOCSTRING = r"""
347
+ Args:
348
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
349
+ Indices of input sequence tokens in the vocabulary.
350
+
351
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
352
+ [`PreTrainedTokenizer.__call__`] for details.
353
+
354
+ [What are input IDs?](../glossary#input-ids)
355
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
356
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
357
+
358
+ - 1 for tokens that are **not masked**,
359
+ - 0 for tokens that are **masked**.
360
+
361
+ [What are attention masks?](../glossary#attention-mask)
362
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
363
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
364
+ 1]`:
365
+
366
+ - 0 corresponds to a *sentence A* token,
367
+ - 1 corresponds to a *sentence B* token.
368
+
369
+ [What are token type IDs?](../glossary#token-type-ids)
370
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
371
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
372
+ config.max_position_embeddings - 1]`.
373
+
374
+ [What are position IDs?](../glossary#position-ids)
375
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
376
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
377
+
378
+ - 1 indicates the head is **not masked**,
379
+ - 0 indicates the head is **masked**.
380
+
381
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
382
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
383
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
384
+ model's internal embedding lookup matrix.
385
+ output_attentions (`bool`, *optional*):
386
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
387
+ tensors for more detail.
388
+ output_hidden_states (`bool`, *optional*):
389
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
390
+ more detail.
391
+ return_dict (`bool`, *optional*):
392
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
393
+ """
394
+
395
+
396
+ @add_start_docstrings(
397
+ "The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
398
+ OPENAI_GPT_START_DOCSTRING,
399
+ )
400
+ class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
401
+ def __init__(self, config):
402
+ super().__init__(config)
403
+
404
+ self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
405
+ self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
406
+ self.drop = nn.Dropout(config.embd_pdrop)
407
+ self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)])
408
+
409
+ self.register_buffer("position_ids", torch.arange(config.n_positions), persistent=False)
410
+ # Initialize weights and apply final processing
411
+ self.post_init()
412
+
413
+ def get_input_embeddings(self):
414
+ return self.tokens_embed
415
+
416
+ def set_input_embeddings(self, new_embeddings):
417
+ self.tokens_embed = new_embeddings
418
+
419
+ def _prune_heads(self, heads_to_prune):
420
+ """
421
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
422
+ """
423
+ for layer, heads in heads_to_prune.items():
424
+ self.h[layer].attn.prune_heads(heads)
425
+
426
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
427
+ @add_code_sample_docstrings(
428
+ checkpoint=_CHECKPOINT_FOR_DOC,
429
+ output_type=BaseModelOutput,
430
+ config_class=_CONFIG_FOR_DOC,
431
+ )
432
+ def forward(
433
+ self,
434
+ input_ids: Optional[torch.LongTensor] = None,
435
+ attention_mask: Optional[torch.FloatTensor] = None,
436
+ token_type_ids: Optional[torch.LongTensor] = None,
437
+ position_ids: Optional[torch.LongTensor] = None,
438
+ head_mask: Optional[torch.FloatTensor] = None,
439
+ inputs_embeds: Optional[torch.FloatTensor] = None,
440
+ output_attentions: Optional[bool] = None,
441
+ output_hidden_states: Optional[bool] = None,
442
+ return_dict: Optional[bool] = None,
443
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
444
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
445
+ output_hidden_states = (
446
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
447
+ )
448
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
449
+
450
+ if input_ids is not None and inputs_embeds is not None:
451
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
452
+ elif input_ids is not None:
453
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
454
+ input_shape = input_ids.size()
455
+ input_ids = input_ids.view(-1, input_shape[-1])
456
+ elif inputs_embeds is not None:
457
+ input_shape = inputs_embeds.size()[:-1]
458
+ else:
459
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
460
+
461
+ if position_ids is None:
462
+ # Code is different from when we had a single embedding matrix for position and token embeddings
463
+ position_ids = self.position_ids[None, : input_shape[-1]]
464
+
465
+ # Attention mask.
466
+ if attention_mask is not None:
467
+ # We create a 3D attention mask from a 2D tensor mask.
468
+ # Sizes are [batch_size, 1, 1, to_seq_length]
469
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
470
+ # this attention mask is simpler than the triangular masking of causal attention
471
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
472
+ attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
473
+
474
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
475
+ # masked positions, this operation will create a tensor which is 0.0 for
476
+ # positions we want to attend and the dtype's smallest value for masked positions.
477
+ # Since we are adding it to the raw scores before the softmax, this is
478
+ # effectively the same as removing these entirely.
479
+ attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
480
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
481
+
482
+ # Prepare head mask if needed
483
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
484
+
485
+ if inputs_embeds is None:
486
+ inputs_embeds = self.tokens_embed(input_ids)
487
+ position_embeds = self.positions_embed(position_ids)
488
+ if token_type_ids is not None:
489
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
490
+ token_type_embeds = self.tokens_embed(token_type_ids)
491
+ else:
492
+ token_type_embeds = 0
493
+ hidden_states = inputs_embeds + position_embeds + token_type_embeds
494
+ hidden_states = self.drop(hidden_states)
495
+
496
+ output_shape = input_shape + (hidden_states.size(-1),)
497
+
498
+ all_attentions = () if output_attentions else None
499
+ all_hidden_states = () if output_hidden_states else None
500
+ for i, block in enumerate(self.h):
501
+ if output_hidden_states:
502
+ all_hidden_states = all_hidden_states + (hidden_states,)
503
+
504
+ outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions=output_attentions)
505
+ hidden_states = outputs[0]
506
+ if output_attentions:
507
+ all_attentions = all_attentions + (outputs[1],)
508
+
509
+ hidden_states = hidden_states.view(*output_shape)
510
+ # Add last layer
511
+ if output_hidden_states:
512
+ all_hidden_states = all_hidden_states + (hidden_states,)
513
+
514
+ if not return_dict:
515
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
516
+
517
+ return BaseModelOutput(
518
+ last_hidden_state=hidden_states,
519
+ hidden_states=all_hidden_states,
520
+ attentions=all_attentions,
521
+ )
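The forward pass above turns a 2D padding mask of ones and zeros into a broadcastable additive mask. A short sketch of that transformation on a toy batch, with values chosen only to show the shape and the `finfo.min` trick:

```python
import torch

# Toy 2D padding mask: batch of 2, sequence length 4; the last token of row 1 is padding.
attention_mask = torch.tensor([[1.0, 1.0, 1.0, 1.0],
                               [1.0, 1.0, 1.0, 0.0]])

# [batch_size, 1, 1, to_seq_length], broadcastable over heads and query positions.
extended = attention_mask[:, None, None, :]

# 0.0 where attention is allowed, a very large negative number where it is not.
extended = (1.0 - extended) * torch.finfo(torch.float32).min

print(extended.shape)     # torch.Size([2, 1, 1, 4])
print(extended[1, 0, 0])  # roughly tensor([0., 0., 0., -3.4028e+38])
```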
522
+
523
+
524
+ @add_start_docstrings(
525
+ """
526
+ OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
527
+ embeddings).
528
+ """,
529
+ OPENAI_GPT_START_DOCSTRING,
530
+ )
531
+ class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
532
+ _tied_weights_keys = ["lm_head.weight"]
533
+
534
+ def __init__(self, config):
535
+ super().__init__(config)
536
+ self.transformer = OpenAIGPTModel(config)
537
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
538
+
539
+ # Initialize weights and apply final processing
540
+ self.post_init()
541
+
542
+ def get_output_embeddings(self):
543
+ return self.lm_head
544
+
545
+ def set_output_embeddings(self, new_embeddings):
546
+ self.lm_head = new_embeddings
547
+
548
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
549
+ @add_code_sample_docstrings(
550
+ checkpoint=_CHECKPOINT_FOR_DOC,
551
+ output_type=CausalLMOutput,
552
+ config_class=_CONFIG_FOR_DOC,
553
+ )
554
+ def forward(
555
+ self,
556
+ input_ids: Optional[torch.LongTensor] = None,
557
+ attention_mask: Optional[torch.FloatTensor] = None,
558
+ token_type_ids: Optional[torch.LongTensor] = None,
559
+ position_ids: Optional[torch.LongTensor] = None,
560
+ head_mask: Optional[torch.FloatTensor] = None,
561
+ inputs_embeds: Optional[torch.FloatTensor] = None,
562
+ labels: Optional[torch.LongTensor] = None,
563
+ output_attentions: Optional[bool] = None,
564
+ output_hidden_states: Optional[bool] = None,
565
+ return_dict: Optional[bool] = None,
566
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutput]:
567
+ r"""
568
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
569
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
570
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
571
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
572
+ """
573
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
574
+
575
+ transformer_outputs = self.transformer(
576
+ input_ids,
577
+ attention_mask=attention_mask,
578
+ token_type_ids=token_type_ids,
579
+ position_ids=position_ids,
580
+ head_mask=head_mask,
581
+ inputs_embeds=inputs_embeds,
582
+ output_attentions=output_attentions,
583
+ output_hidden_states=output_hidden_states,
584
+ return_dict=return_dict,
585
+ )
586
+ hidden_states = transformer_outputs[0]
587
+ lm_logits = self.lm_head(hidden_states)
588
+
589
+ loss = None
590
+ if labels is not None:
591
+ # Shift so that tokens < n predict n
592
+ shift_logits = lm_logits[..., :-1, :].contiguous()
593
+ shift_labels = labels[..., 1:].contiguous()
594
+ # Flatten the tokens
595
+ loss_fct = CrossEntropyLoss()
596
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
597
+
598
+ if not return_dict:
599
+ output = (lm_logits,) + transformer_outputs[1:]
600
+ return ((loss,) + output) if loss is not None else output
601
+
602
+ return CausalLMOutput(
603
+ loss=loss,
604
+ logits=lm_logits,
605
+ hidden_states=transformer_outputs.hidden_states,
606
+ attentions=transformer_outputs.attentions,
607
+ )
608
+
609
+ def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]:
610
+ return {"input_ids": input_ids}
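Because the labels are shifted inside `forward` (see the `labels` docstring above), a causal language-modeling loss can be obtained by passing the input ids themselves as labels. A minimal usage sketch (downloads the pretrained checkpoint on first use):

```python
import torch
from transformers import AutoTokenizer, OpenAIGPTLMHeadModel

tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-community/openai-gpt")

inputs = tokenizer("hello, my dog is cute", return_tensors="pt")
outputs = model(**inputs, labels=inputs["input_ids"])  # shifting happens inside the model

print(outputs.loss)          # scalar language-modeling loss
print(outputs.logits.shape)  # (batch_size, sequence_length, vocab_size)
```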
611
+
612
+
613
+ @add_start_docstrings(
614
+ """
615
+ OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
616
+ RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
617
+ input embeddings; the classification head takes as input the hidden state at a specified classification token index in the
618
+ input sequence.
619
+ """,
620
+ OPENAI_GPT_START_DOCSTRING,
621
+ )
622
+ class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
623
+ _tied_weights_keys = ["lm_head.weight"]
624
+
625
+ def __init__(self, config):
626
+ super().__init__(config)
627
+
628
+ config.num_labels = 1
629
+ self.transformer = OpenAIGPTModel(config)
630
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
631
+ self.multiple_choice_head = SequenceSummary(config)
632
+
633
+ # Initialize weights and apply final processing
634
+ self.post_init()
635
+
636
+ def get_output_embeddings(self):
637
+ return self.lm_head
638
+
639
+ def set_output_embeddings(self, new_embeddings):
640
+ self.lm_head = new_embeddings
641
+
642
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
643
+ @replace_return_docstrings(output_type=OpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
644
+ def forward(
645
+ self,
646
+ input_ids: Optional[torch.LongTensor] = None,
647
+ attention_mask: Optional[torch.FloatTensor] = None,
648
+ token_type_ids: Optional[torch.LongTensor] = None,
649
+ position_ids: Optional[torch.LongTensor] = None,
650
+ head_mask: Optional[torch.FloatTensor] = None,
651
+ inputs_embeds: Optional[torch.FloatTensor] = None,
652
+ mc_token_ids: Optional[torch.LongTensor] = None,
653
+ labels: Optional[torch.LongTensor] = None,
654
+ mc_labels: Optional[torch.LongTensor] = None,
655
+ output_attentions: Optional[bool] = None,
656
+ output_hidden_states: Optional[bool] = None,
657
+ return_dict: Optional[bool] = None,
658
+ ) -> Union[Tuple[torch.Tensor], OpenAIGPTDoubleHeadsModelOutput]:
659
+ r"""
660
+ mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, defaults to the index of the last token of the input):
661
+ Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
662
+ 1]`.
663
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
664
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
665
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are
666
+ ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
667
+ mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
668
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
669
+ where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)
670
+
671
+ Return:
672
+
673
+ Examples:
674
+
675
+ ```python
676
+ >>> from transformers import AutoTokenizer, OpenAIGPTDoubleHeadsModel
677
+ >>> import torch
678
+
679
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
680
+ >>> model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt")
681
+ >>> tokenizer.add_special_tokens(
682
+ ... {"cls_token": "[CLS]"}
683
+ ... ) # Add a [CLS] to the vocabulary (we should train it also!)
684
+ >>> model.resize_token_embeddings(len(tokenizer))
685
+
686
+ >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
687
+ >>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
688
+ >>> mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0) # Batch size 1
689
+
690
+ >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
691
+ >>> lm_logits = outputs.logits
692
+ >>> mc_logits = outputs.mc_logits
693
+ ```"""
694
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
695
+
696
+ transformer_outputs = self.transformer(
697
+ input_ids,
698
+ attention_mask=attention_mask,
699
+ token_type_ids=token_type_ids,
700
+ position_ids=position_ids,
701
+ head_mask=head_mask,
702
+ inputs_embeds=inputs_embeds,
703
+ output_attentions=output_attentions,
704
+ output_hidden_states=output_hidden_states,
705
+ return_dict=return_dict,
706
+ )
707
+ hidden_states = transformer_outputs[0]
708
+
709
+ lm_logits = self.lm_head(hidden_states)
710
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
711
+
712
+ lm_loss, mc_loss = None, None
713
+ if mc_labels is not None:
714
+ loss_fct = CrossEntropyLoss()
715
+ mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
716
+ if labels is not None:
717
+ shift_logits = lm_logits[..., :-1, :].contiguous()
718
+ shift_labels = labels[..., 1:].contiguous()
719
+ loss_fct = CrossEntropyLoss()
720
+ lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
721
+
722
+ if not return_dict:
723
+ output = (lm_logits, mc_logits) + transformer_outputs[1:]
724
+ if mc_loss is not None:
725
+ output = (mc_loss,) + output
726
+ return ((lm_loss,) + output) if lm_loss is not None else output
727
+
728
+ return OpenAIGPTDoubleHeadsModelOutput(
729
+ loss=lm_loss,
730
+ mc_loss=mc_loss,
731
+ logits=lm_logits,
732
+ mc_logits=mc_logits,
733
+ hidden_states=transformer_outputs.hidden_states,
734
+ attentions=transformer_outputs.attentions,
735
+ )
736
+
737
+
738
+ @add_start_docstrings(
739
+ """
740
+ The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
741
+ [`OpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal
742
+ models (e.g. GPT-2) do. Since it does classification on the last token, it requires knowing the position of the
743
+ last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding
744
+ token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since
745
+ it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take
746
+ the last value in each row of the batch).
747
+ """,
748
+ OPENAI_GPT_START_DOCSTRING,
749
+ )
750
+ class OpenAIGPTForSequenceClassification(OpenAIGPTPreTrainedModel):
751
+ def __init__(self, config):
752
+ super().__init__(config)
753
+ self.num_labels = config.num_labels
754
+ self.transformer = OpenAIGPTModel(config)
755
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
756
+
757
+ # Initialize weights and apply final processing
758
+ self.post_init()
759
+
760
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
761
+ @add_code_sample_docstrings(
762
+ checkpoint=_CHECKPOINT_FOR_DOC,
763
+ output_type=SequenceClassifierOutput,
764
+ config_class=_CONFIG_FOR_DOC,
765
+ )
766
+ def forward(
767
+ self,
768
+ input_ids: Optional[torch.LongTensor] = None,
769
+ attention_mask: Optional[torch.FloatTensor] = None,
770
+ token_type_ids: Optional[torch.LongTensor] = None,
771
+ position_ids: Optional[torch.LongTensor] = None,
772
+ head_mask: Optional[torch.FloatTensor] = None,
773
+ inputs_embeds: Optional[torch.FloatTensor] = None,
774
+ labels: Optional[torch.LongTensor] = None,
775
+ output_attentions: Optional[bool] = None,
776
+ output_hidden_states: Optional[bool] = None,
777
+ return_dict: Optional[bool] = None,
778
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
779
+ r"""
780
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
781
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
782
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
783
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
784
+ """
785
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
786
+
787
+ transformer_outputs = self.transformer(
788
+ input_ids,
789
+ attention_mask=attention_mask,
790
+ token_type_ids=token_type_ids,
791
+ position_ids=position_ids,
792
+ head_mask=head_mask,
793
+ inputs_embeds=inputs_embeds,
794
+ output_attentions=output_attentions,
795
+ output_hidden_states=output_hidden_states,
796
+ return_dict=return_dict,
797
+ )
798
+
799
+ hidden_states = transformer_outputs[0]
800
+ logits = self.score(hidden_states)
801
+
802
+ if input_ids is not None:
803
+ batch_size, sequence_length = input_ids.shape[:2]
804
+ else:
805
+ batch_size, sequence_length = inputs_embeds.shape[:2]
806
+
807
+ # A padding token must be defined when handling batch sizes > 1.
808
+ if self.config.pad_token_id is None and batch_size != 1:
809
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
810
+
811
+ if self.config.pad_token_id is None:
812
+ sequence_lengths = -1
813
+ else:
814
+ if input_ids is not None:
815
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
816
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
817
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
818
+ sequence_lengths = sequence_lengths.to(logits.device)
819
+ else:
820
+ sequence_lengths = -1
821
+ logger.warning(
822
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
823
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
824
+ )
825
+
826
+ pooled_logits = logits[range(batch_size), sequence_lengths]
827
+
828
+ loss = None
829
+ if labels is not None:
830
+ if self.config.problem_type is None:
831
+ if self.num_labels == 1:
832
+ self.config.problem_type = "regression"
833
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
834
+ self.config.problem_type = "single_label_classification"
835
+ else:
836
+ self.config.problem_type = "multi_label_classification"
837
+
838
+ if self.config.problem_type == "regression":
839
+ loss_fct = MSELoss()
840
+ if self.num_labels == 1:
841
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
842
+ else:
843
+ loss = loss_fct(pooled_logits, labels)
844
+ elif self.config.problem_type == "single_label_classification":
845
+ loss_fct = CrossEntropyLoss()
846
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
847
+ elif self.config.problem_type == "multi_label_classification":
848
+ loss_fct = BCEWithLogitsLoss()
849
+ loss = loss_fct(pooled_logits, labels)
850
+ if not return_dict:
851
+ output = (pooled_logits,) + transformer_outputs[1:]
852
+ return ((loss,) + output) if loss is not None else output
853
+
854
+ return SequenceClassifierOutput(
855
+ loss=loss,
856
+ logits=pooled_logits,
857
+ hidden_states=transformer_outputs.hidden_states,
858
+ attentions=transformer_outputs.attentions,
859
+ )
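The pooling step above selects one position per row: the last non-padding token when `pad_token_id` is set, otherwise simply the last position. A small sketch of the index computation used there, with toy ids and a hypothetical `pad_token_id` of 0:

```python
import torch

pad_token_id = 0  # hypothetical; the real forward reads config.pad_token_id
input_ids = torch.tensor([[5, 6, 7, 0],    # one padding token at the end
                          [5, 6, 7, 8]])   # no padding at all

# argmax finds the first padding position; -1 steps back to the last real token.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
# Rows without padding give argmax == 0, so the -1 wraps around to the last position
# once the modulo is applied (ONNX-friendly, as the comment in forward notes).
sequence_lengths = sequence_lengths % input_ids.shape[-1]

print(sequence_lengths)  # tensor([2, 3])
```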
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_tf_openai.py ADDED
@@ -0,0 +1,940 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 OpenAI GPT model."""
17
+
18
+ from __future__ import annotations
19
+
20
+ from dataclasses import dataclass
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput, TFSequenceClassifierOutput
28
+ from ...modeling_tf_utils import (
29
+ TFCausalLanguageModelingLoss,
30
+ TFConv1D,
31
+ TFModelInputType,
32
+ TFPreTrainedModel,
33
+ TFSequenceClassificationLoss,
34
+ TFSequenceSummary,
35
+ TFSharedEmbeddings,
36
+ get_initializer,
37
+ keras,
38
+ keras_serializable,
39
+ unpack_inputs,
40
+ )
41
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
42
+ from ...utils import (
43
+ ModelOutput,
44
+ add_code_sample_docstrings,
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from .configuration_openai import OpenAIGPTConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "openai-community/openai-gpt"
56
+ _CONFIG_FOR_DOC = "OpenAIGPTConfig"
57
+
58
+
59
+ from ..deprecated._archive_maps import TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
60
+
61
+
62
+ class TFAttention(keras.layers.Layer):
63
+ def __init__(self, nx, config, scale=False, **kwargs):
64
+ super().__init__(**kwargs)
65
+
66
+ n_state = nx # in Attention: n_state=768 (nx=n_embd)
67
+ # [switch nx => n_state from Block to Attention to keep identical to TF implementation]
68
+ assert (
69
+ n_state % config.n_head == 0
70
+ ), f"Hidden dimension {n_state} not divisible by number of heads {config.n_head}"
71
+ self.n_head = config.n_head
72
+ self.split_size = n_state
73
+ self.scale = scale
74
+ self.output_attentions = config.output_attentions
75
+
76
+ self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
77
+ self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
78
+ self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)
79
+ self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)
80
+ self.n_state = n_state
81
+ self.pruned_heads = set()
82
+
83
+ def prune_heads(self, heads):
84
+ pass
85
+
86
+ @staticmethod
87
+ def causal_attention_mask(nd, ns):
88
+ """
89
+ 1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]),
90
+ -1, ns-nd), but doesn't produce garbage on TPUs.
91
+ """
92
+ i = tf.range(nd)[:, None]
93
+ j = tf.range(ns)
94
+ m = i >= j - ns + nd
95
+ return m
96
+
97
+ def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False):
98
+ # q, k, v have shape [batch, heads, sequence, features]
99
+ w = tf.matmul(q, k, transpose_b=True)
100
+ if self.scale:
101
+ dk = tf.cast(shape_list(k)[-1], dtype=w.dtype) # scale attention_scores
102
+ w = w / tf.math.sqrt(dk)
103
+
104
+ # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
105
+ _, _, nd, ns = shape_list(w)
106
+ b = tf.cast(self.causal_attention_mask(nd, ns), dtype=w.dtype)
107
+ b = tf.reshape(b, [1, 1, nd, ns])
108
+ w = w * b - 1e4 * (1 - b)
109
+
110
+ if attention_mask is not None:
111
+ # Apply the attention mask
112
+ attention_mask = tf.cast(attention_mask, dtype=w.dtype)
113
+ w = w + attention_mask
114
+
115
+ w = stable_softmax(w, axis=-1)
116
+ w = self.attn_dropout(w, training=training)
117
+
118
+ # Mask heads if we want to
119
+ if head_mask is not None:
120
+ w = w * head_mask
121
+
122
+ outputs = [tf.matmul(w, v)]
123
+ if output_attentions:
124
+ outputs.append(w)
125
+ return outputs
126
+
127
+ def merge_heads(self, x):
128
+ x = tf.transpose(x, [0, 2, 1, 3])
129
+ x_shape = shape_list(x)
130
+ new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
131
+ return tf.reshape(x, new_x_shape)
132
+
133
+ def split_heads(self, x):
134
+ x_shape = shape_list(x)
135
+ new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
136
+ x = tf.reshape(x, new_x_shape)
137
+ return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
138
+
139
+ def call(self, x, attention_mask, head_mask, output_attentions, training=False):
140
+ x = self.c_attn(x)
141
+ query, key, value = tf.split(x, 3, axis=2)
142
+ query = self.split_heads(query)
143
+ key = self.split_heads(key)
144
+ value = self.split_heads(value)
145
+
146
+ attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training)
147
+ a = attn_outputs[0]
148
+
149
+ a = self.merge_heads(a)
150
+ a = self.c_proj(a)
151
+ a = self.resid_dropout(a, training=training)
152
+
153
+ outputs = [a] + attn_outputs[1:]
154
+ return outputs # a, (attentions)
155
+
156
+ def build(self, input_shape=None):
157
+ if self.built:
158
+ return
159
+ self.built = True
160
+ if getattr(self, "c_attn", None) is not None:
161
+ with tf.name_scope(self.c_attn.name):
162
+ self.c_attn.build([None, None, self.n_state * 3])
163
+ if getattr(self, "c_proj", None) is not None:
164
+ with tf.name_scope(self.c_proj.name):
165
+ self.c_proj.build([None, None, self.n_state])
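`TFAttention.causal_attention_mask` above builds the lower-triangular pattern from plain `tf.range` comparisons rather than a band-part op, which the docstring notes is friendlier to TPUs. A small sketch of its output for 3 query positions attending over 5 key positions (sizes are illustrative):

```python
import tensorflow as tf

nd, ns = 3, 5  # 3 destination (query) positions over 5 source (key) positions
i = tf.range(nd)[:, None]
j = tf.range(ns)
mask = i >= j - ns + nd  # same comparison as causal_attention_mask(nd, ns)

print(tf.cast(mask, tf.int32).numpy())
# [[1 1 1 0 0]
#  [1 1 1 1 0]
#  [1 1 1 1 1]]
# Ones fill the lower triangle counted from the lower-right corner, matching the docstring.
```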
166
+
167
+
168
+ class TFMLP(keras.layers.Layer):
169
+ def __init__(self, n_state, config, **kwargs):
170
+ super().__init__(**kwargs)
171
+ nx = config.n_embd
172
+ self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc")
173
+ self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj")
174
+ self.act = get_tf_activation("gelu")
175
+ self.dropout = keras.layers.Dropout(config.resid_pdrop)
176
+ self.nx = nx
177
+ self.n_state = n_state
178
+
179
+ def call(self, x, training=False):
180
+ h = self.act(self.c_fc(x))
181
+ h2 = self.c_proj(h)
182
+ h2 = self.dropout(h2, training=training)
183
+ return h2
184
+
185
+ def build(self, input_shape=None):
186
+ if self.built:
187
+ return
188
+ self.built = True
189
+ if getattr(self, "c_fc", None) is not None:
190
+ with tf.name_scope(self.c_fc.name):
191
+ self.c_fc.build([None, None, self.n_state])
192
+ if getattr(self, "c_proj", None) is not None:
193
+ with tf.name_scope(self.c_proj.name):
194
+ self.c_proj.build([None, None, self.nx])
195
+
196
+
197
+ class TFBlock(keras.layers.Layer):
198
+ def __init__(self, config, scale=False, **kwargs):
199
+ super().__init__(**kwargs)
200
+ nx = config.n_embd
201
+ self.attn = TFAttention(nx, config, scale, name="attn")
202
+ self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
203
+ self.mlp = TFMLP(4 * nx, config, name="mlp")
204
+ self.ln_2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2")
205
+ self.nx = nx
206
+
207
+ def call(self, x, attention_mask, head_mask, output_attentions, training=False):
208
+ output_attn = self.attn(x, attention_mask, head_mask, output_attentions, training=training)
209
+ a = output_attn[0] # output_attn: a, (attentions)
210
+
211
+ n = self.ln_1(x + a)
212
+ m = self.mlp(n, training=training)
213
+ h = self.ln_2(n + m)
214
+
215
+ outputs = [h] + output_attn[1:]
216
+ return outputs # x, (attentions)
217
+
218
+ def build(self, input_shape=None):
219
+ if self.built:
220
+ return
221
+ self.built = True
222
+ if getattr(self, "attn", None) is not None:
223
+ with tf.name_scope(self.attn.name):
224
+ self.attn.build(None)
225
+ if getattr(self, "ln_1", None) is not None:
226
+ with tf.name_scope(self.ln_1.name):
227
+ self.ln_1.build([None, None, self.nx])
228
+ if getattr(self, "mlp", None) is not None:
229
+ with tf.name_scope(self.mlp.name):
230
+ self.mlp.build(None)
231
+ if getattr(self, "ln_2", None) is not None:
232
+ with tf.name_scope(self.ln_2.name):
233
+ self.ln_2.build([None, None, self.nx])
234
+
235
+
236
+ @keras_serializable
237
+ class TFOpenAIGPTMainLayer(keras.layers.Layer):
238
+ config_class = OpenAIGPTConfig
239
+
240
+ def __init__(self, config, *inputs, **kwargs):
241
+ super().__init__(*inputs, **kwargs)
242
+
243
+ self.config = config
244
+ self.output_hidden_states = config.output_hidden_states
245
+ self.output_attentions = config.output_attentions
246
+ self.return_dict = config.use_return_dict
247
+ self.num_hidden_layers = config.n_layer
248
+ self.n_embd = config.n_embd
249
+ self.n_positions = config.n_positions
250
+ self.initializer_range = config.initializer_range
251
+
252
+ self.tokens_embed = TFSharedEmbeddings(
253
+ config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="tokens_embed"
254
+ )
255
+ self.drop = keras.layers.Dropout(config.embd_pdrop)
256
+ self.h = [TFBlock(config, scale=True, name=f"h_._{i}") for i in range(config.n_layer)]
257
+
258
+ def build(self, input_shape=None):
259
+ with tf.name_scope("positions_embed"):
260
+ self.positions_embed = self.add_weight(
261
+ name="embeddings",
262
+ shape=[self.n_positions, self.n_embd],
263
+ initializer=get_initializer(self.initializer_range),
264
+ )
265
+
266
+ if self.built:
267
+ return
268
+ self.built = True
269
+ if getattr(self, "tokens_embed", None) is not None:
270
+ with tf.name_scope(self.tokens_embed.name):
271
+ self.tokens_embed.build(None)
272
+ if getattr(self, "h", None) is not None:
273
+ for layer in self.h:
274
+ with tf.name_scope(layer.name):
275
+ layer.build(None)
276
+
277
+ def get_input_embeddings(self):
278
+ return self.tokens_embed
279
+
280
+ def set_input_embeddings(self, value):
281
+ self.tokens_embed.weight = value
282
+ self.tokens_embed.vocab_size = shape_list(value)[0]
283
+
284
+ def _prune_heads(self, heads_to_prune):
285
+ """
286
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
287
+ """
288
+ raise NotImplementedError
289
+
290
+ @unpack_inputs
291
+ def call(
292
+ self,
293
+ input_ids: TFModelInputType | None = None,
294
+ attention_mask: np.ndarray | tf.Tensor | None = None,
295
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
296
+ position_ids: np.ndarray | tf.Tensor | None = None,
297
+ head_mask: np.ndarray | tf.Tensor | None = None,
298
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
299
+ output_attentions: Optional[bool] = None,
300
+ output_hidden_states: Optional[bool] = None,
301
+ return_dict: Optional[bool] = None,
302
+ training: Optional[bool] = False,
303
+ ) -> Union[Tuple, TFBaseModelOutput]:
304
+ if input_ids is not None and inputs_embeds is not None:
305
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
306
+ elif input_ids is not None:
307
+ input_shape = shape_list(input_ids)
308
+ input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
309
+ elif inputs_embeds is not None:
310
+ input_shape = shape_list(inputs_embeds)[:-1]
311
+ else:
312
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
313
+
314
+ if position_ids is None:
315
+ position_ids = tf.expand_dims(tf.range(input_shape[-1]), axis=0)
316
+
317
+ if attention_mask is not None:
318
+ # We create a 3D attention mask from a 2D tensor mask.
319
+ # Sizes are [batch_size, 1, 1, to_seq_length]
320
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
321
+ # this attention mask is more simple than the triangular masking of causal attention
322
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
323
+ attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
324
+
325
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
326
+ # masked positions, this operation will create a tensor which is 0.0 for
327
+ # positions we want to attend and -10000.0 for masked positions.
328
+ # Since we are adding it to the raw scores before the softmax, this is
329
+ # effectively the same as removing these entirely.
330
+
331
+ one_cst = tf.constant(1.0)
332
+ attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
333
+ attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
334
+ else:
335
+ attention_mask = None
336
+
337
+ # Prepare head mask if needed
338
+ # 1.0 in head_mask indicate we keep the head
339
+ # attention_probs has shape bsz x n_heads x N x N
340
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
341
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
342
+ if head_mask is not None:
343
+ raise NotImplementedError
344
+ else:
345
+ head_mask = [None] * self.num_hidden_layers
346
+ # head_mask = tf.constant([0] * self.num_hidden_layers)
347
+
348
+ position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
349
+
350
+ if inputs_embeds is None:
351
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
352
+ inputs_embeds = self.tokens_embed(input_ids, mode="embedding")
353
+ position_embeds = tf.gather(self.positions_embed, position_ids)
354
+ if token_type_ids is not None:
355
+ token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
356
+ check_embeddings_within_bounds(token_type_ids, self.config.vocab_size, "token_type_ids")
357
+ token_type_embeds = self.tokens_embed(token_type_ids, mode="embedding")
358
+ else:
359
+ token_type_embeds = 0
360
+ hidden_states = inputs_embeds + position_embeds + token_type_embeds
361
+ hidden_states = self.drop(hidden_states, training=training)
362
+
363
+ output_shape = input_shape + [shape_list(hidden_states)[-1]]
364
+
365
+ all_attentions = () if output_attentions else None
366
+ all_hidden_states = () if output_hidden_states else None
367
+ for i, block in enumerate(self.h):
368
+ if output_hidden_states:
369
+ all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
370
+
371
+ outputs = block(
372
+ hidden_states,
373
+ attention_mask,
374
+ head_mask[i],
375
+ output_attentions,
376
+ training=training,
377
+ )
378
+ hidden_states = outputs[0]
379
+ if output_attentions:
380
+ all_attentions = all_attentions + (outputs[1],)
381
+
382
+ hidden_states = tf.reshape(hidden_states, output_shape)
383
+ # Add last hidden state
384
+ if output_hidden_states:
385
+ all_hidden_states = all_hidden_states + (hidden_states,)
386
+
387
+ if output_attentions:
388
+ # let the number of heads free (-1) so we can extract attention even after head pruning
389
+ attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
390
+ all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
391
+
392
+ if not return_dict:
393
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
394
+
395
+ return TFBaseModelOutput(
396
+ last_hidden_state=hidden_states,
397
+ hidden_states=all_hidden_states,
398
+ attentions=all_attentions,
399
+ )
400
+
401
+
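As a side note on the mask handling in `TFOpenAIGPTMainLayer.call` above: the 1/0 padding mask is broadcast to `[batch_size, 1, 1, to_seq_length]` and turned into an additive bias. A minimal standalone sketch of just that transformation, using a made-up two-row batch:

```python
import tensorflow as tf

# Hypothetical 2D padding mask for 2 sequences of length 4 (1 = attend, 0 = pad),
# mirroring what the main layer receives.
attention_mask = tf.constant([[1, 1, 1, 0],
                              [1, 1, 0, 0]], dtype=tf.float32)

# Broadcast to [batch_size, 1, 1, to_seq_length] and convert to an additive mask:
# 0.0 where we attend, -10000.0 where we mask (added to the raw attention scores).
extended_mask = tf.reshape(attention_mask, (2, 1, 1, 4))
extended_mask = (1.0 - extended_mask) * -10000.0

print(extended_mask[0, 0, 0])  # [0., 0., 0., -10000.]
```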
402
+ class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
403
+ """
404
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
405
+ models.
406
+ """
407
+
408
+ config_class = OpenAIGPTConfig
409
+ base_model_prefix = "transformer"
410
+
411
+
412
+ @dataclass
413
+ class TFOpenAIGPTDoubleHeadsModelOutput(ModelOutput):
414
+ """
415
+ Base class for outputs of models predicting if two sentences are consecutive or not.
416
+
417
+ Args:
418
+ logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
419
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
420
+ mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`):
421
+ Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
422
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
423
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
424
+ `(batch_size, sequence_length, hidden_size)`.
425
+
426
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
427
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
428
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
429
+ sequence_length)`.
430
+
431
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
432
+ heads.
433
+ """
434
+
435
+ logits: tf.Tensor = None
436
+ mc_logits: tf.Tensor = None
437
+ hidden_states: Tuple[tf.Tensor] | None = None
438
+ attentions: Tuple[tf.Tensor] | None = None
439
+
440
+
441
+ OPENAI_GPT_START_DOCSTRING = r"""
442
+
443
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
444
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
445
+ etc.)
446
+
447
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
448
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
449
+ behavior.
450
+
451
+ <Tip>
452
+
453
+ TensorFlow models and layers in `transformers` accept two formats as input:
454
+
455
+ - having all inputs as keyword arguments (like PyTorch models), or
456
+ - having all inputs as a list, tuple or dict in the first positional argument.
457
+
458
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
459
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
460
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
461
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
462
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
463
+ positional argument:
464
+
465
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
466
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
467
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
468
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
469
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
470
+
471
+ Note that when creating models and layers with
472
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
473
+ about any of this, as you can just pass inputs like you would to any other Python function!
474
+
475
+ </Tip>
476
+
477
+ Parameters:
478
+ config ([`OpenAIGPTConfig`]): Model configuration class with all the parameters of the model.
479
+ Initializing with a config file does not load the weights associated with the model, only the
480
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
481
+ """
482
+
483
+ OPENAI_GPT_INPUTS_DOCSTRING = r"""
484
+ Args:
485
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
486
+ Indices of input sequence tokens in the vocabulary.
487
+
488
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
489
+ [`PreTrainedTokenizer.encode`] for details.
490
+
491
+ [What are input IDs?](../glossary#input-ids)
492
+ attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
493
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
494
+
495
+ - 1 for tokens that are **not masked**,
496
+ - 0 for tokens that are **masked**.
497
+
498
+ [What are attention masks?](../glossary#attention-mask)
499
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
500
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
501
+ 1]`:
502
+
503
+ - 0 corresponds to a *sentence A* token,
504
+ - 1 corresponds to a *sentence B* token.
505
+
506
+ [What are token type IDs?](../glossary#token-type-ids)
507
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
508
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
509
+ config.max_position_embeddings - 1]`.
510
+
511
+ [What are position IDs?](../glossary#position-ids)
512
+ head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
513
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
514
+
515
+ - 1 indicates the head is **not masked**,
516
+ - 0 indicates the head is **masked**.
517
+
518
+ inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
519
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
520
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
521
+ model's internal embedding lookup matrix.
522
+ output_attentions (`bool`, *optional*):
523
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
524
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
525
+ config will be used instead.
526
+ output_hidden_states (`bool`, *optional*):
527
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
528
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
529
+ used instead.
530
+ return_dict (`bool`, *optional*):
531
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
532
+ eager mode, in graph mode the value will always be set to True.
533
+ training (`bool`, *optional*, defaults to `False`):
534
+ Whether or not to use the model in training mode (some modules like dropout modules have different
535
+ behaviors between training and evaluation).
536
+ """
537
+
538
+
539
+ @add_start_docstrings(
540
+ "The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
541
+ OPENAI_GPT_START_DOCSTRING,
542
+ )
543
+ class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
544
+ def __init__(self, config, *inputs, **kwargs):
545
+ super().__init__(config, *inputs, **kwargs)
546
+ self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
547
+
548
+ @unpack_inputs
549
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
550
+ @add_code_sample_docstrings(
551
+ checkpoint=_CHECKPOINT_FOR_DOC,
552
+ output_type=TFBaseModelOutput,
553
+ config_class=_CONFIG_FOR_DOC,
554
+ )
555
+ def call(
556
+ self,
557
+ input_ids: TFModelInputType | None = None,
558
+ attention_mask: np.ndarray | tf.Tensor | None = None,
559
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
560
+ position_ids: np.ndarray | tf.Tensor | None = None,
561
+ head_mask: np.ndarray | tf.Tensor | None = None,
562
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
563
+ output_attentions: Optional[bool] = None,
564
+ output_hidden_states: Optional[bool] = None,
565
+ return_dict: Optional[bool] = None,
566
+ training: Optional[bool] = False,
567
+ ) -> Union[Tuple, TFBaseModelOutput]:
568
+ outputs = self.transformer(
569
+ input_ids=input_ids,
570
+ attention_mask=attention_mask,
571
+ token_type_ids=token_type_ids,
572
+ position_ids=position_ids,
573
+ head_mask=head_mask,
574
+ inputs_embeds=inputs_embeds,
575
+ output_attentions=output_attentions,
576
+ output_hidden_states=output_hidden_states,
577
+ return_dict=return_dict,
578
+ training=training,
579
+ )
580
+ return outputs
581
+
582
+ def build(self, input_shape=None):
583
+ if self.built:
584
+ return
585
+ self.built = True
586
+ if getattr(self, "transformer", None) is not None:
587
+ with tf.name_scope(self.transformer.name):
588
+ self.transformer.build(None)
589
+
590
+
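A rough usage sketch for `TFOpenAIGPTModel`, also illustrating the three equivalent input formats described in `OPENAI_GPT_START_DOCSTRING`. It assumes `transformers` and TensorFlow are installed and uses the `openai-community/openai-gpt` checkpoint referenced elsewhere in this file:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFOpenAIGPTModel

tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
model = TFOpenAIGPTModel.from_pretrained("openai-community/openai-gpt")

enc = tokenizer("hello , my dog is cute", return_tensors="tf")

# The three input formats accepted by the TF models are interchangeable:
out = model(enc["input_ids"])                                                        # single tensor
out = model([enc["input_ids"], enc["attention_mask"]])                               # list, docstring order
out = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})  # dict

print(out.last_hidden_state.shape)  # (batch_size, sequence_length, n_embd)
```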
591
+ @add_start_docstrings(
592
+ """
593
+ OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
594
+ embeddings).
595
+ """,
596
+ OPENAI_GPT_START_DOCSTRING,
597
+ )
598
+ class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelingLoss):
599
+ def __init__(self, config, *inputs, **kwargs):
600
+ super().__init__(config, *inputs, **kwargs)
601
+ self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
602
+ # OpenAIGPT does not have past caching features
603
+ self.supports_xla_generation = False
604
+
605
+ def get_output_embeddings(self):
606
+ return self.get_input_embeddings()
607
+
608
+ def set_output_embeddings(self, value):
609
+ self.set_input_embeddings(value)
610
+
611
+ @unpack_inputs
612
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
613
+ @add_code_sample_docstrings(
614
+ checkpoint=_CHECKPOINT_FOR_DOC,
615
+ output_type=TFCausalLMOutput,
616
+ config_class=_CONFIG_FOR_DOC,
617
+ )
618
+ def call(
619
+ self,
620
+ input_ids: TFModelInputType | None = None,
621
+ attention_mask: np.ndarray | tf.Tensor | None = None,
622
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
623
+ position_ids: np.ndarray | tf.Tensor | None = None,
624
+ head_mask: np.ndarray | tf.Tensor | None = None,
625
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
626
+ output_attentions: Optional[bool] = None,
627
+ output_hidden_states: Optional[bool] = None,
628
+ return_dict: Optional[bool] = None,
629
+ labels: np.ndarray | tf.Tensor | None = None,
630
+ training: Optional[bool] = False,
631
+ ) -> Union[Tuple, TFCausalLMOutput]:
632
+ r"""
633
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
634
+ Labels for computing the cross entropy language modeling loss. Indices should be in `[0, ...,
635
+ config.vocab_size - 1]`.
636
+ """
637
+
638
+ transformer_outputs = self.transformer(
639
+ input_ids=input_ids,
640
+ attention_mask=attention_mask,
641
+ token_type_ids=token_type_ids,
642
+ position_ids=position_ids,
643
+ head_mask=head_mask,
644
+ inputs_embeds=inputs_embeds,
645
+ output_attentions=output_attentions,
646
+ output_hidden_states=output_hidden_states,
647
+ return_dict=return_dict,
648
+ training=training,
649
+ )
650
+ hidden_states = transformer_outputs[0]
651
+
652
+ logits = self.transformer.tokens_embed(hidden_states, mode="linear")
653
+
654
+ loss = None
655
+ if labels is not None:
656
+ # shift labels to the left and cut last logit token
657
+ shifted_logits = logits[:, :-1]
658
+ labels = labels[:, 1:]
659
+ loss = self.hf_compute_loss(labels, shifted_logits)
660
+
661
+ if not return_dict:
662
+ output = (logits,) + transformer_outputs[1:]
663
+ return ((loss,) + output) if loss is not None else output
664
+
665
+ return TFCausalLMOutput(
666
+ loss=loss,
667
+ logits=logits,
668
+ hidden_states=transformer_outputs.hidden_states,
669
+ attentions=transformer_outputs.attentions,
670
+ )
671
+
672
+ def prepare_inputs_for_generation(self, inputs, **kwargs):
673
+ return {"input_ids": inputs}
674
+
675
+ def build(self, input_shape=None):
676
+ if self.built:
677
+ return
678
+ self.built = True
679
+ if getattr(self, "transformer", None) is not None:
680
+ with tf.name_scope(self.transformer.name):
681
+ self.transformer.build(None)
682
+
683
+
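`TFOpenAIGPTLMHeadModel.call` above shifts the labels left by one position and drops the last logit before computing the loss. A minimal sketch of triggering that path by passing `labels` (here simply reusing the input ids, a common smoke test; the checkpoint name is the one used throughout this file):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFOpenAIGPTLMHeadModel

tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
model = TFOpenAIGPTLMHeadModel.from_pretrained("openai-community/openai-gpt")

inputs = tokenizer("hello , my dog is cute", return_tensors="tf")

# Passing labels runs the shift-by-one causal LM loss computed in call().
outputs = model(inputs["input_ids"], labels=inputs["input_ids"])
print(float(tf.reduce_mean(outputs.loss)), outputs.logits.shape)
```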
684
+ @add_start_docstrings(
685
+ """
686
+ OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
687
+ RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
688
+ input embeddings, while the classification head takes as input the hidden state at a specified classification token index in the
689
+ input sequence.
690
+ """,
691
+ OPENAI_GPT_START_DOCSTRING,
692
+ )
693
+ class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
694
+ def __init__(self, config, *inputs, **kwargs):
695
+ super().__init__(config, *inputs, **kwargs)
696
+ config.num_labels = 1
697
+ self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
698
+ self.multiple_choice_head = TFSequenceSummary(
699
+ config, initializer_range=config.initializer_range, name="multiple_choice_head"
700
+ )
701
+
702
+ @unpack_inputs
703
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
704
+ @replace_return_docstrings(output_type=TFOpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
705
+ def call(
706
+ self,
707
+ input_ids: TFModelInputType | None = None,
708
+ attention_mask: np.ndarray | tf.Tensor | None = None,
709
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
710
+ position_ids: np.ndarray | tf.Tensor | None = None,
711
+ head_mask: np.ndarray | tf.Tensor | None = None,
712
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
713
+ mc_token_ids: np.ndarray | tf.Tensor | None = None,
714
+ output_attentions: Optional[bool] = None,
715
+ output_hidden_states: Optional[bool] = None,
716
+ return_dict: Optional[bool] = None,
717
+ training: Optional[bool] = False,
718
+ ) -> Union[Tuple, TFOpenAIGPTDoubleHeadsModelOutput]:
719
+ r"""
720
+ mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, defaults to index of the last token of the input):
721
+ Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
722
+ 1]`.
723
+
724
+ Return:
725
+
726
+ Examples:
727
+
728
+ ```python
729
+ >>> import tensorflow as tf
730
+ >>> from transformers import AutoTokenizer, TFOpenAIGPTDoubleHeadsModel
731
+
732
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
733
+ >>> model = TFOpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt")
734
+
735
+ >>> # Add a [CLS] to the vocabulary (we should train it also!)
736
+ >>> tokenizer.add_special_tokens({"cls_token": "[CLS]"})
737
+ >>> model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
738
+ >>> print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
739
+
740
+ >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
741
+ >>> encoding = tokenizer(choices, return_tensors="tf")
742
+ >>> inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}
743
+ >>> inputs["mc_token_ids"] = tf.constant(
744
+ ... [inputs["input_ids"].shape[-1] - 1, inputs["input_ids"].shape[-1] - 1]
745
+ ... )[
746
+ ... None, :
747
+ ... ] # Batch size 1
748
+ >>> outputs = model(inputs)
749
+ >>> lm_prediction_scores, mc_prediction_scores = outputs[:2]
750
+ ```"""
751
+
752
+ if input_ids is not None:
753
+ input_shapes = shape_list(input_ids)
754
+ else:
755
+ input_shapes = shape_list(inputs_embeds)[:-1]
756
+
757
+ seq_length = input_shapes[-1]
758
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
759
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
760
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
761
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
762
+ transformer_outputs = self.transformer(
763
+ flat_input_ids,
764
+ flat_attention_mask,
765
+ flat_token_type_ids,
766
+ flat_position_ids,
767
+ head_mask,
768
+ inputs_embeds,
769
+ output_attentions,
770
+ output_hidden_states,
771
+ return_dict=return_dict,
772
+ training=training,
773
+ )
774
+ hidden_states = transformer_outputs[0]
775
+ hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
776
+ if return_dict and output_hidden_states:
777
+ # We do this to match the slightly odd PT behaviour - the final hidden state is reshaped to rank 4 when the
778
+ # input is rank 3, but all other hidden states remain at rank-3 (with the first 2 dims merged)
779
+ all_hidden_states = transformer_outputs.hidden_states[:-1] + (hidden_states,)
780
+ else:
781
+ all_hidden_states = None
782
+ lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
783
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training)
784
+ mc_logits = tf.squeeze(mc_logits, axis=-1)
785
+
786
+ if not return_dict:
787
+ return (lm_logits, mc_logits) + transformer_outputs[1:]
788
+
789
+ return TFOpenAIGPTDoubleHeadsModelOutput(
790
+ logits=lm_logits,
791
+ mc_logits=mc_logits,
792
+ hidden_states=all_hidden_states,
793
+ attentions=transformer_outputs.attentions,
794
+ )
795
+
796
+ @property
797
+ def input_signature(self):
798
+ return {
799
+ "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
800
+ "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
801
+ "mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
802
+ }
803
+
804
+ def build(self, input_shape=None):
805
+ if self.built:
806
+ return
807
+ self.built = True
808
+ if getattr(self, "transformer", None) is not None:
809
+ with tf.name_scope(self.transformer.name):
810
+ self.transformer.build(None)
811
+ if getattr(self, "multiple_choice_head", None) is not None:
812
+ with tf.name_scope(self.multiple_choice_head.name):
813
+ self.multiple_choice_head.build(None)
814
+
815
+
816
+ @add_start_docstrings(
817
+ """
818
+ The OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
819
+
820
+ [`TFOpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal
821
+ models (e.g. GPT-2) do.
822
+
823
+ Since it does classification on the last token, it requires to know the position of the last token. If a
824
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
825
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
826
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
827
+ each row of the batch).
828
+ """,
829
+ OPENAI_GPT_START_DOCSTRING,
830
+ )
831
+ class TFOpenAIGPTForSequenceClassification(TFOpenAIGPTPreTrainedModel, TFSequenceClassificationLoss):
832
+ def __init__(self, config, *inputs, **kwargs):
833
+ super().__init__(config, *inputs, **kwargs)
834
+ self.num_labels = config.num_labels
835
+ self.score = keras.layers.Dense(
836
+ config.num_labels,
837
+ kernel_initializer=get_initializer(config.initializer_range),
838
+ name="score",
839
+ use_bias=False,
840
+ )
841
+ self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
842
+ self.config = config
843
+
844
+ @unpack_inputs
845
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
846
+ @add_code_sample_docstrings(
847
+ checkpoint=_CHECKPOINT_FOR_DOC,
848
+ output_type=TFSequenceClassifierOutput,
849
+ config_class=_CONFIG_FOR_DOC,
850
+ )
851
+ def call(
852
+ self,
853
+ input_ids: TFModelInputType | None = None,
854
+ attention_mask: np.ndarray | tf.Tensor | None = None,
855
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
856
+ position_ids: np.ndarray | tf.Tensor | None = None,
857
+ head_mask: np.ndarray | tf.Tensor | None = None,
858
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
859
+ output_attentions: Optional[bool] = None,
860
+ output_hidden_states: Optional[bool] = None,
861
+ return_dict: Optional[bool] = None,
862
+ labels: np.ndarray | tf.Tensor | None = None,
863
+ training: Optional[bool] = False,
864
+ ) -> Union[Tuple, TFSequenceClassifierOutput]:
865
+ r"""
866
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
867
+ Labels for computing the sequence classification loss. Indices should be in `[0, ...,
868
+ config.num_labels - 1]`.
869
+ """
870
+ transformer_outputs = self.transformer(
871
+ input_ids=input_ids,
872
+ attention_mask=attention_mask,
873
+ token_type_ids=token_type_ids,
874
+ position_ids=position_ids,
875
+ head_mask=head_mask,
876
+ inputs_embeds=inputs_embeds,
877
+ output_attentions=output_attentions,
878
+ output_hidden_states=output_hidden_states,
879
+ return_dict=return_dict,
880
+ training=training,
881
+ )
882
+
883
+ hidden_states = transformer_outputs[0]
884
+ logits = self.score(hidden_states)
885
+ in_logits = None
886
+ if self.config.pad_token_id is None:
887
+ sequence_lengths = -1
888
+ else:
889
+ if input_ids is not None:
890
+ sequence_lengths = (
891
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
892
+ - 1
893
+ )
894
+ sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
895
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
896
+ else:
897
+ sequence_lengths = -1
898
+ logger.warning(
899
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
900
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
901
+ )
902
+ loss = None
903
+
904
+ if labels is not None:
905
+ if input_ids is not None:
906
+ batch_size, sequence_length = shape_list(input_ids)[:2]
907
+ else:
908
+ batch_size, sequence_length = shape_list(inputs_embeds)[:2]
909
+ assert (
910
+ self.config.pad_token_id is not None or batch_size == 1
911
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
912
+
913
+ if not tf.is_tensor(sequence_lengths):
914
+ in_logits = logits[0:batch_size, sequence_lengths]
915
+
916
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
917
+
918
+ pooled_logits = in_logits if in_logits is not None else logits
919
+
920
+ if not return_dict:
921
+ output = (pooled_logits,) + transformer_outputs[1:]
922
+ return ((loss,) + output) if loss is not None else output
923
+
924
+ return TFSequenceClassifierOutput(
925
+ loss=loss,
926
+ logits=pooled_logits,
927
+ hidden_states=transformer_outputs.hidden_states,
928
+ attentions=transformer_outputs.attentions,
929
+ )
930
+
931
+ def build(self, input_shape=None):
932
+ if self.built:
933
+ return
934
+ self.built = True
935
+ if getattr(self, "score", None) is not None:
936
+ with tf.name_scope(self.score.name):
937
+ self.score.build([None, None, self.config.n_embd])
938
+ if getattr(self, "transformer", None) is not None:
939
+ with tf.name_scope(self.transformer.name):
940
+ self.transformer.build(None)
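The sequence classification head above locates the last non-padding token of each row with an `argmax` over the padding mask. A small standalone sketch, with a hypothetical `pad_token_id` of 0, shows what that indexing produces:

```python
import tensorflow as tf

pad_token_id = 0
input_ids = tf.constant([[5, 7, 9, 0, 0],
                         [3, 4, 0, 0, 0]])

# argmax finds the first padding position; subtracting 1 gives the last real token.
sequence_lengths = tf.argmax(
    tf.cast(tf.math.equal(input_ids, pad_token_id), input_ids.dtype), axis=-1
) - 1
# Rows with no padding would yield -1, so fall back to the final position.
sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)

print(sequence_lengths)  # [2, 1] -> index of the last non-padding token per row
```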
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai.py ADDED
@@ -0,0 +1,394 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for OpenAI GPT."""
16
+
17
+
18
+ import json
19
+ import os
20
+ import re
21
+ import unicodedata
22
+ from typing import Optional, Tuple
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {
31
+ "vocab_file": "vocab.json",
32
+ "merges_file": "merges.txt",
33
+ }
34
+
35
+
36
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
37
+ def whitespace_tokenize(text):
38
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
39
+ text = text.strip()
40
+ if not text:
41
+ return []
42
+ tokens = text.split()
43
+ return tokens
44
+
45
+
46
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
47
+ class BasicTokenizer(object):
48
+ """
49
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
50
+
51
+ Args:
52
+ do_lower_case (`bool`, *optional*, defaults to `True`):
53
+ Whether or not to lowercase the input when tokenizing.
54
+ never_split (`Iterable`, *optional*):
55
+ Collection of tokens which will never be split during tokenization. Only has an effect when
56
+ `do_basic_tokenize=True`
57
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
58
+ Whether or not to tokenize Chinese characters.
59
+
60
+ This should likely be deactivated for Japanese (see this
61
+ [issue](https://github.com/huggingface/transformers/issues/328)).
62
+ strip_accents (`bool`, *optional*):
63
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
64
+ value for `lowercase` (as in the original BERT).
65
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
66
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
67
+ the full context of the words, such as contractions.
68
+ """
69
+
70
+ def __init__(
71
+ self,
72
+ do_lower_case=True,
73
+ never_split=None,
74
+ tokenize_chinese_chars=True,
75
+ strip_accents=None,
76
+ do_split_on_punc=True,
77
+ ):
78
+ if never_split is None:
79
+ never_split = []
80
+ self.do_lower_case = do_lower_case
81
+ self.never_split = set(never_split)
82
+ self.tokenize_chinese_chars = tokenize_chinese_chars
83
+ self.strip_accents = strip_accents
84
+ self.do_split_on_punc = do_split_on_punc
85
+
86
+ def tokenize(self, text, never_split=None):
87
+ """
88
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
89
+
90
+ Args:
91
+ never_split (`List[str]`, *optional*)
92
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
93
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
94
+ """
95
+ # union() returns a new set by concatenating the two sets.
96
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
97
+ text = self._clean_text(text)
98
+
99
+ # This was added on November 1st, 2018 for the multilingual and Chinese
100
+ # models. This is also applied to the English models now, but it doesn't
101
+ # matter since the English models were not trained on any Chinese data
102
+ # and generally don't have any Chinese data in them (there are Chinese
103
+ # characters in the vocabulary because Wikipedia does have some Chinese
104
+ # words in the English Wikipedia.).
105
+ if self.tokenize_chinese_chars:
106
+ text = self._tokenize_chinese_chars(text)
107
+ # prevents treating the same character with different unicode codepoints as different characters
108
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
109
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
110
+ split_tokens = []
111
+ for token in orig_tokens:
112
+ if token not in never_split:
113
+ if self.do_lower_case:
114
+ token = token.lower()
115
+ if self.strip_accents is not False:
116
+ token = self._run_strip_accents(token)
117
+ elif self.strip_accents:
118
+ token = self._run_strip_accents(token)
119
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
120
+
121
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
122
+ return output_tokens
123
+
124
+ def _run_strip_accents(self, text):
125
+ """Strips accents from a piece of text."""
126
+ text = unicodedata.normalize("NFD", text)
127
+ output = []
128
+ for char in text:
129
+ cat = unicodedata.category(char)
130
+ if cat == "Mn":
131
+ continue
132
+ output.append(char)
133
+ return "".join(output)
134
+
135
+ def _run_split_on_punc(self, text, never_split=None):
136
+ """Splits punctuation on a piece of text."""
137
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
138
+ return [text]
139
+ chars = list(text)
140
+ i = 0
141
+ start_new_word = True
142
+ output = []
143
+ while i < len(chars):
144
+ char = chars[i]
145
+ if _is_punctuation(char):
146
+ output.append([char])
147
+ start_new_word = True
148
+ else:
149
+ if start_new_word:
150
+ output.append([])
151
+ start_new_word = False
152
+ output[-1].append(char)
153
+ i += 1
154
+
155
+ return ["".join(x) for x in output]
156
+
157
+ def _tokenize_chinese_chars(self, text):
158
+ """Adds whitespace around any CJK character."""
159
+ output = []
160
+ for char in text:
161
+ cp = ord(char)
162
+ if self._is_chinese_char(cp):
163
+ output.append(" ")
164
+ output.append(char)
165
+ output.append(" ")
166
+ else:
167
+ output.append(char)
168
+ return "".join(output)
169
+
170
+ def _is_chinese_char(self, cp):
171
+ """Checks whether CP is the codepoint of a CJK character."""
172
+ # This defines a "chinese character" as anything in the CJK Unicode block:
173
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
174
+ #
175
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
176
+ # despite its name. The modern Korean Hangul alphabet is a different block,
177
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
178
+ # space-separated words, so they are not treated specially and handled
179
+ # like the all of the other languages.
180
+ if (
181
+ (cp >= 0x4E00 and cp <= 0x9FFF)
182
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
183
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
184
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
185
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
186
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
187
+ or (cp >= 0xF900 and cp <= 0xFAFF)
188
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
189
+ ): #
190
+ return True
191
+
192
+ return False
193
+
194
+ def _clean_text(self, text):
195
+ """Performs invalid character removal and whitespace cleanup on text."""
196
+ output = []
197
+ for char in text:
198
+ cp = ord(char)
199
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
200
+ continue
201
+ if _is_whitespace(char):
202
+ output.append(" ")
203
+ else:
204
+ output.append(char)
205
+ return "".join(output)
206
+
207
+
208
+ def get_pairs(word):
209
+ """
210
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
211
+ strings)
212
+ """
213
+ pairs = set()
214
+ prev_char = word[0]
215
+ for char in word[1:]:
216
+ pairs.add((prev_char, char))
217
+ prev_char = char
218
+ return pairs
219
+
220
+
221
+ def text_standardize(text):
222
+ """
223
+ fixes some issues the spacy tokenizer had on books corpus also does some whitespace standardization
224
+ """
225
+ text = text.replace("—", "-")
226
+ text = text.replace("–", "-")
227
+ text = text.replace("―", "-")
228
+ text = text.replace("…", "...")
229
+ text = text.replace("´", "'")
230
+ text = re.sub(r"""(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)""", r" \1 ", text)
231
+ text = re.sub(r"\s*\n\s*", " \n ", text)
232
+ text = re.sub(r"[^\S\n]+", " ", text)
233
+ return text.strip()
234
+
235
+
236
+ class OpenAIGPTTokenizer(PreTrainedTokenizer):
237
+ """
238
+ Construct a GPT Tokenizer. Based on Byte-Pair-Encoding with the following peculiarities:
239
+
240
+ - lowercases all inputs,
241
+ - uses `SpaCy` tokenizer and `ftfy` for pre-BPE tokenization if they are installed, fallback to BERT's
242
+ `BasicTokenizer` if not.
243
+
244
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
245
+ this superclass for more information regarding those methods.
246
+
247
+ Args:
248
+ vocab_file (`str`):
249
+ Path to the vocabulary file.
250
+ merges_file (`str`):
251
+ Path to the merges file.
252
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
253
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
254
+ token instead.
255
+ """
256
+
257
+ vocab_files_names = VOCAB_FILES_NAMES
258
+ model_input_names = ["input_ids", "attention_mask"]
259
+
260
+ def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
261
+ try:
262
+ import ftfy
263
+ from spacy.lang.en import English
264
+
265
+ _nlp = English()
266
+ self.nlp = _nlp.tokenizer
267
+ self.fix_text = ftfy.fix_text
268
+ except ImportError:
269
+ logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.")
270
+ self.nlp = BasicTokenizer(do_lower_case=True)
271
+ self.fix_text = None
272
+
273
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
274
+ self.encoder = json.load(vocab_handle)
275
+ self.decoder = {v: k for k, v in self.encoder.items()}
276
+ with open(merges_file, encoding="utf-8") as merges_handle:
277
+ merges = merges_handle.read().split("\n")[1:-1]
278
+ merges = [tuple(merge.split()) for merge in merges]
279
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
280
+ self.cache = {}
281
+
282
+ super().__init__(unk_token=unk_token, **kwargs)
283
+
284
+ @property
285
+ def do_lower_case(self):
286
+ return True
287
+
288
+ @property
289
+ def vocab_size(self):
290
+ return len(self.encoder)
291
+
292
+ def get_vocab(self):
293
+ return dict(self.encoder, **self.added_tokens_encoder)
294
+
295
+ def bpe(self, token):
296
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
297
+ if token in self.cache:
298
+ return self.cache[token]
299
+ pairs = get_pairs(word)
300
+
301
+ if not pairs:
302
+ return token + "</w>"
303
+
304
+ while True:
305
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
306
+ if bigram not in self.bpe_ranks:
307
+ break
308
+ first, second = bigram
309
+ new_word = []
310
+ i = 0
311
+ while i < len(word):
312
+ try:
313
+ j = word.index(first, i)
314
+ except ValueError:
315
+ new_word.extend(word[i:])
316
+ break
317
+ else:
318
+ new_word.extend(word[i:j])
319
+ i = j
320
+
321
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
322
+ new_word.append(first + second)
323
+ i += 2
324
+ else:
325
+ new_word.append(word[i])
326
+ i += 1
327
+ new_word = tuple(new_word)
328
+ word = new_word
329
+ if len(word) == 1:
330
+ break
331
+ else:
332
+ pairs = get_pairs(word)
333
+ word = " ".join(word)
334
+ if word == "\n </w>":
335
+ word = "\n</w>"
336
+ self.cache[token] = word
337
+ return word
338
+
339
+ def _tokenize(self, text):
340
+ """Tokenize a string."""
341
+ split_tokens = []
342
+ if self.fix_text is None:
343
+ # Using BERT's BasicTokenizer
344
+ text = self.nlp.tokenize(text)
345
+ for token in text:
346
+ split_tokens.extend(list(self.bpe(token).split(" ")))
347
+ else:
348
+ # Using SpaCy & ftfy (original tokenization process of OpenAI GPT)
349
+ text = self.nlp(text_standardize(self.fix_text(text)))
350
+ for token in text:
351
+ split_tokens.extend(list(self.bpe(token.text.lower()).split(" ")))
352
+ return split_tokens
353
+
354
+ def _convert_token_to_id(self, token):
355
+ """Converts a token (str) in an id using the vocab."""
356
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
357
+
358
+ def _convert_id_to_token(self, index):
359
+ """Converts an id in a token (BPE) using the vocab."""
360
+ return self.decoder.get(index, self.unk_token)
361
+
362
+ def convert_tokens_to_string(self, tokens):
363
+ """Converts a sequence of tokens (string) in a single string."""
364
+ out_string = "".join(tokens).replace("</w>", " ").strip()
365
+ return out_string
366
+
367
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
368
+ if not os.path.isdir(save_directory):
369
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
370
+ return
371
+ vocab_file = os.path.join(
372
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
373
+ )
374
+ merge_file = os.path.join(
375
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
376
+ )
377
+
378
+ with open(vocab_file, "w", encoding="utf-8") as f:
379
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
380
+
381
+ index = 0
382
+ with open(merge_file, "w", encoding="utf-8") as writer:
383
+ writer.write("#version: 0.2\n")
384
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
385
+ if index != token_index:
386
+ logger.warning(
387
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
388
+ " Please check that the tokenizer is not corrupted!"
389
+ )
390
+ index = token_index
391
+ writer.write(" ".join(bpe_tokens) + "\n")
392
+ index += 1
393
+
394
+ return vocab_file, merge_file
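A short usage sketch of the slow tokenizer defined above. The checkpoint name matches the one used in the modeling files; the exact pieces depend on whether `ftfy`/`spacy` are installed, since that switches the pre-BPE tokenization path:

```python
from transformers import OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-community/openai-gpt")

tokens = tokenizer.tokenize("Hello there, friend!")
print(tokens)                                      # lower-cased BPE pieces ending in "</w>"
print(tokenizer.convert_tokens_to_string(tokens))  # "</w>" markers become spaces again

ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.decode(ids))
```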
llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai_fast.py ADDED
@@ -0,0 +1,64 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Fast Tokenization classes for OpenAI GPT."""
16
+
17
+
18
+ from typing import Optional, Tuple
19
+
20
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
21
+ from ...utils import logging
22
+ from .tokenization_openai import OpenAIGPTTokenizer
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
28
+
29
+
30
+ class OpenAIGPTTokenizerFast(PreTrainedTokenizerFast):
31
+ """
32
+ Construct a "fast" GPT Tokenizer (backed by HuggingFace's *tokenizers* library). Based on Byte-Pair-Encoding with
33
+ the following peculiarities:
34
+
35
+ - lower case all inputs
36
+ - uses BERT's BasicTokenizer for pre-BPE tokenization
37
+
38
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
39
+ refer to this superclass for more information regarding those methods.
40
+
41
+ Args:
42
+ vocab_file (`str`):
43
+ Path to the vocabulary file.
44
+ merges_file (`str`):
45
+ Path to the merges file.
46
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
47
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
48
+ token instead.
49
+ """
50
+
51
+ vocab_files_names = VOCAB_FILES_NAMES
52
+ model_input_names = ["input_ids", "attention_mask"]
53
+ slow_tokenizer_class = OpenAIGPTTokenizer
54
+
55
+ def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", **kwargs):
56
+ super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, **kwargs)
57
+
58
+ @property
59
+ def do_lower_case(self):
60
+ return True
61
+
62
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
63
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
64
+ return tuple(files)
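A quick comparison sketch between the slow and fast tokenizers. On plain ASCII text their ids are typically identical; they can diverge when `ftfy`/`spacy` are installed and alter the slow tokenizer's pre-tokenization:

```python
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast

slow = OpenAIGPTTokenizer.from_pretrained("openai-community/openai-gpt")
fast = OpenAIGPTTokenizerFast.from_pretrained("openai-community/openai-gpt")

text = "Fast and slow tokenizers usually agree on plain ASCII text."
print(slow(text)["input_ids"])
print(fast(text)["input_ids"])  # typically the same list of ids
```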
llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__init__.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_qwen2_moe": ["QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "Qwen2MoeConfig"],
25
+ }
26
+
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_qwen2_moe"] = [
35
+ "Qwen2MoeForCausalLM",
36
+ "Qwen2MoeModel",
37
+ "Qwen2MoePreTrainedModel",
38
+ "Qwen2MoeForSequenceClassification",
39
+ ]
40
+
41
+
42
+ if TYPE_CHECKING:
43
+ from .configuration_qwen2_moe import QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, Qwen2MoeConfig
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ from .modeling_qwen2_moe import (
52
+ Qwen2MoeForCausalLM,
53
+ Qwen2MoeForSequenceClassification,
54
+ Qwen2MoeModel,
55
+ Qwen2MoePreTrainedModel,
56
+ )
57
+
58
+
59
+ else:
60
+ import sys
61
+
62
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
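With the `_LazyModule` setup above, importing the config through this subpackage does not pull in the torch-backed model classes; those are only resolved on first access. A minimal sketch:

```python
# Importing the config via the lazy module; model classes load (and import torch) lazily.
from transformers.models.qwen2_moe import Qwen2MoeConfig

config = Qwen2MoeConfig()  # defaults mirror Qwen1.5-MoE-A2.7B
print(config.num_experts, config.num_experts_per_tok)  # 60 4
```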
llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (953 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/configuration_qwen2_moe.cpython-310.pyc ADDED
Binary file (7.37 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/modeling_qwen2_moe.cpython-310.pyc ADDED
Binary file (43.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/configuration_qwen2_moe.py ADDED
@@ -0,0 +1,175 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Qwen2MoE model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "Qwen/Qwen1.5-MoE-A2.7B": "https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B/resolve/main/config.json",
25
+ }
26
+
27
+
28
+ class Qwen2MoeConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`Qwen2MoeModel`]. It is used to instantiate a
31
+ Qwen2MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of
33
+ Qwen1.5-MoE-A2.7B" [Qwen/Qwen1.5-MoE-A2.7B"](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B").
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 151936):
41
+ Vocabulary size of the Qwen2MoE model. Defines the number of different tokens that can be represented by the
42
+ `input_ids` passed when calling [`Qwen2MoeModel`].
43
+ hidden_size (`int`, *optional*, defaults to 2048):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 5632):
46
+ Dimension of the MLP representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 24):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 16):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ num_key_value_heads (`int`, *optional*, defaults to 16):
52
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
53
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
54
+ `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
55
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
56
+ by meanpooling all the original heads within that group. For more details check out [this
57
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to `16`.
58
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
59
+ The non-linear activation function (function or string) in the decoder.
60
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
61
+ The maximum sequence length that this model might ever be used with.
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
65
+ The epsilon used by the rms normalization layers.
66
+ use_cache (`bool`, *optional*, defaults to `True`):
67
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
68
+ relevant if `config.is_decoder=True`.
69
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
70
+ Whether the model's input and output word embeddings should be tied.
71
+ rope_theta (`float`, *optional*, defaults to 10000.0):
72
+ The base period of the RoPE embeddings.
73
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
74
+ Whether to use sliding window attention.
75
+ sliding_window (`int`, *optional*, defaults to 4096):
76
+ Sliding window attention (SWA) window size. If not specified, will default to `4096`.
77
+ max_window_layers (`int`, *optional*, defaults to 28):
78
+ The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
79
+ attention_dropout (`float`, *optional*, defaults to 0.0):
80
+ The dropout ratio for the attention probabilities.
81
+ decoder_sparse_step (`int`, *optional*, defaults to 1):
82
+ The frequency of the MoE layer.
83
+ moe_intermediate_size (`int`, *optional*, defaults to 1408):
84
+ Intermediate size of the routed expert.
85
+ shared_expert_intermediate_size (`int`, *optional*, defaults to 5632):
86
+ Intermediate size of the shared expert.
87
+ num_experts_per_tok (`int`, *optional*, defaults to 4):
88
+ Number of selected experts.
89
+ num_experts (`int`, *optional*, defaults to 60):
90
+ Number of routed experts.
91
+ norm_topk_prob (`bool`, *optional*, defaults to `False`):
92
+ Whether to normalize the topk probabilities.
93
+ output_router_logits (`bool`, *optional*, defaults to `False`):
94
+ Whether or not the router logits should be returned by the model. Enabling this will also
95
+ allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
96
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
97
+ The aux loss factor for the total loss.
98
+
99
+ ```python
100
+ >>> from transformers import Qwen2MoeModel, Qwen2MoeConfig
101
+
102
+ >>> # Initializing a Qwen2MoE style configuration
103
+ >>> configuration = Qwen2MoeConfig()
104
+
105
+ >>> # Initializing a model from the Qwen1.5-MoE-A2.7B style configuration
106
+ >>> model = Qwen2MoeModel(configuration)
107
+
108
+ >>> # Accessing the model configuration
109
+ >>> configuration = model.config
110
+ ```"""
111
+
112
+ model_type = "qwen2_moe"
113
+ keys_to_ignore_at_inference = ["past_key_values"]
114
+
115
+ def __init__(
116
+ self,
117
+ vocab_size=151936,
118
+ hidden_size=2048,
119
+ intermediate_size=5632,
120
+ num_hidden_layers=24,
121
+ num_attention_heads=16,
122
+ num_key_value_heads=16,
123
+ hidden_act="silu",
124
+ max_position_embeddings=32768,
125
+ initializer_range=0.02,
126
+ rms_norm_eps=1e-6,
127
+ use_cache=True,
128
+ tie_word_embeddings=False,
129
+ rope_theta=10000.0,
130
+ use_sliding_window=False,
131
+ sliding_window=4096,
132
+ max_window_layers=28,
133
+ attention_dropout=0.0,
134
+ decoder_sparse_step=1,
135
+ moe_intermediate_size=1408,
136
+ shared_expert_intermediate_size=5632,
137
+ num_experts_per_tok=4,
138
+ num_experts=60,
139
+ norm_topk_prob=False,
140
+ output_router_logits=False,
141
+ router_aux_loss_coef=0.001,
142
+ **kwargs,
143
+ ):
144
+ self.vocab_size = vocab_size
145
+ self.max_position_embeddings = max_position_embeddings
146
+ self.hidden_size = hidden_size
147
+ self.intermediate_size = intermediate_size
148
+ self.num_hidden_layers = num_hidden_layers
149
+ self.num_attention_heads = num_attention_heads
150
+ self.use_sliding_window = use_sliding_window
151
+ self.sliding_window = sliding_window
152
+ self.max_window_layers = max_window_layers
153
+
154
+ self.num_key_value_heads = num_key_value_heads
155
+ self.hidden_act = hidden_act
156
+ self.initializer_range = initializer_range
157
+ self.rms_norm_eps = rms_norm_eps
158
+ self.use_cache = use_cache
159
+ self.rope_theta = rope_theta
160
+ self.attention_dropout = attention_dropout
161
+
162
+ # MoE arguments
163
+ self.decoder_sparse_step = decoder_sparse_step
164
+ self.moe_intermediate_size = moe_intermediate_size
165
+ self.shared_expert_intermediate_size = shared_expert_intermediate_size
166
+ self.num_experts_per_tok = num_experts_per_tok
167
+ self.num_experts = num_experts
168
+ self.norm_topk_prob = norm_topk_prob
169
+ self.output_router_logits = output_router_logits
170
+ self.router_aux_loss_coef = router_aux_loss_coef
171
+
172
+ super().__init__(
173
+ tie_word_embeddings=tie_word_embeddings,
174
+ **kwargs,
175
+ )
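The `num_key_value_heads` docstring above describes converting a multi-head checkpoint to a GQA checkpoint by mean-pooling the original heads within each group. A minimal sketch of that conversion, assuming a `k_proj`/`v_proj` weight of shape `(num_heads * head_dim, hidden_size)`; the helper name `mean_pool_kv_heads` and the shapes are illustrative, not a library API:

```python
import torch

def mean_pool_kv_heads(weight: torch.Tensor, num_heads: int, num_kv_heads: int) -> torch.Tensor:
    # weight: (num_heads * head_dim, hidden_size), e.g. k_proj.weight of an MHA checkpoint
    out_dim, hidden_size = weight.shape
    head_dim = out_dim // num_heads
    group = num_heads // num_kv_heads
    # group the head axis, then average each group into a single key/value head
    pooled = weight.view(num_kv_heads, group, head_dim, hidden_size).mean(dim=1)
    return pooled.reshape(num_kv_heads * head_dim, hidden_size)

mha_k_proj = torch.randn(16 * 128, 2048)   # 16 heads of dim 128, hidden_size 2048
gqa_k_proj = mean_pool_kv_heads(mha_k_proj, num_heads=16, num_kv_heads=4)
print(gqa_k_proj.shape)                    # torch.Size([512, 2048])
```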
llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/modeling_qwen2_moe.py ADDED
@@ -0,0 +1,1595 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Qwen2MoE model."""
21
+ import inspect
22
+ import math
23
+ import warnings
24
+ from typing import List, Optional, Tuple, Union
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
31
+
32
+ from ...activations import ACT2FN
33
+ from ...cache_utils import Cache, DynamicCache
34
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
35
+ from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast, SequenceClassifierOutputWithPast
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import (
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ is_flash_attn_2_available,
41
+ is_flash_attn_greater_or_equal_2_10,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from .configuration_qwen2_moe import Qwen2MoeConfig
46
+
47
+
48
+ if is_flash_attn_2_available():
49
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
50
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
51
+
52
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+ _CHECKPOINT_FOR_DOC = "Qwen/Qwen1.5-MoE-A2.7B"
57
+ _CONFIG_FOR_DOC = "Qwen2MoeConfig"
58
+
59
+ QWEN2MOE_PRETRAINED_MODEL_ARCHIVE_LIST = [
60
+ "Qwen/Qwen1.5-MoE-A2.7B",
61
+ # See all Qwen2 models at https://huggingface.co/models?filter=qwen2
62
+ ]
63
+
64
+
65
+ # Copied from transformers.models.mixtral.modeling_mixtral.load_balancing_loss_func
66
+ def load_balancing_loss_func(
67
+ gate_logits: torch.Tensor, num_experts: torch.Tensor = None, top_k=2, attention_mask: Optional[torch.Tensor] = None
68
+ ) -> float:
69
+ r"""
70
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
71
+
72
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
73
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
74
+ experts is too unbalanced.
75
+
76
+ Args:
77
+ gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]]):
78
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
79
+ shape [batch_size X sequence_length, num_experts].
80
+ attention_mask (`torch.Tensor`, *optional*):
81
+ The attention_mask used in the forward function, of
82
+ shape [batch_size X sequence_length] if not None.
83
+ num_experts (`int`, *optional*):
84
+ Number of experts
85
+
86
+ Returns:
87
+ The auxiliary loss.
88
+ """
89
+ if gate_logits is None or not isinstance(gate_logits, tuple):
90
+ return 0
91
+
92
+ if isinstance(gate_logits, tuple):
93
+ compute_device = gate_logits[0].device
94
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
95
+
96
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
97
+
98
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
99
+
100
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
101
+
102
+ if attention_mask is None:
103
+ # Compute the percentage of tokens routed to each expert
104
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
105
+
106
+ # Compute the average probability of routing to these experts
107
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
108
+ else:
109
+ batch_size, sequence_length = attention_mask.shape
110
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
111
+
112
+ # Compute the mask that masks all padding tokens as 0 with the same shape as expert_mask
113
+ expert_attention_mask = (
114
+ attention_mask[None, :, :, None, None]
115
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
116
+ .reshape(-1, top_k, num_experts)
117
+ .to(compute_device)
118
+ )
119
+
120
+ # Compute the percentage of tokens routed to each expert
121
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
122
+ expert_attention_mask, dim=0
123
+ )
124
+
125
+ # Compute the mask that masks all padding tokens as 0 with the same shape as tokens_per_expert
126
+ router_per_expert_attention_mask = (
127
+ attention_mask[None, :, :, None]
128
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
129
+ .reshape(-1, num_experts)
130
+ .to(compute_device)
131
+ )
132
+
133
+ # Compute the average probability of routing to these experts
134
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
135
+ router_per_expert_attention_mask, dim=0
136
+ )
137
+
138
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
139
+ return overall_loss * num_experts
140
+
141
+
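A minimal usage sketch for `load_balancing_loss_func` above, assuming the function is in scope; the batch and expert counts are illustrative. The returned scalar is the auxiliary loss that is meant to be scaled by `router_aux_loss_coef` when it is added to the total loss:

```python
import torch

# two decoder layers' router logits for a batch of 2 sequences of length 3, 4 experts
gate_logits = tuple(torch.randn(2 * 3, 4) for _ in range(2))

aux_loss = load_balancing_loss_func(gate_logits, num_experts=4, top_k=2)
print(float(aux_loss))  # a scalar auxiliary loss, later weighted by `router_aux_loss_coef`
```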
142
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
143
+ def _get_unpad_data(attention_mask):
144
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
145
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
146
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
147
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
148
+ return (
149
+ indices,
150
+ cu_seqlens,
151
+ max_seqlen_in_batch,
152
+ )
153
+
154
+
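A small sketch of what `_get_unpad_data` returns for a padded batch, assuming the function above is in scope; two sequences of true length 3 and 1 inside a padded batch of length 4:

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 0, 0, 0]])
indices, cu_seqlens, max_len = _get_unpad_data(attention_mask)
print(indices)      # tensor([0, 1, 2, 4]) -- flat positions of the real tokens
print(cu_seqlens)   # tensor([0, 3, 4], dtype=torch.int32) -- cumulative sequence lengths
print(max_len)      # 3
```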
155
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2Moe
156
+ class Qwen2MoeRMSNorm(nn.Module):
157
+ def __init__(self, hidden_size, eps=1e-6):
158
+ """
159
+ Qwen2MoeRMSNorm is equivalent to T5LayerNorm
160
+ """
161
+ super().__init__()
162
+ self.weight = nn.Parameter(torch.ones(hidden_size))
163
+ self.variance_epsilon = eps
164
+
165
+ def forward(self, hidden_states):
166
+ input_dtype = hidden_states.dtype
167
+ hidden_states = hidden_states.to(torch.float32)
168
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
169
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
170
+ return self.weight * hidden_states.to(input_dtype)
171
+
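A quick sanity-check sketch for `Qwen2MoeRMSNorm`, assuming the class above is in scope: the normalized activations come out with a root-mean-square of roughly one along the last dimension, since the learnable weight is initialized to ones:

```python
import torch

norm = Qwen2MoeRMSNorm(hidden_size=8, eps=1e-6)
x = torch.randn(2, 4, 8) * 5.0      # arbitrary input scale
y = norm(x)
print(y.pow(2).mean(-1).sqrt())     # ~1.0 everywhere
```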
172
+
173
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2Moe
174
+ class Qwen2MoeRotaryEmbedding(nn.Module):
175
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
176
+ super().__init__()
177
+
178
+ self.dim = dim
179
+ self.max_position_embeddings = max_position_embeddings
180
+ self.base = base
181
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
182
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
183
+
184
+ # Build here to make `torch.jit.trace` work.
185
+ self._set_cos_sin_cache(
186
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
187
+ )
188
+
189
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
190
+ self.max_seq_len_cached = seq_len
191
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
192
+
193
+ freqs = torch.outer(t, self.inv_freq)
194
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
195
+ emb = torch.cat((freqs, freqs), dim=-1)
196
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
197
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
198
+
199
+ def forward(self, x, seq_len=None):
200
+ # x: [bs, num_attention_heads, seq_len, head_size]
201
+ if seq_len > self.max_seq_len_cached:
202
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
203
+
204
+ return (
205
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
206
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
207
+ )
208
+
209
+
210
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
211
+ def rotate_half(x):
212
+ """Rotates half the hidden dims of the input."""
213
+ x1 = x[..., : x.shape[-1] // 2]
214
+ x2 = x[..., x.shape[-1] // 2 :]
215
+ return torch.cat((-x2, x1), dim=-1)
216
+
217
+
218
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
219
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
220
+ """Applies Rotary Position Embedding to the query and key tensors.
221
+
222
+ Args:
223
+ q (`torch.Tensor`): The query tensor.
224
+ k (`torch.Tensor`): The key tensor.
225
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
226
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
227
+ position_ids (`torch.Tensor`):
228
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
229
+ used to pass offsetted position ids when working with a KV-cache.
230
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
231
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
232
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
233
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
234
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
235
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
236
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
237
+ Returns:
238
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
239
+ """
240
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
241
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
242
+ q_embed = (q * cos) + (rotate_half(q) * sin)
243
+ k_embed = (k * cos) + (rotate_half(k) * sin)
244
+ return q_embed, k_embed
245
+
246
+
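A short sketch of how `Qwen2MoeRotaryEmbedding` and `apply_rotary_pos_emb` fit together, assuming both are in scope; the `(batch, heads, seq_len, head_dim)` layout matches what the attention classes below use, and the sizes are illustrative:

```python
import torch

bsz, heads, seq_len, head_dim = 1, 4, 6, 8
q = torch.randn(bsz, heads, seq_len, head_dim)
k = torch.randn(bsz, heads, seq_len, head_dim)

rope = Qwen2MoeRotaryEmbedding(head_dim, max_position_embeddings=32, base=10000)
cos, sin = rope(q, seq_len=seq_len)                  # each of shape (seq_len, head_dim)
position_ids = torch.arange(seq_len).unsqueeze(0)    # (1, seq_len)

q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
print(q_rot.shape, k_rot.shape)                      # shapes are unchanged
```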
247
+ # Modified from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2Moe
248
+ class Qwen2MoeMLP(nn.Module):
249
+ def __init__(self, config, intermediate_size=None):
250
+ super().__init__()
251
+ self.config = config
252
+ self.hidden_size = config.hidden_size
253
+ self.intermediate_size = intermediate_size
254
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
255
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
256
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
257
+ self.act_fn = ACT2FN[config.hidden_act]
258
+
259
+ def forward(self, x):
260
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
261
+
262
+
263
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
264
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
265
+ """
266
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
267
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
268
+ """
269
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
270
+ if n_rep == 1:
271
+ return hidden_states
272
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
273
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
274
+
275
+
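A small sketch of the head expansion performed by `repeat_kv`, assuming it is in scope; with 16 query heads and 4 key/value heads, each key/value head is repeated `n_rep = 16 // 4 = 4` times:

```python
import torch

kv = torch.randn(2, 4, 10, 128)       # (batch, num_key_value_heads, seq_len, head_dim)
expanded = repeat_kv(kv, n_rep=4)
print(expanded.shape)                 # torch.Size([2, 16, 10, 128])
```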
276
+ # Copied from transformers.models.qwen2.modeling_qwen2.Qwen2Attention with Qwen2->Qwen2Moe
277
+ class Qwen2MoeAttention(nn.Module):
278
+ """
279
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
280
+ and "Generating Long Sequences with Sparse Transformers".
281
+ """
282
+
283
+ def __init__(self, config: Qwen2MoeConfig, layer_idx: Optional[int] = None):
284
+ super().__init__()
285
+ self.config = config
286
+ self.layer_idx = layer_idx
287
+ if layer_idx is None:
288
+ logger.warning_once(
289
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead "
290
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
291
+ "when creating this class."
292
+ )
293
+
294
+ self.hidden_size = config.hidden_size
295
+ self.num_heads = config.num_attention_heads
296
+ self.head_dim = self.hidden_size // self.num_heads
297
+ self.num_key_value_heads = config.num_key_value_heads
298
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
299
+ self.max_position_embeddings = config.max_position_embeddings
300
+ self.rope_theta = config.rope_theta
301
+ self.is_causal = True
302
+ self.attention_dropout = config.attention_dropout
303
+
304
+ if (self.head_dim * self.num_heads) != self.hidden_size:
305
+ raise ValueError(
306
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
307
+ f" and `num_heads`: {self.num_heads})."
308
+ )
309
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
310
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
311
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
312
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
313
+
314
+ self.rotary_emb = Qwen2MoeRotaryEmbedding(
315
+ self.head_dim,
316
+ max_position_embeddings=self.max_position_embeddings,
317
+ base=self.rope_theta,
318
+ )
319
+
320
+ def forward(
321
+ self,
322
+ hidden_states: torch.Tensor,
323
+ attention_mask: Optional[torch.Tensor] = None,
324
+ position_ids: Optional[torch.LongTensor] = None,
325
+ past_key_value: Optional[Cache] = None,
326
+ output_attentions: bool = False,
327
+ use_cache: bool = False,
328
+ **kwargs,
329
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
330
+ if "padding_mask" in kwargs:
331
+ warnings.warn(
332
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
333
+ )
334
+ bsz, q_len, _ = hidden_states.size()
335
+
336
+ query_states = self.q_proj(hidden_states)
337
+ key_states = self.k_proj(hidden_states)
338
+ value_states = self.v_proj(hidden_states)
339
+
340
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
341
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
342
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
343
+
344
+ kv_seq_len = key_states.shape[-2]
345
+ if past_key_value is not None:
346
+ if self.layer_idx is None:
347
+ raise ValueError(
348
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
349
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
350
+ "with a layer index."
351
+ )
352
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
353
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
354
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
355
+
356
+ if past_key_value is not None:
357
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
358
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
359
+
360
+ # repeat k/v heads if n_kv_heads < n_heads
361
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
362
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
363
+
364
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
365
+
366
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
367
+ raise ValueError(
368
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
369
+ f" {attn_weights.size()}"
370
+ )
371
+
372
+ if attention_mask is not None:
373
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
374
+ raise ValueError(
375
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
376
+ )
377
+
378
+ attn_weights = attn_weights + attention_mask
379
+
380
+ # upcast attention to fp32
381
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
382
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
383
+ attn_output = torch.matmul(attn_weights, value_states)
384
+
385
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
386
+ raise ValueError(
387
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
388
+ f" {attn_output.size()}"
389
+ )
390
+
391
+ attn_output = attn_output.transpose(1, 2).contiguous()
392
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
393
+
394
+ attn_output = self.o_proj(attn_output)
395
+
396
+ if not output_attentions:
397
+ attn_weights = None
398
+
399
+ return attn_output, attn_weights, past_key_value
400
+
401
+
402
+ # Copied from transformers.models.qwen2.modeling_qwen2.Qwen2FlashAttention2 with Qwen2->Qwen2Moe
403
+ class Qwen2MoeFlashAttention2(Qwen2MoeAttention):
404
+ """
405
+ Qwen2Moe flash attention module, following Qwen2Moe attention module. This module inherits from `Qwen2MoeAttention`
406
+ as the weights of the module stay untouched. The only required change would be on the forward pass
407
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
408
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
409
+ config.max_window_layers layers.
410
+ """
411
+
412
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
413
+ def __init__(self, *args, **kwargs):
414
+ super().__init__(*args, **kwargs)
415
+
416
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
417
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
418
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
419
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
420
+
421
+ def forward(
422
+ self,
423
+ hidden_states: torch.Tensor,
424
+ attention_mask: Optional[torch.Tensor] = None,
425
+ position_ids: Optional[torch.LongTensor] = None,
426
+ past_key_value: Optional[Cache] = None,
427
+ output_attentions: bool = False,
428
+ use_cache: bool = False,
429
+ **kwargs,
430
+ ):
431
+ if "padding_mask" in kwargs:
432
+ warnings.warn(
433
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
434
+ )
435
+
436
+ # overwrite attention_mask with padding_mask
437
+ attention_mask = kwargs.pop("padding_mask")
438
+ bsz, q_len, _ = hidden_states.size()
439
+
440
+ query_states = self.q_proj(hidden_states)
441
+ key_states = self.k_proj(hidden_states)
442
+ value_states = self.v_proj(hidden_states)
443
+
444
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
445
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
446
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
447
+
448
+ kv_seq_len = key_states.shape[-2]
449
+ if past_key_value is not None:
450
+ if self.layer_idx is None:
451
+ raise ValueError(
452
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
453
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
454
+ "with a layer index."
455
+ )
456
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
457
+
458
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
459
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
460
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
461
+
462
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
463
+
464
+ use_sliding_windows = (
465
+ _flash_supports_window_size
466
+ and getattr(self.config, "sliding_window", None) is not None
467
+ and kv_seq_len > self.config.sliding_window
468
+ and self.config.use_sliding_window
469
+ )
470
+
471
+ if not _flash_supports_window_size:
472
+ logger.warning_once(
473
+ "The current flash attention version does not support sliding window attention; for a more memory-efficient implementation,"
474
+ " make sure to upgrade the flash-attn library."
475
+ )
476
+
477
+ if past_key_value is not None:
478
+ # Activate slicing cache only if the config has a `sliding_window` attribute
479
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
480
+ if (
481
+ getattr(self.config, "sliding_window", None) is not None
482
+ and kv_seq_len > self.config.sliding_window
483
+ and cache_has_contents
484
+ ):
485
+ slicing_tokens = 1 - self.config.sliding_window
486
+
487
+ past_key = past_key_value[self.layer_idx][0]
488
+ past_value = past_key_value[self.layer_idx][1]
489
+
490
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
491
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
492
+
493
+ if past_key.shape[-2] != self.config.sliding_window - 1:
494
+ raise ValueError(
495
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
496
+ f" {past_key.shape}"
497
+ )
498
+
499
+ if attention_mask is not None:
500
+ attention_mask = attention_mask[:, slicing_tokens:]
501
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
502
+
503
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
504
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
505
+
506
+ # repeat k/v heads if n_kv_heads < n_heads
507
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
508
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
509
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
510
+
511
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
512
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
513
+ # cast them back in float16 just to be sure everything works as expected.
514
+ input_dtype = query_states.dtype
515
+ if input_dtype == torch.float32:
516
+ if torch.is_autocast_enabled():
517
+ target_dtype = torch.get_autocast_gpu_dtype()
518
+ # Handle the case where the model is quantized
519
+ elif hasattr(self.config, "_pre_quantization_dtype"):
520
+ target_dtype = self.config._pre_quantization_dtype
521
+ else:
522
+ target_dtype = self.q_proj.weight.dtype
523
+
524
+ logger.warning_once(
525
+ f"The input hidden states seem to be silently cast in float32; this might be related to"
526
+ f" the fact that you have upcast embedding or layer norm layers to float32. We will cast back the input in"
527
+ f" {target_dtype}."
528
+ )
529
+
530
+ query_states = query_states.to(target_dtype)
531
+ key_states = key_states.to(target_dtype)
532
+ value_states = value_states.to(target_dtype)
533
+
534
+ # Reshape to the expected shape for Flash Attention
535
+ query_states = query_states.transpose(1, 2)
536
+ key_states = key_states.transpose(1, 2)
537
+ value_states = value_states.transpose(1, 2)
538
+
539
+ attn_output = self._flash_attention_forward(
540
+ query_states,
541
+ key_states,
542
+ value_states,
543
+ attention_mask,
544
+ q_len,
545
+ dropout=dropout_rate,
546
+ use_sliding_windows=use_sliding_windows,
547
+ )
548
+
549
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
550
+ attn_output = self.o_proj(attn_output)
551
+
552
+ if not output_attentions:
553
+ attn_weights = None
554
+
555
+ return attn_output, attn_weights, past_key_value
556
+
557
+ def _flash_attention_forward(
558
+ self,
559
+ query_states,
560
+ key_states,
561
+ value_states,
562
+ attention_mask,
563
+ query_length,
564
+ dropout=0.0,
565
+ softmax_scale=None,
566
+ use_sliding_windows=False,
567
+ ):
568
+ """
569
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
570
+ first unpads the input, then computes the attention scores and pads the final attention scores.
571
+
572
+ Args:
573
+ query_states (`torch.Tensor`):
574
+ Input query states to be passed to Flash Attention API
575
+ key_states (`torch.Tensor`):
576
+ Input key states to be passed to Flash Attention API
577
+ value_states (`torch.Tensor`):
578
+ Input value states to be passed to Flash Attention API
579
+ attention_mask (`torch.Tensor`):
580
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
581
+ position of padding tokens and 1 for the position of non-padding tokens.
582
+ dropout (`float`):
583
+ Attention dropout
584
+ softmax_scale (`float`, *optional*):
585
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
586
+ use_sliding_windows (`bool`, *optional*):
587
+ Whether to activate sliding window attention.
588
+ """
589
+ if not self._flash_attn_uses_top_left_mask:
590
+ causal = self.is_causal
591
+ else:
592
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
593
+ causal = self.is_causal and query_length != 1
594
+
595
+ # Decide whether to use SWA or not by layer index.
596
+ if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
597
+ use_sliding_windows = False
598
+
599
+ # Contains at least one padding token in the sequence
600
+ if attention_mask is not None:
601
+ batch_size = query_states.shape[0]
602
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
603
+ query_states, key_states, value_states, attention_mask, query_length
604
+ )
605
+
606
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
607
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
608
+
609
+ if not use_sliding_windows:
610
+ attn_output_unpad = flash_attn_varlen_func(
611
+ query_states,
612
+ key_states,
613
+ value_states,
614
+ cu_seqlens_q=cu_seqlens_q,
615
+ cu_seqlens_k=cu_seqlens_k,
616
+ max_seqlen_q=max_seqlen_in_batch_q,
617
+ max_seqlen_k=max_seqlen_in_batch_k,
618
+ dropout_p=dropout,
619
+ softmax_scale=softmax_scale,
620
+ causal=causal,
621
+ )
622
+ else:
623
+ attn_output_unpad = flash_attn_varlen_func(
624
+ query_states,
625
+ key_states,
626
+ value_states,
627
+ cu_seqlens_q=cu_seqlens_q,
628
+ cu_seqlens_k=cu_seqlens_k,
629
+ max_seqlen_q=max_seqlen_in_batch_q,
630
+ max_seqlen_k=max_seqlen_in_batch_k,
631
+ dropout_p=dropout,
632
+ softmax_scale=softmax_scale,
633
+ causal=causal,
634
+ window_size=(self.config.sliding_window, self.config.sliding_window),
635
+ )
636
+
637
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
638
+ else:
639
+ if not use_sliding_windows:
640
+ attn_output = flash_attn_func(
641
+ query_states,
642
+ key_states,
643
+ value_states,
644
+ dropout,
645
+ softmax_scale=softmax_scale,
646
+ causal=causal,
647
+ )
648
+ else:
649
+ attn_output = flash_attn_func(
650
+ query_states,
651
+ key_states,
652
+ value_states,
653
+ dropout,
654
+ softmax_scale=softmax_scale,
655
+ causal=causal,
656
+ window_size=(self.config.sliding_window, self.config.sliding_window),
657
+ )
658
+
659
+ return attn_output
660
+
661
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
662
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
663
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
664
+
665
+ # On the first iteration we need to properly re-create the padding mask
666
+ # by slicing it in the right place
667
+ if kv_seq_len != attention_mask.shape[-1]:
668
+ attention_mask_num_tokens = attention_mask.shape[-1]
669
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
670
+
671
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
672
+
673
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
674
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
675
+
676
+ if query_length == kv_seq_len:
677
+ query_layer = index_first_axis(
678
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
679
+ )
680
+ cu_seqlens_q = cu_seqlens_k
681
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
682
+ indices_q = indices_k
683
+ elif query_length == 1:
684
+ max_seqlen_in_batch_q = 1
685
+ cu_seqlens_q = torch.arange(
686
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
687
+ ) # There is a memcpy here, that is very bad.
688
+ indices_q = cu_seqlens_q[:-1]
689
+ query_layer = query_layer.squeeze(1)
690
+ else:
691
+ # The -q_len: slice assumes left padding.
692
+ attention_mask = attention_mask[:, -query_length:]
693
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
694
+
695
+ return (
696
+ query_layer,
697
+ key_layer,
698
+ value_layer,
699
+ indices_q,
700
+ (cu_seqlens_q, cu_seqlens_k),
701
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
702
+ )
703
+
704
+
705
+ # Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Qwen2Moe
706
+ class Qwen2MoeSdpaAttention(Qwen2MoeAttention):
707
+ """
708
+ Qwen2Moe attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
709
+ `Qwen2MoeAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
710
+ SDPA API.
711
+ """
712
+
713
+ # Adapted from Qwen2MoeAttention.forward
714
+ def forward(
715
+ self,
716
+ hidden_states: torch.Tensor,
717
+ attention_mask: Optional[torch.Tensor] = None,
718
+ position_ids: Optional[torch.LongTensor] = None,
719
+ past_key_value: Optional[Cache] = None,
720
+ output_attentions: bool = False,
721
+ use_cache: bool = False,
722
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
723
+ if output_attentions:
724
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
725
+ logger.warning_once(
726
+ "Qwen2MoeModel is using Qwen2MoeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
727
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
728
+ )
729
+ return super().forward(
730
+ hidden_states=hidden_states,
731
+ attention_mask=attention_mask,
732
+ position_ids=position_ids,
733
+ past_key_value=past_key_value,
734
+ output_attentions=output_attentions,
735
+ use_cache=use_cache,
736
+ )
737
+
738
+ bsz, q_len, _ = hidden_states.size()
739
+
740
+ query_states = self.q_proj(hidden_states)
741
+ key_states = self.k_proj(hidden_states)
742
+ value_states = self.v_proj(hidden_states)
743
+
744
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
745
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
746
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
747
+
748
+ kv_seq_len = key_states.shape[-2]
749
+ if past_key_value is not None:
750
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
751
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
752
+
753
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
754
+
755
+ if past_key_value is not None:
756
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
757
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
758
+
759
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
760
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
761
+
762
+ if attention_mask is not None:
763
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
764
+ raise ValueError(
765
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
766
+ )
767
+
768
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
769
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
770
+ if query_states.device.type == "cuda" and attention_mask is not None:
771
+ query_states = query_states.contiguous()
772
+ key_states = key_states.contiguous()
773
+ value_states = value_states.contiguous()
774
+
775
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
776
+ query_states,
777
+ key_states,
778
+ value_states,
779
+ attn_mask=attention_mask,
780
+ dropout_p=self.attention_dropout if self.training else 0.0,
781
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
782
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
783
+ )
784
+
785
+ attn_output = attn_output.transpose(1, 2).contiguous()
786
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
787
+
788
+ attn_output = self.o_proj(attn_output)
789
+
790
+ return attn_output, None, past_key_value
791
+
792
+
793
+ QWEN2MOE_ATTENTION_CLASSES = {
794
+ "eager": Qwen2MoeAttention,
795
+ "flash_attention_2": Qwen2MoeFlashAttention2,
796
+ "sdpa": Qwen2MoeSdpaAttention,
797
+ }
798
+
799
+
800
+ class Qwen2MoeSparseMoeBlock(nn.Module):
801
+ def __init__(self, config):
802
+ super().__init__()
803
+ self.num_experts = config.num_experts
804
+ self.top_k = config.num_experts_per_tok
805
+ self.norm_topk_prob = config.norm_topk_prob
806
+
807
+ # gating
808
+ self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
809
+ self.experts = nn.ModuleList(
810
+ [Qwen2MoeMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(self.num_experts)]
811
+ )
812
+
813
+ self.shared_expert = Qwen2MoeMLP(config, intermediate_size=config.shared_expert_intermediate_size)
814
+ self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False)
815
+
816
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
817
+ """ """
818
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
819
+ hidden_states = hidden_states.view(-1, hidden_dim)
820
+ # router_logits: (batch * sequence_length, n_experts)
821
+ router_logits = self.gate(hidden_states)
822
+
823
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
824
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
825
+ if self.norm_topk_prob:
826
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
827
+ # we cast back to the input dtype
828
+ routing_weights = routing_weights.to(hidden_states.dtype)
829
+
830
+ final_hidden_states = torch.zeros(
831
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
832
+ )
833
+
834
+ # One hot encode the selected experts to create an expert mask
835
+ # this will be used to easily index which expert is going to be solicited
836
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
837
+
838
+ # Loop over all available experts in the model and perform the computation on each expert
839
+ for expert_idx in range(self.num_experts):
840
+ expert_layer = self.experts[expert_idx]
841
+ idx, top_x = torch.where(expert_mask[expert_idx])
842
+
843
+ # Index the correct hidden states and compute the expert hidden state for
844
+ # the current expert. We need to make sure to multiply the output hidden
845
+ # states by `routing_weights` on the corresponding tokens (the selected top-k experts)
846
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
847
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
848
+
849
+ # However `index_add_` only support torch tensors for indexing so we'll use
850
+ # the `top_x` tensor here.
851
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
852
+
853
+ shared_expert_output = self.shared_expert(hidden_states)
854
+ shared_expert_output = F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output
855
+
856
+ final_hidden_states = final_hidden_states + shared_expert_output
857
+
858
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
859
+ return final_hidden_states, router_logits
860
+
861
+
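A routing sketch for `Qwen2MoeSparseMoeBlock`, assuming the block and `Qwen2MoeConfig` are in scope; the tiny config values here are illustrative only (real checkpoints use 60 routed experts with top-4 routing):

```python
import torch

cfg = Qwen2MoeConfig(hidden_size=32, moe_intermediate_size=16,
                     shared_expert_intermediate_size=64, num_experts=8,
                     num_experts_per_tok=2)
moe = Qwen2MoeSparseMoeBlock(cfg)

x = torch.randn(2, 5, 32)                # (batch, seq_len, hidden_size)
out, router_logits = moe(x)
print(out.shape, router_logits.shape)    # torch.Size([2, 5, 32]) torch.Size([10, 8])
```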
862
+ class Qwen2MoeDecoderLayer(nn.Module):
863
+ def __init__(self, config: Qwen2MoeConfig, layer_idx: int):
864
+ super().__init__()
865
+ self.hidden_size = config.hidden_size
866
+
867
+ self.self_attn = QWEN2MOE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
868
+
869
+ if config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0:
870
+ self.mlp = Qwen2MoeSparseMoeBlock(config)
871
+ else:
872
+ self.mlp = Qwen2MoeMLP(config, intermediate_size=config.intermediate_size)
873
+
874
+ self.input_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
875
+ self.post_attention_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
876
+
877
+ def forward(
878
+ self,
879
+ hidden_states: torch.Tensor,
880
+ attention_mask: Optional[torch.Tensor] = None,
881
+ position_ids: Optional[torch.LongTensor] = None,
882
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
883
+ output_attentions: Optional[bool] = False,
884
+ output_router_logits: Optional[bool] = False,
885
+ use_cache: Optional[bool] = False,
886
+ **kwargs,
887
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
888
+ if "padding_mask" in kwargs:
889
+ warnings.warn(
890
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
891
+ "Please make sure to use `attention_mask` instead."
892
+ )
893
+ """
894
+ Args:
895
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
896
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
897
+ `(batch, sequence_length)` where padding elements are indicated by 0.
898
+ output_attentions (`bool`, *optional*):
899
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
900
+ returned tensors for more detail.
901
+ output_router_logits (`bool`, *optional*):
902
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
903
+ and should not be returned during inference.
904
+ use_cache (`bool`, *optional*):
905
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
906
+ (see `past_key_values`).
907
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
908
+ """
909
+
910
+ residual = hidden_states
911
+
912
+ hidden_states = self.input_layernorm(hidden_states)
913
+
914
+ # Self Attention
915
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
916
+ hidden_states=hidden_states,
917
+ attention_mask=attention_mask,
918
+ position_ids=position_ids,
919
+ past_key_value=past_key_value,
920
+ output_attentions=output_attentions,
921
+ use_cache=use_cache,
922
+ )
923
+ hidden_states = residual + hidden_states
924
+
925
+ # Fully Connected
926
+ residual = hidden_states
927
+ hidden_states = self.post_attention_layernorm(hidden_states)
928
+
929
+ hidden_states = self.mlp(hidden_states)
930
+ if isinstance(hidden_states, tuple):
931
+ hidden_states, router_logits = hidden_states
932
+ else:
933
+ router_logits = None
934
+
935
+ hidden_states = residual + hidden_states
936
+
937
+ outputs = (hidden_states,)
938
+
939
+ if output_attentions:
940
+ outputs += (self_attn_weights,)
941
+
942
+ if use_cache:
943
+ outputs += (present_key_value,)
944
+
945
+ if output_router_logits:
946
+ outputs += (router_logits,)
947
+
948
+ return outputs
949
+
950
+
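A tiny sketch of how `decoder_sparse_step` decides which layers get the MoE block, following the `(layer_idx + 1) % config.decoder_sparse_step == 0` check in the decoder layer above; the values are illustrative:

```python
# with decoder_sparse_step = 2 and 6 layers, MoE blocks land on layers 1, 3, 5 (0-indexed)
decoder_sparse_step, num_hidden_layers = 2, 6
moe_layers = [i for i in range(num_hidden_layers) if (i + 1) % decoder_sparse_step == 0]
print(moe_layers)   # [1, 3, 5]; the remaining layers use the dense Qwen2MoeMLP
```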
951
+ QWEN2MOE_START_DOCSTRING = r"""
952
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
953
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
954
+ etc.)
955
+
956
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
957
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
958
+ and behavior.
959
+
960
+ Parameters:
961
+ config ([`Qwen2MoeConfig`]):
962
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
963
+ load the weights associated with the model, only the configuration. Check out the
964
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
965
+ """
966
+
967
+
968
+ @add_start_docstrings(
969
+ "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.",
970
+ QWEN2MOE_START_DOCSTRING,
971
+ )
972
+ class Qwen2MoePreTrainedModel(PreTrainedModel):
973
+ config_class = Qwen2MoeConfig
974
+ base_model_prefix = "model"
975
+ supports_gradient_checkpointing = True
976
+ _no_split_modules = ["Qwen2MoeDecoderLayer"]
977
+ _skip_keys_device_placement = "past_key_values"
978
+ _supports_flash_attn_2 = True
979
+ _supports_sdpa = True
980
+ _supports_cache_class = True
981
+
982
+ def _init_weights(self, module):
983
+ std = self.config.initializer_range
984
+ if isinstance(module, nn.Linear):
985
+ module.weight.data.normal_(mean=0.0, std=std)
986
+ if module.bias is not None:
987
+ module.bias.data.zero_()
988
+ elif isinstance(module, nn.Embedding):
989
+ module.weight.data.normal_(mean=0.0, std=std)
990
+ if module.padding_idx is not None:
991
+ module.weight.data[module.padding_idx].zero_()
992
+
993
+
994
+ QWEN2MOE_INPUTS_DOCSTRING = r"""
995
+ Args:
996
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
997
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
998
+ it.
999
+
1000
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1001
+ [`PreTrainedTokenizer.__call__`] for details.
1002
+
1003
+ [What are input IDs?](../glossary#input-ids)
1004
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1005
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1006
+
1007
+ - 1 for tokens that are **not masked**,
1008
+ - 0 for tokens that are **masked**.
1009
+
1010
+ [What are attention masks?](../glossary#attention-mask)
1011
+
1012
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1013
+ [`PreTrainedTokenizer.__call__`] for details.
1014
+
1015
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1016
+ `past_key_values`).
1017
+
1018
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1019
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1020
+ information on the default strategy.
1021
+
1022
+ - 1 indicates the head is **not masked**,
1023
+ - 0 indicates the head is **masked**.
1024
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1025
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1026
+ config.n_positions - 1]`.
1027
+
1028
+ [What are position IDs?](../glossary#position-ids)
1029
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1030
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1031
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
1032
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1033
+
1034
+ Two formats are allowed:
1035
+ - a [`~cache_utils.Cache`] instance;
1036
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1037
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1038
+ cache format.
1039
+
1040
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1041
+ legacy cache format will be returned.
1042
+
1043
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1044
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1045
+ of shape `(batch_size, sequence_length)`.
1046
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1047
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1048
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1049
+ model's internal embedding lookup matrix.
1050
+ use_cache (`bool`, *optional*):
1051
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1052
+ `past_key_values`).
1053
+ output_attentions (`bool`, *optional*):
1054
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1055
+ tensors for more detail.
1056
+ output_hidden_states (`bool`, *optional*):
1057
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1058
+ more detail.
1059
+ output_router_logits (`bool`, *optional*):
1060
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1061
+ should not be returned during inference.
1062
+ return_dict (`bool`, *optional*):
1063
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1064
+ """
1065
+
1066
+
1067
+ @add_start_docstrings(
1068
+ "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.",
1069
+ QWEN2MOE_START_DOCSTRING,
1070
+ )
1071
+ class Qwen2MoeModel(Qwen2MoePreTrainedModel):
1072
+ """
1073
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2MoeDecoderLayer`]
1074
+
1075
+ Args:
1076
+ config: Qwen2MoeConfig
1077
+ """
1078
+
1079
+ def __init__(self, config: Qwen2MoeConfig):
1080
+ super().__init__(config)
1081
+ self.padding_idx = config.pad_token_id
1082
+ self.vocab_size = config.vocab_size
1083
+
1084
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1085
+ self.layers = nn.ModuleList(
1086
+ [Qwen2MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
1087
+ )
1088
+ self._attn_implementation = config._attn_implementation
1089
+ self.norm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1090
+
1091
+ self.gradient_checkpointing = False
1092
+ # Initialize weights and apply final processing
1093
+ self.post_init()
1094
+
1095
+ def get_input_embeddings(self):
1096
+ return self.embed_tokens
1097
+
1098
+ def set_input_embeddings(self, value):
1099
+ self.embed_tokens = value
1100
+
1101
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
1102
+ def forward(
1103
+ self,
1104
+ input_ids: torch.LongTensor = None,
1105
+ attention_mask: Optional[torch.Tensor] = None,
1106
+ position_ids: Optional[torch.LongTensor] = None,
1107
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1108
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1109
+ use_cache: Optional[bool] = None,
1110
+ output_attentions: Optional[bool] = None,
1111
+ output_hidden_states: Optional[bool] = None,
1112
+ output_router_logits: Optional[bool] = None,
1113
+ return_dict: Optional[bool] = None,
1114
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
1115
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1116
+ output_router_logits = (
1117
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1118
+ )
1119
+ output_hidden_states = (
1120
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1121
+ )
1122
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1123
+
1124
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1125
+
1126
+ # retrieve input_ids and inputs_embeds
1127
+ if input_ids is not None and inputs_embeds is not None:
1128
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1129
+ elif input_ids is not None:
1130
+ batch_size, seq_length = input_ids.shape
1131
+ elif inputs_embeds is not None:
1132
+ batch_size, seq_length, _ = inputs_embeds.shape
1133
+ else:
1134
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1135
+
1136
+ if self.gradient_checkpointing and self.training:
1137
+ if use_cache:
1138
+ logger.warning_once(
1139
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1140
+ )
1141
+ use_cache = False
1142
+
1143
+ past_key_values_length = 0
1144
+
1145
+ if use_cache:
1146
+ use_legacy_cache = not isinstance(past_key_values, Cache)
1147
+ if use_legacy_cache:
1148
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1149
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
1150
+
1151
+ if position_ids is None:
1152
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1153
+ position_ids = torch.arange(
1154
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1155
+ )
1156
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
1157
+ else:
1158
+ position_ids = position_ids.view(-1, seq_length).long()
1159
+
1160
+ if inputs_embeds is None:
1161
+ inputs_embeds = self.embed_tokens(input_ids)
1162
+
1163
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
1164
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
1165
+ if is_padding_right:
1166
+ raise ValueError(
1167
+ "You are attempting to perform batched generation with padding_side='right'"
1168
+ " this may lead to unexpected behaviour for Flash Attention version of Qwen2MoE. Make sure to "
1169
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1170
+ )
1171
+
1172
+ if self._attn_implementation == "flash_attention_2":
1173
+ # 2d mask is passed through the layers
1174
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1175
+ elif self._attn_implementation == "sdpa" and not output_attentions:
1176
+ # output_attentions=True cannot be supported when using SDPA, and we fall back on
1177
+ # the manual implementation that requires a 4D causal mask in all cases.
1178
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1179
+ attention_mask,
1180
+ (batch_size, seq_length),
1181
+ inputs_embeds,
1182
+ past_key_values_length,
1183
+ sliding_window=self.config.sliding_window,
1184
+ )
1185
+ else:
1186
+ # 4d mask is passed through the layers
1187
+ attention_mask = _prepare_4d_causal_attention_mask(
1188
+ attention_mask,
1189
+ (batch_size, seq_length),
1190
+ inputs_embeds,
1191
+ past_key_values_length,
1192
+ sliding_window=self.config.sliding_window,
1193
+ )
1194
+
1195
+ hidden_states = inputs_embeds
1196
+
1197
+ # decoder layers
1198
+ all_hidden_states = () if output_hidden_states else None
1199
+ all_self_attns = () if output_attentions else None
1200
+ all_router_logits = () if output_router_logits else None
1201
+ next_decoder_cache = None
1202
+
1203
+ for decoder_layer in self.layers:
1204
+ if output_hidden_states:
1205
+ all_hidden_states += (hidden_states,)
1206
+
1207
+ if self.gradient_checkpointing and self.training:
1208
+ layer_outputs = self._gradient_checkpointing_func(
1209
+ decoder_layer.__call__,
1210
+ hidden_states,
1211
+ attention_mask,
1212
+ position_ids,
1213
+ past_key_values,
1214
+ output_attentions,
1215
+ output_router_logits,
1216
+ use_cache,
1217
+ )
1218
+ else:
1219
+ layer_outputs = decoder_layer(
1220
+ hidden_states,
1221
+ attention_mask=attention_mask,
1222
+ position_ids=position_ids,
1223
+ past_key_value=past_key_values,
1224
+ output_attentions=output_attentions,
1225
+ output_router_logits=output_router_logits,
1226
+ use_cache=use_cache,
1227
+ )
1228
+
1229
+ hidden_states = layer_outputs[0]
1230
+
1231
+ if use_cache:
1232
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1233
+
1234
+ if output_attentions:
1235
+ all_self_attns += (layer_outputs[1],)
1236
+
1237
+ if output_router_logits and layer_outputs[-1] is not None:
1238
+ all_router_logits += (layer_outputs[-1],)
1239
+
1240
+ hidden_states = self.norm(hidden_states)
1241
+
1242
+ # add hidden states from the last decoder layer
1243
+ if output_hidden_states:
1244
+ all_hidden_states += (hidden_states,)
1245
+
1246
+ next_cache = None
1247
+ if use_cache:
1248
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1249
+
1250
+ if not return_dict:
1251
+ return tuple(
1252
+ v
1253
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
1254
+ if v is not None
1255
+ )
1256
+ return MoeModelOutputWithPast(
1257
+ last_hidden_state=hidden_states,
1258
+ past_key_values=next_cache,
1259
+ hidden_states=all_hidden_states,
1260
+ attentions=all_self_attns,
1261
+ router_logits=all_router_logits,
1262
+ )
1263
+
1264
+
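To make the cache-aware position handling in `Qwen2MoeModel.forward` above concrete, here is a small standalone sketch of the `torch.arange`-based construction; it mirrors the code but is not tied to the model.

```python
import torch

def build_position_ids(seq_length: int, past_key_values_length: int) -> torch.LongTensor:
    # New positions continue from the number of cached tokens instead of restarting at 0,
    # matching the arange construction used in the forward pass above.
    position_ids = torch.arange(
        past_key_values_length, seq_length + past_key_values_length, dtype=torch.long
    )
    return position_ids.unsqueeze(0)

print(build_position_ids(4, 0))  # tensor([[0, 1, 2, 3]]) -> prompt processing, empty cache
print(build_position_ids(1, 7))  # tensor([[7]])          -> one new token after 7 cached ones
```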
1265
+ class Qwen2MoeForCausalLM(Qwen2MoePreTrainedModel):
1266
+ _tied_weights_keys = ["lm_head.weight"]
1267
+
1268
+ def __init__(self, config):
1269
+ super().__init__(config)
1270
+ self.model = Qwen2MoeModel(config)
1271
+ self.vocab_size = config.vocab_size
1272
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1273
+
1274
+ self.router_aux_loss_coef = config.router_aux_loss_coef
1275
+ self.num_experts = config.num_experts
1276
+ self.num_experts_per_tok = config.num_experts_per_tok
1277
+ # Initialize weights and apply final processing
1278
+ self.post_init()
1279
+
1280
+ def get_input_embeddings(self):
1281
+ return self.model.embed_tokens
1282
+
1283
+ def set_input_embeddings(self, value):
1284
+ self.model.embed_tokens = value
1285
+
1286
+ def get_output_embeddings(self):
1287
+ return self.lm_head
1288
+
1289
+ def set_output_embeddings(self, new_embeddings):
1290
+ self.lm_head = new_embeddings
1291
+
1292
+ def set_decoder(self, decoder):
1293
+ self.model = decoder
1294
+
1295
+ def get_decoder(self):
1296
+ return self.model
1297
+
1298
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
1299
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1300
+ def forward(
1301
+ self,
1302
+ input_ids: torch.LongTensor = None,
1303
+ attention_mask: Optional[torch.Tensor] = None,
1304
+ position_ids: Optional[torch.LongTensor] = None,
1305
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1306
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1307
+ labels: Optional[torch.LongTensor] = None,
1308
+ use_cache: Optional[bool] = None,
1309
+ output_attentions: Optional[bool] = None,
1310
+ output_hidden_states: Optional[bool] = None,
1311
+ output_router_logits: Optional[bool] = None,
1312
+ return_dict: Optional[bool] = None,
1313
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
1314
+ r"""
1315
+ Args:
1316
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1317
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
1318
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1319
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1320
+
1321
+ Returns:
1322
+
1323
+ Example:
1324
+
1325
+ ```python
1326
+ >>> from transformers import AutoTokenizer, Qwen2MoeForCausalLM
1327
+
1328
+ >>> model = Qwen2MoeForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1329
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1330
+
1331
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1332
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1333
+
1334
+ >>> # Generate
1335
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1336
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1337
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1338
+ ```"""
1339
+
1340
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1341
+ output_router_logits = (
1342
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1343
+ )
1344
+ output_hidden_states = (
1345
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1346
+ )
1347
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1348
+
1349
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1350
+ outputs = self.model(
1351
+ input_ids=input_ids,
1352
+ attention_mask=attention_mask,
1353
+ position_ids=position_ids,
1354
+ past_key_values=past_key_values,
1355
+ inputs_embeds=inputs_embeds,
1356
+ use_cache=use_cache,
1357
+ output_attentions=output_attentions,
1358
+ output_hidden_states=output_hidden_states,
1359
+ output_router_logits=output_router_logits,
1360
+ return_dict=return_dict,
1361
+ )
1362
+
1363
+ hidden_states = outputs[0]
1364
+ logits = self.lm_head(hidden_states)
1365
+ logits = logits.float()
1366
+
1367
+ loss = None
1368
+ if labels is not None:
1369
+ # Shift so that tokens < n predict n
1370
+ shift_logits = logits[..., :-1, :].contiguous()
1371
+ shift_labels = labels[..., 1:].contiguous()
1372
+ # Flatten the tokens
1373
+ loss_fct = CrossEntropyLoss()
1374
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1375
+ shift_labels = shift_labels.view(-1)
1376
+ # Enable model parallelism
1377
+ shift_labels = shift_labels.to(shift_logits.device)
1378
+ loss = loss_fct(shift_logits, shift_labels)
1379
+
1380
+ aux_loss = None
1381
+ if output_router_logits:
1382
+ aux_loss = load_balancing_loss_func(
1383
+ outputs.router_logits if return_dict else outputs[-1],
1384
+ self.num_experts,
1385
+ self.num_experts_per_tok,
1386
+ attention_mask,
1387
+ )
1388
+ if labels is not None:
1389
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # keep the aux loss on the same device as the main loss
1390
+
1391
+ if not return_dict:
1392
+ output = (logits,) + outputs[1:]
1393
+ if output_router_logits:
1394
+ output = (aux_loss,) + output
1395
+ return (loss,) + output if loss is not None else output
1396
+
1397
+ return MoeCausalLMOutputWithPast(
1398
+ loss=loss,
1399
+ aux_loss=aux_loss,
1400
+ logits=logits,
1401
+ past_key_values=outputs.past_key_values,
1402
+ hidden_states=outputs.hidden_states,
1403
+ attentions=outputs.attentions,
1404
+ router_logits=outputs.router_logits,
1405
+ )
1406
+
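The auxiliary loss above comes from `load_balancing_loss_func`, which this file defines earlier (not shown in this excerpt). As a rough, simplified sketch of the Switch-Transformer-style idea it implements (fraction of tokens routed to each expert multiplied by the mean router probability per expert, scaled by the number of experts), ignoring the attention-mask weighting of the real implementation:

```python
import torch
import torch.nn.functional as F

def toy_load_balancing_loss(router_logits: torch.Tensor, num_experts: int, top_k: int) -> torch.Tensor:
    # router_logits: (num_tokens, num_experts); simplified, no attention-mask handling.
    routing_weights = F.softmax(router_logits, dim=-1)
    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
    expert_mask = F.one_hot(selected_experts, num_experts).float()  # (tokens, top_k, experts)
    tokens_per_expert = expert_mask.mean(dim=(0, 1))                # fraction of assignments per expert
    router_prob_per_expert = routing_weights.mean(dim=0)            # mean router probability per expert
    return num_experts * torch.sum(tokens_per_expert * router_prob_per_expert)

aux = toy_load_balancing_loss(torch.randn(16, 8), num_experts=8, top_k=2)
print(aux)  # scaled by config.router_aux_loss_coef before being added to the LM loss above
```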
1407
+ def prepare_inputs_for_generation(
1408
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1409
+ ):
1410
+ # Omit tokens covered by past_key_values
1411
+ if past_key_values is not None:
1412
+ if isinstance(past_key_values, Cache):
1413
+ cache_length = past_key_values.get_seq_length()
1414
+ past_length = past_key_values.seen_tokens
1415
+ max_cache_length = past_key_values.get_max_length()
1416
+ else:
1417
+ cache_length = past_length = past_key_values[0][0].shape[2]
1418
+ max_cache_length = None
1419
+
1420
+ # Keep only the unprocessed tokens:
1421
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1422
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1423
+ # input)
1424
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1425
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1426
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1427
+ # input_ids based on the past_length.
1428
+ elif past_length < input_ids.shape[1]:
1429
+ input_ids = input_ids[:, past_length:]
1430
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1431
+
1432
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1433
+ if (
1434
+ max_cache_length is not None
1435
+ and attention_mask is not None
1436
+ and cache_length + input_ids.shape[1] > max_cache_length
1437
+ ):
1438
+ attention_mask = attention_mask[:, -max_cache_length:]
1439
+
1440
+ position_ids = kwargs.get("position_ids", None)
1441
+ if attention_mask is not None and position_ids is None:
1442
+ # create position_ids on the fly for batch generation
1443
+ position_ids = attention_mask.long().cumsum(-1) - 1
1444
+ position_ids.masked_fill_(attention_mask == 0, 1)
1445
+ if past_key_values:
1446
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1447
+
1448
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1449
+ if inputs_embeds is not None and past_key_values is None:
1450
+ model_inputs = {"inputs_embeds": inputs_embeds}
1451
+ else:
1452
+ model_inputs = {"input_ids": input_ids}
1453
+
1454
+ model_inputs.update(
1455
+ {
1456
+ "position_ids": position_ids,
1457
+ "past_key_values": past_key_values,
1458
+ "use_cache": kwargs.get("use_cache"),
1459
+ "attention_mask": attention_mask,
1460
+ }
1461
+ )
1462
+ return model_inputs
1463
+
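A toy illustration of the token-cropping branch in `prepare_inputs_for_generation` above: once `past_length` tokens are already cached, only the unprocessed suffix of `input_ids` is forwarded on the next step.

```python
import torch

input_ids = torch.tensor([[11, 12, 13, 14, 15]])
past_length = 4  # four tokens already live in the key/value cache

if past_length < input_ids.shape[1]:
    input_ids = input_ids[:, past_length:]

print(input_ids)  # tensor([[15]]) -> only the newest token is passed to the next forward call
```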
1464
+ @staticmethod
1465
+ def _reorder_cache(past_key_values, beam_idx):
1466
+ reordered_past = ()
1467
+ for layer_past in past_key_values:
1468
+ reordered_past += (
1469
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1470
+ )
1471
+ return reordered_past
1472
+
1473
+
1474
+ @add_start_docstrings(
1475
+ """
1476
+ The Qwen2MoE Model transformer with a sequence classification head on top (linear layer).
1477
+
1478
+ [`Qwen2MoeForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1479
+ (e.g. GPT-2) do.
1480
+
1481
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1482
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1483
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1484
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1485
+ each row of the batch).
1486
+ """,
1487
+ QWEN2MOE_START_DOCSTRING,
1488
+ )
1489
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Qwen2Moe, LLAMA->QWEN2MOE
1490
+ class Qwen2MoeForSequenceClassification(Qwen2MoePreTrainedModel):
1491
+ def __init__(self, config):
1492
+ super().__init__(config)
1493
+ self.num_labels = config.num_labels
1494
+ self.model = Qwen2MoeModel(config)
1495
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1496
+
1497
+ # Initialize weights and apply final processing
1498
+ self.post_init()
1499
+
1500
+ def get_input_embeddings(self):
1501
+ return self.model.embed_tokens
1502
+
1503
+ def set_input_embeddings(self, value):
1504
+ self.model.embed_tokens = value
1505
+
1506
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
1507
+ def forward(
1508
+ self,
1509
+ input_ids: torch.LongTensor = None,
1510
+ attention_mask: Optional[torch.Tensor] = None,
1511
+ position_ids: Optional[torch.LongTensor] = None,
1512
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1513
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1514
+ labels: Optional[torch.LongTensor] = None,
1515
+ use_cache: Optional[bool] = None,
1516
+ output_attentions: Optional[bool] = None,
1517
+ output_hidden_states: Optional[bool] = None,
1518
+ return_dict: Optional[bool] = None,
1519
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1520
+ r"""
1521
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1522
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1523
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1524
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1525
+ """
1526
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1527
+
1528
+ transformer_outputs = self.model(
1529
+ input_ids,
1530
+ attention_mask=attention_mask,
1531
+ position_ids=position_ids,
1532
+ past_key_values=past_key_values,
1533
+ inputs_embeds=inputs_embeds,
1534
+ use_cache=use_cache,
1535
+ output_attentions=output_attentions,
1536
+ output_hidden_states=output_hidden_states,
1537
+ return_dict=return_dict,
1538
+ )
1539
+ hidden_states = transformer_outputs[0]
1540
+ logits = self.score(hidden_states)
1541
+
1542
+ if input_ids is not None:
1543
+ batch_size = input_ids.shape[0]
1544
+ else:
1545
+ batch_size = inputs_embeds.shape[0]
1546
+
1547
+ if self.config.pad_token_id is None and batch_size != 1:
1548
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1549
+ if self.config.pad_token_id is None:
1550
+ sequence_lengths = -1
1551
+ else:
1552
+ if input_ids is not None:
1553
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1554
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1555
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1556
+ sequence_lengths = sequence_lengths.to(logits.device)
1557
+ else:
1558
+ sequence_lengths = -1
1559
+
1560
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1561
+
1562
+ loss = None
1563
+ if labels is not None:
1564
+ labels = labels.to(logits.device)
1565
+ if self.config.problem_type is None:
1566
+ if self.num_labels == 1:
1567
+ self.config.problem_type = "regression"
1568
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1569
+ self.config.problem_type = "single_label_classification"
1570
+ else:
1571
+ self.config.problem_type = "multi_label_classification"
1572
+
1573
+ if self.config.problem_type == "regression":
1574
+ loss_fct = MSELoss()
1575
+ if self.num_labels == 1:
1576
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1577
+ else:
1578
+ loss = loss_fct(pooled_logits, labels)
1579
+ elif self.config.problem_type == "single_label_classification":
1580
+ loss_fct = CrossEntropyLoss()
1581
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1582
+ elif self.config.problem_type == "multi_label_classification":
1583
+ loss_fct = BCEWithLogitsLoss()
1584
+ loss = loss_fct(pooled_logits, labels)
1585
+ if not return_dict:
1586
+ output = (pooled_logits,) + transformer_outputs[1:]
1587
+ return ((loss,) + output) if loss is not None else output
1588
+
1589
+ return SequenceClassifierOutputWithPast(
1590
+ loss=loss,
1591
+ logits=pooled_logits,
1592
+ past_key_values=transformer_outputs.past_key_values,
1593
+ hidden_states=transformer_outputs.hidden_states,
1594
+ attentions=transformer_outputs.attentions,
1595
+ )
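To see how `Qwen2MoeForSequenceClassification.forward` locates the token it pools, here is a small standalone example of the pad-token trick used above; the modulo keeps the expression valid (and ONNX-friendly) for rows that contain no padding at all.

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([
    [5, 6, 7, 0, 0],     # three real tokens, right-padded
    [8, 9, 10, 11, 12],  # no padding
])

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]

print(sequence_lengths)  # tensor([2, 4]) -> index of the last non-padding token per row
```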
llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py ADDED
@@ -0,0 +1,405 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Converting Meta SeamlessM4Tv2 checkpoints from seamless_communication to HF."""
16
+
17
+
18
+ import argparse
19
+ import os
20
+ from pathlib import Path
21
+
22
+ import torch
23
+ from accelerate.utils.modeling import find_tied_parameters
24
+ from seamless_communication.inference import Translator
25
+
26
+ from transformers import (
27
+ SeamlessM4TFeatureExtractor,
28
+ SeamlessM4TProcessor,
29
+ SeamlessM4TTokenizer,
30
+ SeamlessM4Tv2Config,
31
+ SeamlessM4Tv2Model,
32
+ )
33
+ from transformers.utils import logging
34
+
35
+
36
+ # fmt: off
37
+ UNIT_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kan__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tam__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__", ]
38
+ # fmt: on
39
+
40
+ # fmt: off
41
+ VOCODER_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__",]
42
+ # fmt: on
43
+
44
+ # fmt: off
45
+ LARGE_SUPPORTED_LANGUAGES = ["afr","amh","arb","ary","arz","asm","azj","bel","ben","bos","bul","cat","ceb","ces","ckb","cmn","cmn_Hant","cym","dan","deu","ell","eng","est","eus","fin","fra","fuv","gaz","gle","glg","guj","heb","hin","hrv","hun","hye","ibo","ind","isl","ita","jav","jpn","kan","kat","kaz","khk","khm","kir","kor","lao","lit","lug","luo","lvs","mai","mal","mar","mkd","mlt","mni","mya","nld","nno","nob","npi","nya","ory","pan","pbt","pes","pol","por","ron","rus","sat","slk","slv","sna","snd","som","spa","srp","swe","swh","tam","tel","tgk","tgl","tha","tur","ukr","urd","uzn","vie","yor","yue","zlm","zul",]
46
+ # fmt: on
47
+
48
+
49
+ def assert_param_count(model_1, model_2):
50
+ count_1 = sum(p[1].numel() for p in model_1.named_parameters() if "final_proj" not in p[0])
51
+ count_2 = sum(p[1].numel() for p in model_2.named_parameters() if "final_proj" not in p[0])
52
+ assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}"
53
+
54
+
55
+ def param_count(model):
56
+ return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0])
57
+
58
+
59
+ def _grab_best_device(use_gpu=True):
60
+ if torch.cuda.device_count() > 0 and use_gpu:
61
+ device = "cuda"
62
+ else:
63
+ device = "cpu"
64
+ return torch.device(device)
65
+
66
+
67
+ logging.set_verbosity_info()
68
+ logger = logging.get_logger(__name__)
69
+
70
+ vocoder_convert_list = [
71
+ ("ups", "hifi_gan.upsampler"),
72
+ ("conv_pre", "hifi_gan.conv_pre"),
73
+ ("resblocks", "hifi_gan.resblocks"),
74
+ ("conv_post", "hifi_gan.conv_post"),
75
+ ("lang", "language_embedding"),
76
+ ("spkr", "speaker_embedding"),
77
+ ("dict.", "unit_embedding."),
78
+ ("dur_predictor.conv1.0", "dur_predictor.conv1"),
79
+ ("dur_predictor.conv2.0", "dur_predictor.conv2"),
80
+ ]
81
+
82
+ # order is important
83
+ wav2vec_convert_list = [
84
+ ("speech_encoder_frontend.model_dim_proj", "feature_projection.projection"),
85
+ ("speech_encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"),
86
+ ("speech_encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"),
87
+ ("speech_encoder.inner.layers", "encoder.layers"),
88
+ ("speech_encoder.inner_layer_norm", "encoder.layer_norm"),
89
+ ("speech_encoder.adaptor_layers", "adapter.layers"),
90
+ ("inner_proj", "intermediate_dense"),
91
+ ("self_attn.output_proj", "self_attn.linear_out"),
92
+ ("output_proj", "output_dense"),
93
+ ("self_attn.k_proj", "self_attn.linear_k"),
94
+ ("self_attn.v_proj", "self_attn.linear_v"),
95
+ ("self_attn.q_proj", "self_attn.linear_q"),
96
+ ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"),
97
+ ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"),
98
+ ("self_attn.sdpa.rel_k_embed", "self_attn.distance_embedding"),
99
+ ("self_attn.sdpa.r_proj", "self_attn.linear_pos"),
100
+ ("conv.pointwise_conv1", "conv_module.pointwise_conv1"),
101
+ ("conv.pointwise_conv2", "conv_module.pointwise_conv2"),
102
+ ("conv.depthwise_conv", "conv_module.depthwise_conv"),
103
+ ("conv.batch_norm", "conv_module.batch_norm"),
104
+ ("conv.layer_norm", "conv_module.depthwise_layer_norm"),
105
+ ("conv_layer_norm", "conv_module.layer_norm"),
106
+ ("speech_encoder.proj1", "intermediate_ffn.intermediate_dense"),
107
+ ("speech_encoder.proj2", "intermediate_ffn.output_dense"),
108
+ ("speech_encoder.layer_norm", "inner_layer_norm"),
109
+ ]
110
+
111
+ t2u_convert_list = [
112
+ ("t2u_model.final_proj", "lm_head"),
113
+ ("t2u_model.", "model."),
114
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
115
+ ("encoder_decoder_attn", "cross_attention"),
116
+ ("linear_k", "k_proj"),
117
+ ("linear_v", "v_proj"),
118
+ ("linear_q", "q_proj"),
119
+ ("ffn.inner_proj", "ffn.fc1"),
120
+ ("ffn.output_proj", "ffn.fc2"),
121
+ ("output_proj", "out_proj"),
122
+ ("decoder_frontend.embed_char", "decoder.embed_char"),
123
+ ("decoder_frontend.pos_emb_alpha_char", "decoder.pos_emb_alpha_char"),
124
+ ("decoder_frontend.embed", "decoder.embed_tokens"),
125
+ ("decoder_frontend.pos_emb_alpha", "decoder.pos_emb_alpha"),
126
+ ("conv1d.conv", "conv"),
127
+ ("conv1d_layer_norm", "conv_layer_norm"),
128
+ ("decoder_frontend.variance_adaptor", "decoder"),
129
+ ("duration_predictor.conv1.0", "duration_predictor.conv1"),
130
+ ("duration_predictor.conv2.0", "duration_predictor.conv2"),
131
+ ]
132
+
133
+ text_convert_list = [
134
+ ("text_encoder.", ""),
135
+ ("text_decoder.", ""),
136
+ ("text_encoder_frontend.embed", "embed_tokens"),
137
+ ("text_decoder_frontend.embed", "embed_tokens"),
138
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
139
+ ("encoder_decoder_attn", "cross_attention"),
140
+ ("linear_k", "k_proj"),
141
+ ("linear_v", "v_proj"),
142
+ ("linear_q", "q_proj"),
143
+ ("ffn.inner_proj", "ffn.fc1"),
144
+ ("ffn.output_proj", "ffn.fc2"),
145
+ ("output_proj", "out_proj"),
146
+ ("final_proj", "lm_head"),
147
+ ]
148
+
149
+ CUR_PATH = os.path.dirname(os.path.abspath(__file__))
150
+ default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
151
+ CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "huggingface", "hub")
152
+
153
+
154
+ def _load_hf_config():
155
+ return SeamlessM4Tv2Config()
156
+
157
+
158
+ def _convert_model(
159
+ original_model,
160
+ hf_model,
161
+ convert_list,
162
+ device,
163
+ unwanted_prefix="model.",
164
+ filter_state_dict="speech",
165
+ exclude_state_dict=None,
166
+ ):
167
+ state_dict = original_model.state_dict()
168
+
169
+ # filter func
170
+ if isinstance(filter_state_dict, str):
171
+
172
+ def filter_func(x):
173
+ return filter_state_dict in x[0]
174
+
175
+ else:
176
+
177
+ def filter_func(item):
178
+ if exclude_state_dict is not None and exclude_state_dict in item[0]:
179
+ return False
180
+ for filter_el in filter_state_dict:
181
+ if filter_el in item[0]:
182
+ return True
183
+
184
+ return False
185
+
186
+ state_dict = dict(filter(filter_func, state_dict.items()))
187
+
188
+ for k, v in list(state_dict.items()):
189
+ new_k = k[len(unwanted_prefix) :]
190
+ for old_layer_name, new_layer_name in convert_list:
191
+ if old_layer_name in new_k:
192
+ new_k = new_k.replace(old_layer_name, new_layer_name)
193
+
194
+ # must do it by hand
195
+ if ".layer_norm" in new_k and new_k.split(".layer_norm")[0][-1].isnumeric():
196
+ new_k = new_k.replace("layer_norm", "final_layer_norm")
197
+
198
+ state_dict[new_k] = state_dict.pop(k)
199
+
200
+ extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys())
201
+ extra_keys = set(extra_keys)
202
+ missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys())
203
+ missing_keys = set({k for k in missing_keys if "final_logits_bias" not in k})
204
+ if len(extra_keys) != 0:
205
+ raise ValueError(f"extra keys found: {extra_keys}")
206
+ if len(missing_keys) != 0:
207
+ raise ValueError(f"missing keys: {missing_keys}")
208
+ hf_model.load_state_dict(state_dict, strict=False)
209
+ n_params = param_count(hf_model)
210
+
211
+ logger.info(f"model loaded: {round(n_params/1e6,1)}M params")
212
+
213
+ hf_model.eval()
214
+ hf_model.to(device)
215
+ del state_dict
216
+
217
+ return hf_model
218
+
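The ordered `(old, new)` convert lists above are applied by plain substring replacement inside `_convert_model`. A toy sketch of that mechanism on a fabricated state dict (the key below is made up purely for illustration):

```python
# Toy illustration of the ordered substring replacement driven by the convert lists above.
toy_convert_list = [
    ("speech_encoder.inner.layers", "encoder.layers"),  # applied first, so later rules see the new name
    ("inner_proj", "intermediate_dense"),
]

toy_state_dict = {"speech_encoder.inner.layers.0.ffn.inner_proj.weight": "tensor-placeholder"}

for key in list(toy_state_dict):
    new_key = key
    for old, new in toy_convert_list:
        if old in new_key:
            new_key = new_key.replace(old, new)
    toy_state_dict[new_key] = toy_state_dict.pop(key)

print(toy_state_dict)  # {'encoder.layers.0.ffn.intermediate_dense.weight': 'tensor-placeholder'}
```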
219
+
220
+ def load_model(save_dir, model_type, repo_id):
221
+ """
222
+ Meta SeamlessM4Tv2 is made of 8 main components:
223
+ - speech_encoder (#1) and speech_encoder_frontend (#2)
224
+ - t2u_model (#3)
225
+ - text_encoder (#4) and text_encoder_frontend (#5)
226
+ - text_decoder (#6) [and text_decoder_frontend (#5) = equals to text_encoder_frontend]
227
+ - final_proj (#7)
228
+ - vocoder (#8)
229
+ """
230
+ device = _grab_best_device()
231
+ name = "seamlessM4T_v2_large"
232
+
233
+ original_model = Translator(name, "vocoder_v2", device, dtype=torch.float32)
234
+
235
+ ######### TOKENIZER
236
+
237
+ langs = LARGE_SUPPORTED_LANGUAGES
238
+ langs = [f"__{lang}__" for lang in langs]
239
+ vocab_file = os.path.join(os.path.expanduser("~"), "tokenizer", model_type, "tokenizer.model")
240
+
241
+ save_dir = os.path.join(save_dir, name)
242
+ Path(save_dir).mkdir(exist_ok=True)
243
+
244
+ tokenizer = SeamlessM4TTokenizer(vocab_file, additional_special_tokens=langs)
245
+
246
+ sanity_check_lang_id = tokenizer.convert_tokens_to_ids("__fra__")
247
+
248
+ tokenizer.save_pretrained(save_dir)
249
+ tokenizer = SeamlessM4TTokenizer.from_pretrained(save_dir)
250
+
251
+ if sanity_check_lang_id != tokenizer.convert_tokens_to_ids("__fra__"):
252
+ raise ValueError(
253
+ f"Error in tokenizer saving/loading - __fra__ lang id is not coherent: {sanity_check_lang_id} vs {tokenizer.convert_tokens_to_ids('__fra__')}"
254
+ )
255
+
256
+ ####### get language to ids dict
257
+ text_decoder_lang_code_to_id = {lang.replace("__", ""): tokenizer.convert_tokens_to_ids(lang) for lang in langs}
258
+ # offset: vocoder unit vocab size + 5 (for EOS/PAD/BOS/UNK/MSK) + len(supported_languages)
259
+ t2u_lang_code_to_id = {
260
+ code.replace("__", ""): i + 10005 + len(UNIT_SUPPORTED_LANGUAGES)
261
+ for i, code in enumerate(UNIT_SUPPORTED_LANGUAGES)
262
+ }
263
+ vocoder_lang_code_to_id = {code.replace("__", ""): i for i, code in enumerate(VOCODER_SUPPORTED_LANGUAGES)}
264
+
265
+ ######### FE
266
+
267
+ fe = SeamlessM4TFeatureExtractor(language_code=langs)
268
+
269
+ fe.save_pretrained(save_dir)
270
+ fe = SeamlessM4TFeatureExtractor.from_pretrained(save_dir)
271
+
272
+ processor = SeamlessM4TProcessor(feature_extractor=fe, tokenizer=tokenizer)
273
+ processor.save_pretrained(save_dir)
274
+ processor.push_to_hub(repo_id=repo_id, create_pr=True)
275
+
276
+ processor = SeamlessM4TProcessor.from_pretrained(save_dir)
277
+
278
+ ######## Model
279
+
280
+ # init config
281
+ hf_config = _load_hf_config()
282
+
283
+ ######## get id_to_text and char_to_id from original model tokenizers
284
+ id_to_text = {i: original_model.text_tokenizer.model.index_to_token(i) for i in range(hf_config.vocab_size)}
285
+ char_to_id = {
286
+ original_model.model.t2u_model.decoder_frontend.char_tokenizer.model.index_to_token(i): i for i in range(10904)
287
+ }
288
+
289
+ # init model
290
+ hf_model = SeamlessM4Tv2Model(hf_config)
291
+
292
+ hf_model.generation_config.__setattr__("text_decoder_lang_to_code_id", text_decoder_lang_code_to_id)
293
+ hf_model.generation_config.__setattr__("t2u_lang_code_to_id", t2u_lang_code_to_id)
294
+ hf_model.generation_config.__setattr__("vocoder_lang_code_to_id", vocoder_lang_code_to_id)
295
+ hf_model.generation_config.__setattr__("id_to_text", id_to_text)
296
+ hf_model.generation_config.__setattr__("char_to_id", char_to_id)
297
+
298
+ # -1. take care of vocoder
299
+ # similarly to speech T5 must apply and remove weight norm
300
+ hf_model.vocoder.apply_weight_norm()
301
+ hf_model.vocoder = _convert_model(
302
+ original_model,
303
+ hf_model.vocoder,
304
+ vocoder_convert_list,
305
+ device,
306
+ unwanted_prefix="vocoder.code_generator.",
307
+ filter_state_dict="vocoder",
308
+ )
309
+ hf_model.vocoder.remove_weight_norm()
310
+
311
+ # 1. take care of speech encoder
312
+ wav2vec = hf_model.speech_encoder
313
+ hf_model.speech_encoder = _convert_model(
314
+ original_model, wav2vec, wav2vec_convert_list, device, unwanted_prefix="model.", filter_state_dict="speech"
315
+ )
316
+
317
+ # 2. take care of t2u
318
+
319
+ hf_model.t2u_model = _convert_model(
320
+ original_model,
321
+ hf_model.t2u_model,
322
+ t2u_convert_list,
323
+ device,
324
+ unwanted_prefix="model.",
325
+ filter_state_dict="t2u_model",
326
+ )
327
+
328
+ # 3. take care of text encoder
329
+ hf_model.text_encoder = _convert_model(
330
+ original_model,
331
+ hf_model.text_encoder,
332
+ text_convert_list,
333
+ device,
334
+ unwanted_prefix="model.",
335
+ filter_state_dict=["model.text_encoder"],
336
+ exclude_state_dict="t2u_model",
337
+ )
338
+
339
+ # 4. take care of text decoder
340
+ hf_model.text_decoder = _convert_model(
341
+ original_model,
342
+ hf_model.text_decoder,
343
+ text_convert_list,
344
+ device,
345
+ unwanted_prefix="model.",
346
+ filter_state_dict=["model.text_decoder"],
347
+ exclude_state_dict="t2u_model",
348
+ )
349
+
350
+ # 5. take care of final proj
351
+ hf_model.lm_head = _convert_model(
352
+ original_model,
353
+ hf_model.lm_head,
354
+ [("final_proj.", "")],
355
+ device,
356
+ unwanted_prefix="model.",
357
+ filter_state_dict=["model.final_proj"],
358
+ exclude_state_dict="t2u_model",
359
+ )
360
+
361
+ # sanity check
362
+ print(find_tied_parameters(hf_model))
363
+
364
+ count_1 = param_count(hf_model)
365
+ count_2 = param_count(original_model)
366
+
367
+ print(f"HF MODEL:{count_1}, ORIGINAL_MODEL: {count_2}, diff:{count_1 - count_2}")
368
+ print(f"HF MODEL excluding embeddings:{hf_model.num_parameters(exclude_embeddings=True)}")
369
+
370
+ del original_model
371
+
372
+ hf_model.generation_config._from_model_config = False
373
+ hf_model.save_pretrained(save_dir)
374
+ hf_model.push_to_hub(repo_id=repo_id, create_pr=True)
375
+ hf_model = SeamlessM4Tv2Model.from_pretrained(save_dir)
376
+
377
+
378
+ if __name__ == "__main__":
379
+ parser = argparse.ArgumentParser()
380
+ # Required parameters
381
+
382
+ parser.add_argument(
383
+ "--model_type",
384
+ default="large",
385
+ type=str,
386
+ help="Model type.",
387
+ )
388
+
389
+ parser.add_argument(
390
+ "--save_dir",
391
+ default="/home/ubuntu/weights_v2",
392
+ type=str,
393
+ help="Path to the output PyTorch model.",
394
+ )
395
+
396
+ parser.add_argument(
397
+ "--repo_id",
398
+ default="facebook/seamless-m4t-v2-large",
399
+ type=str,
400
+ help="Repo ID.",
401
+ )
402
+
403
+ args = parser.parse_args()
404
+
405
+ load_model(args.save_dir, args.model_type, args.repo_id)
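Assuming `seamless_communication` is installed and the original checkpoints are reachable, the script above can also be driven from Python; the arguments below are simply the argparse defaults shown above, not paths guaranteed to exist.

```python
# Programmatic equivalent of the CLI entry point above; paths/repo id are the argparse defaults.
from transformers.models.seamless_m4t_v2.convert_fairseq2_to_hf import load_model

load_model(
    save_dir="/home/ubuntu/weights_v2",        # --save_dir default
    model_type="large",                        # --model_type default
    repo_id="facebook/seamless-m4t-v2-large",  # --repo_id default
)
```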
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py ADDED
@@ -0,0 +1,108 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_sentencepiece_available,
20
+ is_tf_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
27
+ "feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"],
28
+ "processing_speech_to_text": ["Speech2TextProcessor"],
29
+ }
30
+
31
+ try:
32
+ if not is_sentencepiece_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
38
+
39
+ try:
40
+ if not is_tf_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_tf_speech_to_text"] = [
46
+ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "TFSpeech2TextForConditionalGeneration",
48
+ "TFSpeech2TextModel",
49
+ "TFSpeech2TextPreTrainedModel",
50
+ ]
51
+
52
+ try:
53
+ if not is_torch_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ _import_structure["modeling_speech_to_text"] = [
59
+ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
60
+ "Speech2TextForConditionalGeneration",
61
+ "Speech2TextModel",
62
+ "Speech2TextPreTrainedModel",
63
+ ]
64
+
65
+
66
+ if TYPE_CHECKING:
67
+ from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
68
+ from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
69
+ from .processing_speech_to_text import Speech2TextProcessor
70
+
71
+ try:
72
+ if not is_sentencepiece_available():
73
+ raise OptionalDependencyNotAvailable()
74
+ except OptionalDependencyNotAvailable:
75
+ pass
76
+ else:
77
+ from .tokenization_speech_to_text import Speech2TextTokenizer
78
+
79
+ try:
80
+ if not is_tf_available():
81
+ raise OptionalDependencyNotAvailable()
82
+ except OptionalDependencyNotAvailable:
83
+ pass
84
+ else:
85
+ from .modeling_tf_speech_to_text import (
86
+ TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
87
+ TFSpeech2TextForConditionalGeneration,
88
+ TFSpeech2TextModel,
89
+ TFSpeech2TextPreTrainedModel,
90
+ )
91
+
92
+ try:
93
+ if not is_torch_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ from .modeling_speech_to_text import (
99
+ SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
100
+ Speech2TextForConditionalGeneration,
101
+ Speech2TextModel,
102
+ Speech2TextPreTrainedModel,
103
+ )
104
+
105
+ else:
106
+ import sys
107
+
108
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
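With the `_LazyModule` registration above, these names resolve only on first attribute access; a hedged sketch of the usual import path (the printed values are the defaults from the configuration and feature-extraction files added later in this commit):

```python
# Accessing the names triggers the real submodule imports behind the lazy module above.
from transformers.models.speech_to_text import Speech2TextConfig, Speech2TextFeatureExtractor

config = Speech2TextConfig()
print(config.d_model, config.encoder_layers)  # 256 12, the documented defaults

feature_extractor = Speech2TextFeatureExtractor()
print(feature_extractor.num_mel_bins)  # 80
```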
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.77 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc ADDED
Binary file (8.47 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc ADDED
Binary file (3.69 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc ADDED
Binary file (10.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc ADDED
Binary file (44.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_tf_speech_to_text.cpython-310.pyc ADDED
Binary file (51.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/processing_speech_to_text.cpython-310.pyc ADDED
Binary file (4.19 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/tokenization_speech_to_text.cpython-310.pyc ADDED
Binary file (10.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/configuration_speech_to_text.py ADDED
@@ -0,0 +1,199 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Speech2Text model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class Speech2TextConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`Speech2TextModel`]. It is used to instantiate a
30
+ Speech2Text model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the Speech2Text
32
+ [facebook/s2t-small-librispeech-asr](https://huggingface.co/facebook/s2t-small-librispeech-asr) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 10000):
40
+ Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented by
41
+ the `input_ids` passed when calling [`Speech2TextModel`]
42
+ encoder_layers (`int`, *optional*, defaults to 12):
43
+ Number of encoder layers.
44
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
45
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
46
+ encoder_attention_heads (`int`, *optional*, defaults to 4):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ decoder_layers (`int`, *optional*, defaults to 6):
49
+ Number of decoder layers.
50
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
51
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
52
+ decoder_attention_heads (`int`, *optional*, defaults to 4):
53
+ Number of attention heads for each attention layer in the Transformer decoder.
54
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
55
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
56
+ more details.
57
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
58
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
59
+ more details.
60
+ use_cache (`bool`, *optional*, defaults to `True`):
61
+ Whether the model should return the last key/values attentions (not used by all models).
62
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
63
+ Whether the model is set up as an encoder-decoder architecture for sequence-to-sequence tasks.
64
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
65
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
66
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
67
+ d_model (`int`, *optional*, defaults to 256):
68
+ Dimensionality of the layers and the pooler layer.
69
+ dropout (`float`, *optional*, defaults to 0.1):
70
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
71
+ attention_dropout (`float`, *optional*, defaults to 0.0):
72
+ The dropout ratio for the attention probabilities.
73
+ activation_dropout (`float`, *optional*, defaults to 0.0):
74
+ The dropout ratio for activations inside the fully connected layer.
75
+ init_std (`float`, *optional*, defaults to 0.02):
76
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
77
+ decoder_start_token_id (`int`, *optional*, defaults to 2):
78
+ The initial token ID of the decoder when decoding sequences.
79
+ scale_embedding (`bool`, *optional*, defaults to `True`):
80
+ Whether the embeddings are scaled by the square root of `d_model`.
81
+ pad_token_id (`int`, *optional*, defaults to 1):
82
+ Padding token id.
83
+ bos_token_id (`int`, *optional*, defaults to 0):
84
+ The id of the beginning-of-sequence token.
85
+ eos_token_id (`int`, *optional*, defaults to 2):
86
+ The id of the end-of-sequence token.
87
+ max_source_positions (`int`, *optional*, defaults to 6000):
88
+ The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
89
+ max_target_positions (`int`, *optional*, defaults to 1024):
90
+ The maximum sequence length that this model might ever be used with. Typically, set this to something large
91
+ just in case (e.g., 512 or 1024 or 2048).
92
+ num_conv_layers (`int`, *optional*, defaults to 2):
93
+ Number of 1D convolutional layers in the conv module.
94
+ conv_kernel_sizes (`Tuple[int]`, *optional*, defaults to `(5, 5)`):
95
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the conv module. The length
96
+ of `conv_kernel_sizes` has to match `num_conv_layers`.
97
+ conv_channels (`int`, *optional*, defaults to 1024):
98
+ An integer defining the number of output channels of each convolution layers except the final one in the
99
+ conv module.
100
+ input_feat_per_channel (`int`, *optional*, defaults to 80):
101
+ An integer specifying the size of feature vector. This is also the dimensions of log-mel filter-bank
102
+ features.
103
+ input_channels (`int`, *optional*, defaults to 1):
104
+ An integer specifying number of input channels of the input feature vector.
105
+
106
+ Example:
107
+
108
+ ```python
109
+ >>> from transformers import Speech2TextConfig, Speech2TextModel
110
+
111
+ >>> # Initializing a Speech2Text s2t_transformer_s style configuration
112
+ >>> configuration = Speech2TextConfig()
113
+
114
+ >>> # Initializing a model (with random weights) from the s2t_transformer_s style configuration
115
+ >>> model = Speech2TextModel(configuration)
116
+
117
+ >>> # Accessing the model configuration
118
+ >>> configuration = model.config
119
+ ```"""
120
+
121
+ model_type = "speech_to_text"
122
+ keys_to_ignore_at_inference = ["past_key_values"]
123
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
124
+
125
+ def __init__(
126
+ self,
127
+ vocab_size=10000,
128
+ encoder_layers=12,
129
+ encoder_ffn_dim=2048,
130
+ encoder_attention_heads=4,
131
+ decoder_layers=6,
132
+ decoder_ffn_dim=2048,
133
+ decoder_attention_heads=4,
134
+ encoder_layerdrop=0.0,
135
+ decoder_layerdrop=0.0,
136
+ use_cache=True,
137
+ is_encoder_decoder=True,
138
+ activation_function="relu",
139
+ d_model=256,
140
+ dropout=0.1,
141
+ attention_dropout=0.0,
142
+ activation_dropout=0.0,
143
+ init_std=0.02,
144
+ decoder_start_token_id=2,
145
+ scale_embedding=True,
146
+ pad_token_id=1,
147
+ bos_token_id=0,
148
+ eos_token_id=2,
149
+ max_source_positions=6000,
150
+ max_target_positions=1024,
151
+ num_conv_layers=2,
152
+ conv_kernel_sizes=(5, 5),
153
+ conv_channels=1024,
154
+ input_feat_per_channel=80,
155
+ input_channels=1,
156
+ **kwargs,
157
+ ):
158
+ self.vocab_size = vocab_size
159
+ self.d_model = d_model
160
+ self.encoder_ffn_dim = encoder_ffn_dim
161
+ self.encoder_layers = encoder_layers
162
+ self.encoder_attention_heads = encoder_attention_heads
163
+ self.decoder_ffn_dim = decoder_ffn_dim
164
+ self.decoder_layers = decoder_layers
165
+ self.decoder_attention_heads = decoder_attention_heads
166
+ self.dropout = dropout
167
+ self.attention_dropout = attention_dropout
168
+ self.activation_dropout = activation_dropout
169
+ self.activation_function = activation_function
170
+ self.init_std = init_std
171
+ self.encoder_layerdrop = encoder_layerdrop
172
+ self.decoder_layerdrop = decoder_layerdrop
173
+ self.use_cache = use_cache
174
+ self.num_hidden_layers = encoder_layers
175
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
176
+ self.max_source_positions = max_source_positions
177
+ self.max_target_positions = max_target_positions
178
+ self.num_conv_layers = num_conv_layers
179
+ self.conv_kernel_sizes = list(conv_kernel_sizes)
180
+ self.conv_channels = conv_channels
181
+ self.input_feat_per_channel = input_feat_per_channel
182
+ self.input_channels = input_channels
183
+
184
+ if len(self.conv_kernel_sizes) != self.num_conv_layers:
185
+ raise ValueError(
186
+ "Configuration for convolutional module is incorrect. "
187
+ "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
188
+ f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
189
+ f"`config.num_conv_layers = {self.num_conv_layers}`."
190
+ )
191
+
192
+ super().__init__(
193
+ pad_token_id=pad_token_id,
194
+ bos_token_id=bos_token_id,
195
+ eos_token_id=eos_token_id,
196
+ is_encoder_decoder=is_encoder_decoder,
197
+ decoder_start_token_id=decoder_start_token_id,
198
+ **kwargs,
199
+ )
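A small sketch of the constructor check above: `conv_kernel_sizes` must contain exactly `num_conv_layers` entries, otherwise `__init__` raises the `ValueError` defined a few lines earlier.

```python
from transformers import Speech2TextConfig

# Valid: three convolutional layers, three kernel sizes.
config = Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5, 5))
print(config.conv_kernel_sizes)  # [5, 5, 5]

# Invalid: the lengths disagree, so construction fails with the ValueError above.
try:
    Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5, 5))
except ValueError as err:
    print(err)
```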
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+
17
+ import torch
18
+ from torch import nn
19
+
20
+ from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
21
+
22
+
23
+ def remove_ignore_keys_(state_dict):
24
+ ignore_keys = [
25
+ "encoder.version",
26
+ "decoder.version",
27
+ "model.encoder.version",
28
+ "model.decoder.version",
29
+ "decoder.output_projection.weight",
30
+ "_float_tensor",
31
+ "encoder.embed_positions._float_tensor",
32
+ "decoder.embed_positions._float_tensor",
33
+ ]
34
+ for k in ignore_keys:
35
+ state_dict.pop(k, None)
36
+
37
+
38
+ def rename_keys(s_dict):
39
+ keys = list(s_dict.keys())
40
+ for key in keys:
41
+ if "transformer_layers" in key:
42
+ s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
43
+ elif "subsample" in key:
44
+ s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
45
+
46
+
47
+ def make_linear_from_emb(emb):
48
+ vocab_size, emb_size = emb.weight.shape
49
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
50
+ lin_layer.weight.data = emb.weight.data
51
+ return lin_layer
52
+
53
+
54
+ def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
55
+ m2m_100 = torch.load(checkpoint_path, map_location="cpu")
56
+ args = m2m_100["args"]
57
+ state_dict = m2m_100["model"]
58
+ lm_head_weights = state_dict["decoder.output_projection.weight"]
59
+
60
+ remove_ignore_keys_(state_dict)
61
+ rename_keys(state_dict)
62
+
63
+ vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
64
+
65
+ tie_embeds = args.share_decoder_input_output_embed
66
+
67
+ conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
68
+ config = Speech2TextConfig(
69
+ vocab_size=vocab_size,
70
+ max_source_positions=args.max_source_positions,
71
+ max_target_positions=args.max_target_positions,
72
+ encoder_layers=args.encoder_layers,
73
+ decoder_layers=args.decoder_layers,
74
+ encoder_attention_heads=args.encoder_attention_heads,
75
+ decoder_attention_heads=args.decoder_attention_heads,
76
+ encoder_ffn_dim=args.encoder_ffn_embed_dim,
77
+ decoder_ffn_dim=args.decoder_ffn_embed_dim,
78
+ d_model=args.encoder_embed_dim,
79
+ dropout=args.dropout,
80
+ attention_dropout=args.attention_dropout,
81
+ activation_dropout=args.activation_dropout,
82
+ activation_function="relu",
83
+ num_conv_layers=len(conv_kernel_sizes),
84
+ conv_channels=args.conv_channels,
85
+ conv_kernel_sizes=conv_kernel_sizes,
86
+ input_feat_per_channel=args.input_feat_per_channel,
87
+ input_channels=args.input_channels,
88
+ tie_word_embeddings=tie_embeds,
89
+ num_beams=5,
90
+ max_length=200,
91
+ use_cache=True,
92
+ decoder_start_token_id=2,
93
+ early_stopping=True,
94
+ )
95
+
96
+ model = Speech2TextForConditionalGeneration(config)
97
+ missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
98
+ if len(missing) > 0 and not set(missing) <= {
99
+ "encoder.embed_positions.weights",
100
+ "decoder.embed_positions.weights",
101
+ }:
102
+ raise ValueError(
103
+ "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
104
+ f" but all the following weights are missing {missing}"
105
+ )
106
+
107
+ if tie_embeds:
108
+ model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
109
+ else:
110
+ model.lm_head.weight.data = lm_head_weights
111
+
112
+ model.save_pretrained(pytorch_dump_folder_path)
113
+
114
+
115
+ if __name__ == "__main__":
116
+ parser = argparse.ArgumentParser()
117
+ # Required parameters
118
+ parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
119
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
120
+ args = parser.parse_args()
121
+ convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
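For completeness, the fairseq-to-Transformers conversion above can also be called directly from Python; both paths below are placeholders, not files that exist.

```python
# Programmatic equivalent of the CLI above; both paths are placeholders.
from transformers.models.speech_to_text.convert_s2t_fairseq_to_tfms import (
    convert_fairseq_s2t_checkpoint_to_tfms,
)

convert_fairseq_s2t_checkpoint_to_tfms(
    checkpoint_path="path/to/fairseq_s2t_checkpoint.pt",  # --fairseq_path (placeholder)
    pytorch_dump_folder_path="path/to/hf_output_folder",  # --pytorch_dump_folder_path (placeholder)
)
```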
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/feature_extraction_speech_to_text.py ADDED
@@ -0,0 +1,297 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for Speech2Text
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...audio_utils import mel_filter_bank, spectrogram, window_function
24
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
25
+ from ...feature_extraction_utils import BatchFeature
26
+ from ...utils import PaddingStrategy, TensorType, is_speech_available, logging
27
+
28
+
29
+ if is_speech_available():
30
+ import torch
31
+ import torchaudio.compliance.kaldi as ta_kaldi
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
37
+ r"""
38
+ Constructs a Speech2Text feature extractor.
39
+
40
+ This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
41
+ should refer to this superclass for more information regarding those methods.
42
+
43
+ This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy
44
+ otherwise, and applies utterance-level cepstral mean and variance normalization to the extracted features.
45
+
46
+ Args:
47
+ feature_size (`int`, *optional*, defaults to 80):
48
+ The feature dimension of the extracted features.
49
+ sampling_rate (`int`, *optional*, defaults to 16000):
50
+ The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
51
+ num_mel_bins (`int`, *optional*, defaults to 80):
52
+ Number of Mel-frequency bins.
53
+ padding_value (`float`, *optional*, defaults to 0.0):
54
+ The value that is used to fill the padding vectors.
55
+ do_ceptral_normalize (`bool`, *optional*, defaults to `True`):
56
+ Whether or not to apply utterance-level cepstral mean and variance normalization to extracted features.
57
+ normalize_means (`bool`, *optional*, defaults to `True`):
58
+ Whether or not to zero-mean normalize the extracted features.
59
+ normalize_vars (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to unit-variance normalize the extracted features.
61
+ """
62
+
63
+ model_input_names = ["input_features", "attention_mask"]
64
+
65
+ def __init__(
66
+ self,
67
+ feature_size=80,
68
+ sampling_rate=16000,
69
+ num_mel_bins=80,
70
+ padding_value=0.0,
71
+ do_ceptral_normalize=True,
72
+ normalize_means=True,
73
+ normalize_vars=True,
74
+ **kwargs,
75
+ ):
76
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
77
+ self.num_mel_bins = num_mel_bins
78
+ self.do_ceptral_normalize = do_ceptral_normalize
79
+ self.normalize_means = normalize_means
80
+ self.normalize_vars = normalize_vars
81
+ self.return_attention_mask = True
82
+
83
+ if not is_speech_available():
84
+ mel_filters = mel_filter_bank(
85
+ num_frequency_bins=256,
86
+ num_mel_filters=self.num_mel_bins,
87
+ min_frequency=20,
88
+ max_frequency=sampling_rate // 2,
89
+ sampling_rate=sampling_rate,
90
+ norm=None,
91
+ mel_scale="kaldi",
92
+ triangularize_in_mel_space=True,
93
+ )
94
+
95
+ self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0)))
96
+ self.window = window_function(400, "povey", periodic=False)
97
+
98
+ def _extract_fbank_features(
99
+ self,
100
+ waveform: np.ndarray,
101
+ ) -> np.ndarray:
102
+ """
103
+ Get mel-filter bank features using TorchAudio if available, otherwise numpy. Note that the Kaldi-style extraction expects 16-bit signed integers as inputs
104
+ and hence the waveform should not be normalized before feature extraction.
105
+ """
106
+ waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
107
+ if is_speech_available():
108
+ waveform = torch.from_numpy(waveform).unsqueeze(0)
109
+ features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
110
+ features = features.numpy()
111
+ else:
112
+ waveform = np.squeeze(waveform)
113
+ features = spectrogram(
114
+ waveform,
115
+ self.window,
116
+ frame_length=400,
117
+ hop_length=160,
118
+ fft_length=512,
119
+ power=2.0,
120
+ center=False,
121
+ preemphasis=0.97,
122
+ mel_filters=self.mel_filters,
123
+ log_mel="log",
124
+ mel_floor=1.192092955078125e-07,
125
+ remove_dc_offset=True,
126
+ ).T
127
+ return features
128
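As a rough illustration of the TorchAudio path in `_extract_fbank_features` above (assuming `torchaudio` is installed; the waveform here is random noise, not real speech):

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

waveform = np.random.randn(16000).astype(np.float32)  # 1 second of fake audio at 16 kHz
waveform = waveform * (2**15)                          # scale to the 16-bit range, as the method above does
features = ta_kaldi.fbank(torch.from_numpy(waveform).unsqueeze(0), num_mel_bins=80, sample_frequency=16000)
print(features.shape)  # roughly (num_frames, 80); about 98 frames for 1 s with 25 ms windows and 10 ms hop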
+
129
+ @staticmethod
130
+ def utterance_cmvn(
131
+ x: np.ndarray,
132
+ input_length: int,
133
+ normalize_means: Optional[bool] = True,
134
+ normalize_vars: Optional[bool] = True,
135
+ padding_value: float = 0.0,
136
+ ) -> np.ndarray:
137
+ # make sure we normalize float32 arrays
138
+ if normalize_means:
139
+ mean = x[:input_length].mean(axis=0)
140
+ x = np.subtract(x, mean)
141
+ if normalize_vars:
142
+ std = x[:input_length].std(axis=0)
143
+ x = np.divide(x, std)
144
+
145
+ if input_length < x.shape[0]:
146
+ x[input_length:] = padding_value
147
+
148
+ # make sure array is in float32
149
+ x = x.astype(np.float32)
150
+
151
+ return x
152
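A small numpy illustration of what `utterance_cmvn` does on the non-padded portion of a feature matrix (toy numbers, purely for intuition):

import numpy as np

x = np.array([[1.0, 10.0], [3.0, 30.0], [0.0, 0.0]], dtype=np.float32)  # last row is padding
input_length = 2
mean = x[:input_length].mean(axis=0)          # [2.0, 20.0]
std = x[:input_length].std(axis=0)            # [1.0, 10.0]
normalized = (x - mean) / std
normalized[input_length:] = 0.0               # restore the padding value
print(normalized[:input_length])              # [[-1., -1.], [ 1.,  1.]]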
+
153
+ def normalize(
154
+ self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
155
+ ) -> List[np.ndarray]:
156
+ lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
157
+ return [
158
+ self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
159
+ for x, n in zip(input_features, lengths)
160
+ ]
161
+
162
+ def __call__(
163
+ self,
164
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
165
+ padding: Union[bool, str, PaddingStrategy] = False,
166
+ max_length: Optional[int] = None,
167
+ truncation: bool = False,
168
+ pad_to_multiple_of: Optional[int] = None,
169
+ return_tensors: Optional[Union[str, TensorType]] = None,
170
+ sampling_rate: Optional[int] = None,
171
+ return_attention_mask: Optional[bool] = None,
172
+ **kwargs,
173
+ ) -> BatchFeature:
174
+ """
175
+ Main method to featurize and prepare for the model one or several sequence(s).
176
+
177
+ Args:
178
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
179
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
180
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
181
+ stereo, i.e. single float per timestep.
182
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
183
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
184
+ index) among:
185
+
186
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
187
+ sequence if provided).
188
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
189
+ acceptable input length for the model if that argument is not provided.
190
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
191
+ lengths).
192
+ max_length (`int`, *optional*):
193
+ Maximum length of the returned list and optionally padding length (see above).
194
+ truncation (`bool`):
195
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
196
+ pad_to_multiple_of (`int`, *optional*):
197
+ If set will pad the sequence to a multiple of the provided value.
198
+
199
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
200
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
201
+ return_attention_mask (`bool`, *optional*):
202
+ Whether to return the attention mask. If left to the default, will return the attention mask according
203
+ to the specific feature_extractor's default.
204
+
205
+ [What are attention masks?](../glossary#attention-mask)
206
+
207
+ <Tip>
208
+
209
+ For Speech2TextTransformer models, `attention_mask` should always be passed for batched inference, to
210
+ avoid subtle bugs.
211
+
212
+ </Tip>
213
+
214
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
215
+ If set, will return tensors instead of list of python integers. Acceptable values are:
216
+
217
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
218
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
219
+ - `'np'`: Return Numpy `np.ndarray` objects.
220
+ sampling_rate (`int`, *optional*):
221
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
222
+ `sampling_rate` at the forward call to prevent silent errors.
223
+ padding_value (`float`, defaults to 0.0):
224
+ The value that is used to fill the padding values / vectors.
225
+ """
226
+
227
+ if sampling_rate is not None:
228
+ if sampling_rate != self.sampling_rate:
229
+ raise ValueError(
230
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
231
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
232
+ f" {self.sampling_rate} and not {sampling_rate}."
233
+ )
234
+ else:
235
+ logger.warning(
236
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
237
+ "Failing to do so can result in silent errors that might be hard to debug."
238
+ )
239
+
240
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
241
+ if is_batched_numpy and len(raw_speech.shape) > 2:
242
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
243
+ is_batched = is_batched_numpy or (
244
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
245
+ )
246
+
247
+ if is_batched:
248
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
249
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
250
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
251
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
252
+ raw_speech = raw_speech.astype(np.float32)
253
+
254
+ # always return batch
255
+ if not is_batched:
256
+ raw_speech = [raw_speech]
257
+
258
+ # extract fbank features
259
+ features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
260
+
261
+ # convert into correct format for padding
262
+ encoded_inputs = BatchFeature({"input_features": features})
263
+
264
+ padded_inputs = self.pad(
265
+ encoded_inputs,
266
+ padding=padding,
267
+ max_length=max_length,
268
+ truncation=truncation,
269
+ pad_to_multiple_of=pad_to_multiple_of,
270
+ return_attention_mask=return_attention_mask,
271
+ **kwargs,
272
+ )
273
+
274
+ # make sure list is in array format
275
+ input_features = padded_inputs.get("input_features")
276
+ if isinstance(input_features[0], list):
277
+ padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
278
+
279
+ attention_mask = padded_inputs.get("attention_mask")
280
+ if attention_mask is not None:
281
+ padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
282
+
283
+ # Utterance-level cepstral mean and variance normalization
284
+ if self.do_ceptral_normalize:
285
+ attention_mask = (
286
+ np.array(attention_mask, dtype=np.int32)
287
+ if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
288
+ else None
289
+ )
290
+ padded_inputs["input_features"] = self.normalize(
291
+ padded_inputs["input_features"], attention_mask=attention_mask
292
+ )
293
+
294
+ if return_tensors is not None:
295
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
296
+
297
+ return padded_inputs
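End to end, the `__call__` method above is typically used like this (a sketch on random audio with the default constructor settings; a pretrained checkpoint would normally supply the exact configuration):

import numpy as np
from transformers import Speech2TextFeatureExtractor

feature_extractor = Speech2TextFeatureExtractor()  # defaults mirror the __init__ above
speech = [np.random.randn(16000).astype(np.float32), np.random.randn(24000).astype(np.float32)]
inputs = feature_extractor(speech, sampling_rate=16000, padding=True, return_tensors="pt")
print(inputs["input_features"].shape)   # (2, max_num_frames, 80)
print(inputs["attention_mask"].shape)   # (2, max_num_frames)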
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_speech_to_text.py ADDED
@@ -0,0 +1,1370 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Speech2Text model."""
16
+
17
+ import math
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ from torch import nn
22
+ from torch.nn import CrossEntropyLoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
26
+ from ...modeling_outputs import (
27
+ BaseModelOutput,
28
+ BaseModelOutputWithPastAndCrossAttentions,
29
+ Seq2SeqLMOutput,
30
+ Seq2SeqModelOutput,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import (
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ logging,
37
+ replace_return_docstrings,
38
+ )
39
+ from .configuration_speech_to_text import Speech2TextConfig
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+ _CONFIG_FOR_DOC = "Speech2TextConfig"
45
+
46
+
47
+ from ..deprecated._archive_maps import SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
48
+
49
+
50
+ # Copied from transformers.models.bart.modeling_bart.shift_tokens_right
51
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
52
+ """
53
+ Shift input ids one token to the right.
54
+ """
55
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
56
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
57
+ shifted_input_ids[:, 0] = decoder_start_token_id
58
+
59
+ if pad_token_id is None:
60
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
61
+ # replace possible -100 values in labels by `pad_token_id`
62
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
63
+
64
+ return shifted_input_ids
65
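A quick worked example of the shift performed by `shift_tokens_right` above (toy ids; `pad_token_id=1` and `decoder_start_token_id=2` match the values used elsewhere in this model):

import torch

labels = torch.tensor([[5, 6, 7, -100]])          # -100 marks ignored/padded label positions
decoder_input_ids = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
print(decoder_input_ids)                          # tensor([[2, 5, 6, 7]])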
+
66
+
67
+ class Conv1dSubsampler(nn.Module):
68
+ """
69
+ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
70
+ via gated linear units (https://arxiv.org/abs/1911.08460)
71
+ """
72
+
73
+ def __init__(self, config):
74
+ super(Conv1dSubsampler, self).__init__()
75
+ self.config = config
76
+ self.num_layers = config.num_conv_layers
77
+ self.in_channels = config.input_feat_per_channel * config.input_channels
78
+ self.mid_channels = config.conv_channels
79
+ self.out_channels = config.d_model
80
+ self.kernel_sizes = config.conv_kernel_sizes
81
+
82
+ self.conv_layers = nn.ModuleList(
83
+ nn.Conv1d(
84
+ self.in_channels if i == 0 else self.mid_channels // 2,
85
+ self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
86
+ kernel_size=k,
87
+ stride=2,
88
+ padding=k // 2,
89
+ )
90
+ for i, k in enumerate(self.kernel_sizes)
91
+ )
92
+
93
+ def forward(self, input_features):
94
+ hidden_states = input_features.transpose(1, 2).contiguous() # -> B x (C x D) x T
95
+ for conv in self.conv_layers:
96
+ hidden_states = conv(hidden_states)
97
+ hidden_states = nn.functional.glu(hidden_states, dim=1)
98
+ hidden_states = hidden_states.transpose(1, 2).contiguous() # -> B x T x d_model
99
+ return hidden_states
100
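To make the subsampling concrete, a shape sketch under assumed (default-like) config values: two stride-2 convolutions over 80-dimensional filterbank features with `d_model=256`, each convolution roughly halving the time dimension. The module is imported from this file's own namespace.

import torch
from transformers import Speech2TextConfig
from transformers.models.speech_to_text.modeling_speech_to_text import Conv1dSubsampler

config = Speech2TextConfig(input_feat_per_channel=80, input_channels=1, conv_kernel_sizes=[5, 5], d_model=256)
subsampler = Conv1dSubsampler(config)
features = torch.randn(2, 100, 80)          # (batch, frames, feature_size)
out = subsampler(features)
print(out.shape)                            # torch.Size([2, 25, 256]): each stride-2 conv roughly halves the frame count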
+
101
+
102
+ class Speech2TextSinusoidalPositionalEmbedding(nn.Module):
103
+ """This module produces sinusoidal positional embeddings of any length."""
104
+
105
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
106
+ super().__init__()
107
+ self.offset = 2
108
+ self.embedding_dim = embedding_dim
109
+ self.padding_idx = padding_idx
110
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
111
+
112
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
113
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
114
+ if hasattr(self, "weights"):
115
+ # in forward put the weights on the correct dtype and device of the param
116
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
117
+
118
+ self.weights = nn.Parameter(emb_weights)
119
+ self.weights.requires_grad = False
120
+ self.weights.detach_()
121
+
122
+ @staticmethod
123
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
124
+ """
125
+ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
126
+ description in Section 3.5 of "Attention Is All You Need".
127
+ """
128
+ half_dim = embedding_dim // 2
129
+ emb = math.log(10000) / (half_dim - 1)
130
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
131
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
132
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
133
+ if embedding_dim % 2 == 1:
134
+ # zero pad
135
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
136
+ if padding_idx is not None:
137
+ emb[padding_idx, :] = 0
138
+ return emb.to(torch.get_default_dtype())
139
+
140
+ @torch.no_grad()
141
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
142
+ bsz, seq_len = input_ids.size()
143
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
144
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
145
+ input_ids.device
146
+ )
147
+
148
+ # expand embeddings if needed
149
+ max_pos = self.padding_idx + 1 + seq_len
150
+ if max_pos > self.weights.size(0):
151
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
152
+
153
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
154
+
155
+ def create_position_ids_from_input_ids(
156
+ self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
157
+ ):
158
+ """
159
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
160
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
161
+
162
+ Args:
163
+ input_ids (torch.Tensor): input token indices.
164
+ Returns: torch.Tensor
165
+ """
166
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
167
+ mask = input_ids.ne(padding_idx).int()
168
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
169
+ return incremental_indices.long() + padding_idx
170
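A toy example of the position-id computation above (assuming `padding_idx=1`, the usual pad token id for this model): padding tokens keep `padding_idx` as their position while real tokens count up from `padding_idx + 1`.

import torch

input_ids = torch.tensor([[7, 8, 9, 1, 1]])   # 1 is the padding index here
mask = input_ids.ne(1).int()                  # [[1, 1, 1, 0, 0]]
position_ids = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + 1
print(position_ids)                           # tensor([[2, 3, 4, 1, 1]])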
+
171
+
172
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Speech2Text
173
+ class Speech2TextAttention(nn.Module):
174
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
175
+
176
+ def __init__(
177
+ self,
178
+ embed_dim: int,
179
+ num_heads: int,
180
+ dropout: float = 0.0,
181
+ is_decoder: bool = False,
182
+ bias: bool = True,
183
+ is_causal: bool = False,
184
+ config: Optional[Speech2TextConfig] = None,
185
+ ):
186
+ super().__init__()
187
+ self.embed_dim = embed_dim
188
+ self.num_heads = num_heads
189
+ self.dropout = dropout
190
+ self.head_dim = embed_dim // num_heads
191
+ self.config = config
192
+
193
+ if (self.head_dim * num_heads) != self.embed_dim:
194
+ raise ValueError(
195
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
196
+ f" and `num_heads`: {num_heads})."
197
+ )
198
+ self.scaling = self.head_dim**-0.5
199
+ self.is_decoder = is_decoder
200
+ self.is_causal = is_causal
201
+
202
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
203
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
204
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
205
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
206
+
207
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
208
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
209
+
210
+ def forward(
211
+ self,
212
+ hidden_states: torch.Tensor,
213
+ key_value_states: Optional[torch.Tensor] = None,
214
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
215
+ attention_mask: Optional[torch.Tensor] = None,
216
+ layer_head_mask: Optional[torch.Tensor] = None,
217
+ output_attentions: bool = False,
218
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
219
+ """Input shape: Batch x Time x Channel"""
220
+
221
+ # if key_value_states are provided this layer is used as a cross-attention layer
222
+ # for the decoder
223
+ is_cross_attention = key_value_states is not None
224
+
225
+ bsz, tgt_len, _ = hidden_states.size()
226
+
227
+ # get query proj
228
+ query_states = self.q_proj(hidden_states) * self.scaling
229
+ # get key, value proj
230
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
231
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
232
+ # the provided `key_value_states` to support prefix tuning
233
+ if (
234
+ is_cross_attention
235
+ and past_key_value is not None
236
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
237
+ ):
238
+ # reuse k,v, cross_attentions
239
+ key_states = past_key_value[0]
240
+ value_states = past_key_value[1]
241
+ elif is_cross_attention:
242
+ # cross_attentions
243
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
244
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
245
+ elif past_key_value is not None:
246
+ # reuse k, v, self_attention
247
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
248
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
249
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
250
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
251
+ else:
252
+ # self_attention
253
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
254
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
255
+
256
+ if self.is_decoder:
257
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
258
+ # Further calls to cross_attention layer can then reuse all cross-attention
259
+ # key/value_states (first "if" case)
260
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
261
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
262
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
263
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
264
+ past_key_value = (key_states, value_states)
265
+
266
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
267
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
268
+ key_states = key_states.reshape(*proj_shape)
269
+ value_states = value_states.reshape(*proj_shape)
270
+
271
+ src_len = key_states.size(1)
272
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
273
+
274
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
275
+ raise ValueError(
276
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
277
+ f" {attn_weights.size()}"
278
+ )
279
+
280
+ if attention_mask is not None:
281
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
282
+ raise ValueError(
283
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
284
+ )
285
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
286
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
287
+
288
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
289
+
290
+ if layer_head_mask is not None:
291
+ if layer_head_mask.size() != (self.num_heads,):
292
+ raise ValueError(
293
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
294
+ f" {layer_head_mask.size()}"
295
+ )
296
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
297
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
298
+
299
+ if output_attentions:
300
+ # this operation is a bit awkward, but it's required to
301
+ # make sure that attn_weights keeps its gradient.
302
+ # In order to do so, attn_weights have to be reshaped
303
+ # twice and have to be reused in the following
304
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
305
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
306
+ else:
307
+ attn_weights_reshaped = None
308
+
309
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
310
+
311
+ attn_output = torch.bmm(attn_probs, value_states)
312
+
313
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
314
+ raise ValueError(
315
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
316
+ f" {attn_output.size()}"
317
+ )
318
+
319
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
320
+ attn_output = attn_output.transpose(1, 2)
321
+
322
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
323
+ # partitioned across GPUs when using tensor-parallelism.
324
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
325
+
326
+ attn_output = self.out_proj(attn_output)
327
+
328
+ return attn_output, attn_weights_reshaped, past_key_value
329
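A small shape check for the attention module above (a sketch; the tiny dimensions are arbitrary and the class is imported directly from this file's namespace rather than the public API):

import torch
from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextAttention

attn = Speech2TextAttention(embed_dim=16, num_heads=4)
hidden_states = torch.randn(2, 10, 16)                       # (batch, time, embed_dim)
attn_output, attn_weights, _ = attn(hidden_states, output_attentions=True)
print(attn_output.shape)   # torch.Size([2, 10, 16])
print(attn_weights.shape)  # torch.Size([2, 4, 10, 10])  -> (batch, heads, tgt_len, src_len)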
+
330
+
331
+ SPEECH_TO_TEXT_ATTENTION_CLASSES = {"eager": Speech2TextAttention}
332
+
333
+
334
+ # Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT
335
+ class Speech2TextEncoderLayer(nn.Module):
336
+ def __init__(self, config: Speech2TextConfig):
337
+ super().__init__()
338
+ self.embed_dim = config.d_model
339
+
340
+ self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation](
341
+ embed_dim=self.embed_dim,
342
+ num_heads=config.encoder_attention_heads,
343
+ dropout=config.attention_dropout,
344
+ config=config,
345
+ )
346
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
347
+ self.dropout = config.dropout
348
+ self.activation_fn = ACT2FN[config.activation_function]
349
+ self.activation_dropout = config.activation_dropout
350
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
351
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
352
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
353
+
354
+ def forward(
355
+ self,
356
+ hidden_states: torch.Tensor,
357
+ attention_mask: torch.Tensor,
358
+ layer_head_mask: torch.Tensor,
359
+ output_attentions: bool = False,
360
+ ) -> torch.Tensor:
361
+ """
362
+ Args:
363
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
364
+ attention_mask (`torch.FloatTensor`): attention mask of size
365
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
366
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
367
+ `(encoder_attention_heads,)`.
368
+ output_attentions (`bool`, *optional*):
369
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
370
+ returned tensors for more detail.
371
+ """
372
+ residual = hidden_states
373
+ hidden_states = self.self_attn_layer_norm(hidden_states)
374
+ hidden_states, attn_weights, _ = self.self_attn(
375
+ hidden_states=hidden_states,
376
+ attention_mask=attention_mask,
377
+ layer_head_mask=layer_head_mask,
378
+ output_attentions=output_attentions,
379
+ )
380
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
381
+ hidden_states = residual + hidden_states
382
+
383
+ residual = hidden_states
384
+ hidden_states = self.final_layer_norm(hidden_states)
385
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
386
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
387
+ hidden_states = self.fc2(hidden_states)
388
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
389
+ hidden_states = residual + hidden_states
390
+
391
+ if hidden_states.dtype == torch.float16 and (
392
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
393
+ ):
394
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
395
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
396
+
397
+ outputs = (hidden_states,)
398
+
399
+ if output_attentions:
400
+ outputs += (attn_weights,)
401
+
402
+ return outputs
403
+
404
+
405
+ # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT
406
+ class Speech2TextDecoderLayer(nn.Module):
407
+ def __init__(self, config: Speech2TextConfig):
408
+ super().__init__()
409
+ self.embed_dim = config.d_model
410
+
411
+ self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation](
412
+ embed_dim=self.embed_dim,
413
+ num_heads=config.decoder_attention_heads,
414
+ dropout=config.attention_dropout,
415
+ is_decoder=True,
416
+ is_causal=True,
417
+ config=config,
418
+ )
419
+ self.dropout = config.dropout
420
+ self.activation_fn = ACT2FN[config.activation_function]
421
+ self.activation_dropout = config.activation_dropout
422
+
423
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
424
+ self.encoder_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation](
425
+ self.embed_dim,
426
+ config.decoder_attention_heads,
427
+ dropout=config.attention_dropout,
428
+ is_decoder=True,
429
+ config=config,
430
+ )
431
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
432
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
433
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
434
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
435
+
436
+ def forward(
437
+ self,
438
+ hidden_states: torch.Tensor,
439
+ attention_mask: Optional[torch.Tensor] = None,
440
+ encoder_hidden_states: Optional[torch.Tensor] = None,
441
+ encoder_attention_mask: Optional[torch.Tensor] = None,
442
+ layer_head_mask: Optional[torch.Tensor] = None,
443
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
444
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
445
+ output_attentions: Optional[bool] = False,
446
+ use_cache: Optional[bool] = True,
447
+ ) -> torch.Tensor:
448
+ """
449
+ Args:
450
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
451
+ attention_mask (`torch.FloatTensor`): attention mask of size
452
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
453
+ encoder_hidden_states (`torch.FloatTensor`):
454
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
455
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
456
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
457
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
458
+ `(encoder_attention_heads,)`.
459
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
460
+ size `(decoder_attention_heads,)`.
461
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
462
+ output_attentions (`bool`, *optional*):
463
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
464
+ returned tensors for more detail.
465
+ """
466
+ residual = hidden_states
467
+ hidden_states = self.self_attn_layer_norm(hidden_states)
468
+
469
+ # Self Attention
470
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
471
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
472
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
473
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
474
+ hidden_states=hidden_states,
475
+ past_key_value=self_attn_past_key_value,
476
+ attention_mask=attention_mask,
477
+ layer_head_mask=layer_head_mask,
478
+ output_attentions=output_attentions,
479
+ )
480
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
481
+ hidden_states = residual + hidden_states
482
+
483
+ # Cross-Attention Block
484
+ cross_attn_present_key_value = None
485
+ cross_attn_weights = None
486
+ if encoder_hidden_states is not None:
487
+ residual = hidden_states
488
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
489
+
490
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
491
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
492
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
493
+ hidden_states=hidden_states,
494
+ key_value_states=encoder_hidden_states,
495
+ attention_mask=encoder_attention_mask,
496
+ layer_head_mask=cross_attn_layer_head_mask,
497
+ past_key_value=cross_attn_past_key_value,
498
+ output_attentions=output_attentions,
499
+ )
500
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
501
+ hidden_states = residual + hidden_states
502
+
503
+ # add cross-attn to positions 3,4 of present_key_value tuple
504
+ present_key_value = present_key_value + cross_attn_present_key_value
505
+
506
+ # Fully Connected
507
+ residual = hidden_states
508
+ hidden_states = self.final_layer_norm(hidden_states)
509
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
510
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
511
+ hidden_states = self.fc2(hidden_states)
512
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
513
+ hidden_states = residual + hidden_states
514
+
515
+ outputs = (hidden_states,)
516
+
517
+ if output_attentions:
518
+ outputs += (self_attn_weights, cross_attn_weights)
519
+
520
+ if use_cache:
521
+ outputs += (present_key_value,)
522
+
523
+ return outputs
524
+
525
+
526
+ class Speech2TextPreTrainedModel(PreTrainedModel):
527
+ config_class = Speech2TextConfig
528
+ base_model_prefix = "model"
529
+ main_input_name = "input_features"
530
+ supports_gradient_checkpointing = True
531
+
532
+ def _init_weights(self, module):
533
+ std = self.config.init_std
534
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
535
+ module.weight.data.normal_(mean=0.0, std=std)
536
+ if module.bias is not None:
537
+ module.bias.data.zero_()
538
+ elif isinstance(module, nn.Embedding):
539
+ module.weight.data.normal_(mean=0.0, std=std)
540
+ if module.padding_idx is not None:
541
+ module.weight.data[module.padding_idx].zero_()
542
+
543
+ def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
544
+ """
545
+ Computes the output length of the convolutional layers
546
+ """
547
+ for i in range(self.config.num_conv_layers):
548
+ input_lengths = (input_lengths - 1) // 2 + 1
549
+
550
+ return input_lengths
551
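Each stride-2 convolution maps a length L to (L - 1) // 2 + 1, so with the default two conv layers the subsampled lengths work out as in this quick example:

import torch

input_lengths = torch.tensor([584, 100, 7])
for _ in range(2):                       # num_conv_layers = 2 in the default config
    input_lengths = (input_lengths - 1) // 2 + 1
print(input_lengths)                     # tensor([146,  25,   2])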
+
552
+ def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
553
+ # generate creates 3D attention mask, because of the shape of input_features
554
+ # convert it to 2D if thats the case
555
+ if len(attention_mask.shape) > 2:
556
+ attention_mask = attention_mask[:, :, -1]
557
+
558
+ subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
559
+ bsz = attention_mask.size()[0]
560
+ attention_mask = torch.zeros(
561
+ (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
562
+ )
563
+
564
+ # these two operations makes sure that all values
565
+ # before the output lengths indices are attended to
566
+ attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
567
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
568
+ return attention_mask
569
+
570
+
571
+ SPEECH_TO_TEXT_START_DOCSTRING = r"""
572
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
573
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
574
+ etc.)
575
+
576
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
577
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
578
+ and behavior.
579
+
580
+ Parameters:
581
+ config ([`Speech2TextConfig`]):
582
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
583
+ load the weights associated with the model, only the configuration. Check out the
584
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
585
+ """
586
+
587
+ SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
588
+ Args:
589
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
590
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
591
+ by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
592
+ via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
593
+ [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a
594
+ tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`]
595
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
596
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
597
+ 1]`:
598
+
599
+ - 1 for tokens that are **not masked**,
600
+ - 0 for tokens that are **masked**.
601
+
602
+ [What are attention masks?](../glossary#attention-mask)
603
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
604
+ Indices of decoder input sequence tokens in the vocabulary.
605
+
606
+ Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
607
+ [`PreTrainedTokenizer.__call__`] for details.
608
+
609
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
610
+
611
+ SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
612
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
613
+ `past_key_values`).
614
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
615
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
616
+ be used by default.
617
+
618
+ If you want to change padding behavior, you should read
619
+ [`modeling_speech_to_text._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the
620
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
621
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
622
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
623
+
624
+ - 1 indicates the head is **not masked**,
625
+ - 0 indicates the head is **masked**.
626
+
627
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
628
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
629
+
630
+ - 1 indicates the head is **not masked**,
631
+ - 0 indicates the head is **masked**.
632
+
633
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
634
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
635
+
636
+ - 1 indicates the head is **not masked**,
637
+ - 0 indicates the head is **masked**.
638
+
639
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
640
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
641
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
642
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
643
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
644
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
645
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
646
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
647
+
648
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
649
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
650
+
651
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
652
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
653
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
654
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
655
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
656
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
657
+ input (see `past_key_values`). This is useful if you want more control over how to convert
658
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
659
+ use_cache (`bool`, *optional*):
660
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
661
+ `past_key_values`).
662
+ output_attentions (`bool`, *optional*):
663
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
664
+ tensors for more detail.
665
+ output_hidden_states (`bool`, *optional*):
666
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
667
+ more detail.
668
+ return_dict (`bool`, *optional*):
669
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
670
+ """
671
+
672
+
673
+ class Speech2TextEncoder(Speech2TextPreTrainedModel):
674
+ """
675
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
676
+ [`Speech2TextEncoderLayer`].
677
+
678
+ Args:
679
+ config: Speech2TextConfig
680
+ embed_tokens (nn.Embedding): output embedding
681
+ """
682
+
683
+ def __init__(self, config: Speech2TextConfig):
684
+ super().__init__(config)
685
+
686
+ self.dropout = config.dropout
687
+ self.layerdrop = config.encoder_layerdrop
688
+
689
+ embed_dim = config.d_model
690
+ self.padding_idx = config.pad_token_id
691
+ self.max_source_positions = config.max_source_positions
692
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
693
+
694
+ self.conv = Conv1dSubsampler(config)
695
+
696
+ self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
697
+ self.max_source_positions,
698
+ embed_dim,
699
+ self.padding_idx,
700
+ )
701
+ self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)])
702
+ self.layer_norm = nn.LayerNorm(config.d_model)
703
+
704
+ self.gradient_checkpointing = False
705
+ # Initialize weights and apply final processing
706
+ self.post_init()
707
+
708
+ def forward(
709
+ self,
710
+ input_features,
711
+ attention_mask=None,
712
+ head_mask=None,
713
+ output_attentions=None,
714
+ output_hidden_states=None,
715
+ return_dict=None,
716
+ ):
717
+ r"""
718
+ Args:
719
+ input_features (`torch.LongTensor` of shape `(batch_size, sequence_length, feature_size)`):
720
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
721
+ obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
722
+ `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
723
+ `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
724
+ padding and conversion into a tensor of type `torch.FloatTensor`. See
725
+ [`~Speech2TextFeatureExtractor.__call__`]
726
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
727
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
728
+ `[0, 1]`:
729
+
730
+ - 1 for tokens that are **not masked**,
731
+ - 0 for tokens that are **masked**.
732
+
733
+ [What are attention masks?](../glossary#attention-mask)
734
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
735
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
736
+
737
+ - 1 indicates the head is **not masked**,
738
+ - 0 indicates the head is **masked**.
739
+
740
+ output_attentions (`bool`, *optional*):
741
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
742
+ returned tensors for more detail.
743
+ output_hidden_states (`bool`, *optional*):
744
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
745
+ for more detail.
746
+ return_dict (`bool`, *optional*):
747
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
748
+ """
749
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
750
+ output_hidden_states = (
751
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
752
+ )
753
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
754
+ inputs_embeds = self.conv(input_features)
755
+ inputs_embeds = self.embed_scale * inputs_embeds
756
+
757
+ # subsample attention mask if necessary
758
+ if attention_mask is not None:
759
+ attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
760
+ padding_mask = attention_mask.ne(1).long()
761
+ else:
762
+ padding_mask = torch.zeros(inputs_embeds.shape[:2], dtype=torch.long, device=inputs_embeds.device)
763
+
764
+ embed_pos = self.embed_positions(padding_mask)
765
+
766
+ hidden_states = inputs_embeds + embed_pos
767
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
768
+
769
+ # expand attention_mask
770
+ if attention_mask is not None:
771
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
772
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
773
+
774
+ encoder_states = () if output_hidden_states else None
775
+ all_attentions = () if output_attentions else None
776
+
777
+ # check if head_mask has a correct number of layers specified if desired
778
+ if head_mask is not None:
779
+ assert head_mask.size()[0] == (
780
+ len(self.layers)
781
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
782
+
783
+ for idx, encoder_layer in enumerate(self.layers):
784
+ if output_hidden_states:
785
+ encoder_states = encoder_states + (hidden_states,)
786
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
787
+ to_drop = False
788
+ if self.training:
789
+ dropout_probability = torch.rand([])
790
+ if dropout_probability < self.layerdrop: # skip the layer
791
+ to_drop = True
792
+
793
+ if to_drop:
794
+ layer_outputs = (None, None)
795
+ else:
796
+ if self.gradient_checkpointing and self.training:
797
+ layer_outputs = self._gradient_checkpointing_func(
798
+ encoder_layer.__call__,
799
+ hidden_states,
800
+ attention_mask,
801
+ (head_mask[idx] if head_mask is not None else None),
802
+ output_attentions,
803
+ )
804
+ else:
805
+ layer_outputs = encoder_layer(
806
+ hidden_states,
807
+ attention_mask,
808
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
809
+ output_attentions=output_attentions,
810
+ )
811
+
812
+ hidden_states = layer_outputs[0]
813
+
814
+ if output_attentions:
815
+ all_attentions = all_attentions + (layer_outputs[1],)
816
+
817
+ hidden_states = self.layer_norm(hidden_states)
818
+ if output_hidden_states:
819
+ encoder_states = encoder_states + (hidden_states,)
820
+
821
+ if not return_dict:
822
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
823
+ return BaseModelOutput(
824
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
825
+ )
826
+
827
+
828
+ class Speech2TextDecoder(Speech2TextPreTrainedModel):
829
+ """
830
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`Speech2TextDecoderLayer`]
831
+
832
+ Args:
833
+ config: Speech2TextConfig
834
+ embed_tokens (nn.Embedding): output embedding
835
+ """
836
+
837
+ def __init__(self, config: Speech2TextConfig):
838
+ super().__init__(config)
839
+ self.dropout = config.dropout
840
+ self.layerdrop = config.decoder_layerdrop
841
+ self.padding_idx = config.pad_token_id
842
+ self.max_target_positions = config.max_target_positions
843
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
844
+
845
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
846
+
847
+ self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
848
+ self.max_target_positions,
849
+ config.d_model,
850
+ self.padding_idx,
851
+ )
852
+
853
+ self.layers = nn.ModuleList([Speech2TextDecoderLayer(config) for _ in range(config.decoder_layers)])
854
+
855
+ self.layer_norm = nn.LayerNorm(config.d_model)
856
+
857
+ self.gradient_checkpointing = False
858
+ # Initialize weights and apply final processing
859
+ self.post_init()
860
+
861
+ def get_input_embeddings(self):
862
+ return self.embed_tokens
863
+
864
+ def set_input_embeddings(self, value):
865
+ self.embed_tokens = value
866
+
867
+ def forward(
868
+ self,
869
+ input_ids=None,
870
+ attention_mask=None,
871
+ encoder_hidden_states=None,
872
+ encoder_attention_mask=None,
873
+ head_mask=None,
874
+ cross_attn_head_mask=None,
875
+ past_key_values=None,
876
+ inputs_embeds=None,
877
+ use_cache=None,
878
+ output_attentions=None,
879
+ output_hidden_states=None,
880
+ return_dict=None,
881
+ ):
882
+ r"""
883
+ Args:
884
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
885
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
886
+ provide it.
887
+
888
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
889
+ [`PreTrainedTokenizer.__call__`] for details.
890
+
891
+ [What are input IDs?](../glossary#input-ids)
892
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
893
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
894
+
895
+ - 1 for tokens that are **not masked**,
896
+ - 0 for tokens that are **masked**.
897
+
898
+ [What are attention masks?](../glossary#attention-mask)
899
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
900
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
901
+ of the decoder.
902
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
903
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
904
+ selected in `[0, 1]`:
905
+
906
+ - 1 for tokens that are **not masked**,
907
+ - 0 for tokens that are **masked**.
908
+
909
+ [What are attention masks?](../glossary#attention-mask)
910
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
911
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
912
+
913
+ - 1 indicates the head is **not masked**,
914
+ - 0 indicates the head is **masked**.
915
+
916
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
917
+ Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
918
+ on hidden heads. Mask values selected in `[0, 1]`:
919
+
920
+ - 1 indicates the head is **not masked**,
921
+ - 0 indicates the head is **masked**.
922
+
923
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
924
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
925
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
926
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
927
+
928
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
929
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
930
+
931
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
932
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
933
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
934
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
935
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
936
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
937
+ than the model's internal embedding lookup matrix.
938
+ output_attentions (`bool`, *optional*):
939
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
940
+ returned tensors for more detail.
941
+ output_hidden_states (`bool`, *optional*):
942
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
943
+ for more detail.
944
+ return_dict (`bool`, *optional*):
945
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
946
+ """
947
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
948
+ output_hidden_states = (
949
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
950
+ )
951
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
952
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
953
+
954
+ # retrieve input_ids and inputs_embeds
955
+ if input_ids is not None and inputs_embeds is not None:
956
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
957
+ elif input_ids is not None:
958
+ input_shape = input_ids.size()
959
+ input_ids = input_ids.view(-1, input_shape[-1])
960
+ elif inputs_embeds is not None:
961
+ input_shape = inputs_embeds.size()[:-1]
962
+ else:
963
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
964
+
965
+ # past_key_values_length
966
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
967
+
968
+ if inputs_embeds is None:
969
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
970
+
971
+ attention_mask = _prepare_4d_causal_attention_mask(
972
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
973
+ )
974
+
975
+ # expand encoder attention mask
976
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
977
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
978
+ encoder_attention_mask = _prepare_4d_attention_mask(
979
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
980
+ )
981
+
982
+ # embed positions
983
+ positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
984
+
985
+ hidden_states = inputs_embeds + positions
986
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
987
+
988
+ if self.gradient_checkpointing and self.training:
989
+ if use_cache:
990
+ logger.warning_once(
991
+ "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..."
992
+ )
993
+ use_cache = False
994
+
995
+ # decoder layers
996
+ all_hidden_states = () if output_hidden_states else None
997
+ all_self_attns = () if output_attentions else None
998
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
999
+ next_decoder_cache = () if use_cache else None
1000
+
1001
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
1002
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
1003
+ if attn_mask is not None:
1004
+ assert attn_mask.size()[0] == (len(self.layers)), (
1005
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
1006
+ f" {head_mask.size()[0]}."
1007
+ )
1008
+ for idx, decoder_layer in enumerate(self.layers):
1009
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1010
+ if output_hidden_states:
1011
+ all_hidden_states += (hidden_states,)
1012
+ if self.training:
1013
+ dropout_probability = torch.rand([])
1014
+ if dropout_probability < self.layerdrop:
1015
+ continue
1016
+
1017
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1018
+
1019
+ if self.gradient_checkpointing and self.training:
1020
+ layer_outputs = self._gradient_checkpointing_func(
1021
+ decoder_layer.__call__,
1022
+ hidden_states,
1023
+ attention_mask,
1024
+ encoder_hidden_states,
1025
+ encoder_attention_mask,
1026
+ head_mask[idx] if head_mask is not None else None,
1027
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1028
+ None,
1029
+ output_attentions,
1030
+ use_cache,
1031
+ )
1032
+ else:
1033
+ layer_outputs = decoder_layer(
1034
+ hidden_states,
1035
+ attention_mask=attention_mask,
1036
+ encoder_hidden_states=encoder_hidden_states,
1037
+ encoder_attention_mask=encoder_attention_mask,
1038
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1039
+ cross_attn_layer_head_mask=(
1040
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1041
+ ),
1042
+ past_key_value=past_key_value,
1043
+ output_attentions=output_attentions,
1044
+ use_cache=use_cache,
1045
+ )
1046
+ hidden_states = layer_outputs[0]
1047
+
1048
+ if use_cache:
1049
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
1050
+
1051
+ if output_attentions:
1052
+ all_self_attns += (layer_outputs[1],)
1053
+
1054
+ if encoder_hidden_states is not None:
1055
+ all_cross_attentions += (layer_outputs[2],)
1056
+
1057
+ hidden_states = self.layer_norm(hidden_states)
1058
+ # add hidden states from the last decoder layer
1059
+ if output_hidden_states:
1060
+ all_hidden_states += (hidden_states,)
1061
+
1062
+ next_cache = next_decoder_cache if use_cache else None
1063
+ if not return_dict:
1064
+ return tuple(
1065
+ v
1066
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
1067
+ if v is not None
1068
+ )
1069
+ return BaseModelOutputWithPastAndCrossAttentions(
1070
+ last_hidden_state=hidden_states,
1071
+ past_key_values=next_cache,
1072
+ hidden_states=all_hidden_states,
1073
+ attentions=all_self_attns,
1074
+ cross_attentions=all_cross_attentions,
1075
+ )
1076
+
1077
+
1078
+ @add_start_docstrings(
1079
+ "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
1080
+ SPEECH_TO_TEXT_START_DOCSTRING,
1081
+ )
1082
+ class Speech2TextModel(Speech2TextPreTrainedModel):
1083
+ def __init__(self, config: Speech2TextConfig):
1084
+ super().__init__(config)
1085
+
1086
+ self.encoder = Speech2TextEncoder(config)
1087
+ self.decoder = Speech2TextDecoder(config)
1088
+
1089
+ # Initialize weights and apply final processing
1090
+ self.post_init()
1091
+
1092
+ def get_input_embeddings(self):
1093
+ return self.decoder.embed_tokens
1094
+
1095
+ def set_input_embeddings(self, value):
1096
+ self.decoder.embed_tokens = value
1097
+
1098
+ def get_encoder(self):
1099
+ return self.encoder
1100
+
1101
+ def get_decoder(self):
1102
+ return self.decoder
1103
+
1104
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
1105
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1106
+ def forward(
1107
+ self,
1108
+ input_features: Optional[torch.LongTensor] = None,
1109
+ attention_mask: Optional[torch.Tensor] = None,
1110
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1111
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1112
+ head_mask: Optional[torch.Tensor] = None,
1113
+ decoder_head_mask: Optional[torch.Tensor] = None,
1114
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1115
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1116
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1117
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1118
+ use_cache: Optional[bool] = None,
1119
+ output_attentions: Optional[bool] = None,
1120
+ output_hidden_states: Optional[bool] = None,
1121
+ return_dict: Optional[bool] = None,
1122
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
1123
+ r"""
1124
+ Returns:
1125
+
1126
+ Example:
1127
+
1128
+ ```python
1129
+ >>> import torch
1130
+ >>> from transformers import Speech2TextModel, AutoFeatureExtractor
1131
+ >>> from datasets import load_dataset
1132
+
1133
+ >>> model = Speech2TextModel.from_pretrained("facebook/s2t-small-librispeech-asr")
1134
+ >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
1135
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1136
+ >>> inputs = feature_extractor(
1137
+ ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
1138
+ ... )
1139
+ >>> input_features = inputs.input_features
1140
+ >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
1141
+ >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
1142
+ >>> list(last_hidden_state.shape)
1143
+ [1, 2, 256]
1144
+ ```"""
1145
+
1146
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1147
+ output_hidden_states = (
1148
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1149
+ )
1150
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1151
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1152
+
1153
+ if encoder_outputs is None:
1154
+ encoder_outputs = self.encoder(
1155
+ input_features,
1156
+ attention_mask=attention_mask,
1157
+ head_mask=head_mask,
1158
+ output_attentions=output_attentions,
1159
+ output_hidden_states=output_hidden_states,
1160
+ return_dict=return_dict,
1161
+ )
1162
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
1163
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1164
+ encoder_outputs = BaseModelOutput(
1165
+ last_hidden_state=encoder_outputs[0],
1166
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1167
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1168
+ )
1169
+
1170
+ # downsample encoder attention mask
1171
+ if attention_mask is not None:
1172
+ encoder_attention_mask = self._get_feature_vector_attention_mask(
1173
+ encoder_outputs[0].shape[1], attention_mask
1174
+ )
1175
+ else:
1176
+ encoder_attention_mask = None
1177
+
1178
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
1179
+ decoder_outputs = self.decoder(
1180
+ input_ids=decoder_input_ids,
1181
+ attention_mask=decoder_attention_mask,
1182
+ encoder_hidden_states=encoder_outputs[0],
1183
+ encoder_attention_mask=encoder_attention_mask,
1184
+ head_mask=decoder_head_mask,
1185
+ cross_attn_head_mask=cross_attn_head_mask,
1186
+ past_key_values=past_key_values,
1187
+ inputs_embeds=decoder_inputs_embeds,
1188
+ use_cache=use_cache,
1189
+ output_attentions=output_attentions,
1190
+ output_hidden_states=output_hidden_states,
1191
+ return_dict=return_dict,
1192
+ )
1193
+
1194
+ if not return_dict:
1195
+ return decoder_outputs + encoder_outputs
1196
+
1197
+ return Seq2SeqModelOutput(
1198
+ last_hidden_state=decoder_outputs.last_hidden_state,
1199
+ past_key_values=decoder_outputs.past_key_values,
1200
+ decoder_hidden_states=decoder_outputs.hidden_states,
1201
+ decoder_attentions=decoder_outputs.attentions,
1202
+ cross_attentions=decoder_outputs.cross_attentions,
1203
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1204
+ encoder_hidden_states=encoder_outputs.hidden_states,
1205
+ encoder_attentions=encoder_outputs.attentions,
1206
+ )
1207
+
1208
+
1209
+ @add_start_docstrings(
1210
+ "The Speech2Text Model with a language modeling head. Can be used for summarization.",
1211
+ SPEECH_TO_TEXT_START_DOCSTRING,
1212
+ )
1213
+ class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel):
1214
+ base_model_prefix = "model"
1215
+ _tied_weights_keys = ["lm_head.weight"]
1216
+
1217
+ def __init__(self, config: Speech2TextConfig):
1218
+ super().__init__(config)
1219
+ self.model = Speech2TextModel(config)
1220
+ self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False)
1221
+
1222
+ # Initialize weights and apply final processing
1223
+ self.post_init()
1224
+
1225
+ def get_encoder(self):
1226
+ return self.model.get_encoder()
1227
+
1228
+ def get_decoder(self):
1229
+ return self.model.get_decoder()
1230
+
1231
+ def get_output_embeddings(self):
1232
+ return self.lm_head
1233
+
1234
+ def set_output_embeddings(self, new_embeddings):
1235
+ self.lm_head = new_embeddings
1236
+
1237
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
1238
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1239
+ def forward(
1240
+ self,
1241
+ input_features: Optional[torch.LongTensor] = None,
1242
+ attention_mask: Optional[torch.Tensor] = None,
1243
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1244
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1245
+ head_mask: Optional[torch.Tensor] = None,
1246
+ decoder_head_mask: Optional[torch.Tensor] = None,
1247
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1248
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1249
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1250
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1251
+ labels: Optional[torch.LongTensor] = None,
1252
+ use_cache: Optional[bool] = None,
1253
+ output_attentions: Optional[bool] = None,
1254
+ output_hidden_states: Optional[bool] = None,
1255
+ return_dict: Optional[bool] = None,
1256
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
1257
+ r"""
1258
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1259
+ Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
1260
+ or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
1261
+ only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1262
+
1263
+ Returns:
1264
+
1265
+ Example:
1266
+
1267
+ ```python
1268
+ >>> import torch
1269
+ >>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
1270
+ >>> from datasets import load_dataset
1271
+
1272
+ >>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
1273
+ >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
1274
+
1275
+
1276
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1277
+
1278
+ >>> inputs = processor(
1279
+ ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
1280
+ ... )
1281
+ >>> input_features = inputs.input_features
1282
+
1283
+ >>> generated_ids = model.generate(inputs=input_features)
1284
+
1285
+ >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1286
+ >>> transcription
1287
+ 'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel'
1288
+ ```"""
1289
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1290
+
1291
+ if labels is not None:
1292
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1293
+ decoder_input_ids = shift_tokens_right(
1294
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1295
+ )
1296
+
1297
+ outputs = self.model(
1298
+ input_features,
1299
+ attention_mask=attention_mask,
1300
+ decoder_input_ids=decoder_input_ids,
1301
+ encoder_outputs=encoder_outputs,
1302
+ decoder_attention_mask=decoder_attention_mask,
1303
+ head_mask=head_mask,
1304
+ decoder_head_mask=decoder_head_mask,
1305
+ cross_attn_head_mask=cross_attn_head_mask,
1306
+ past_key_values=past_key_values,
1307
+ decoder_inputs_embeds=decoder_inputs_embeds,
1308
+ use_cache=use_cache,
1309
+ output_attentions=output_attentions,
1310
+ output_hidden_states=output_hidden_states,
1311
+ return_dict=return_dict,
1312
+ )
1313
+ lm_logits = self.lm_head(outputs[0])
1314
+
1315
+ loss = None
1316
+ if labels is not None:
1317
+ loss_fct = CrossEntropyLoss()
1318
+ loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
1319
+
1320
+ if not return_dict:
1321
+ output = (lm_logits,) + outputs[1:]
1322
+ return ((loss,) + output) if loss is not None else output
1323
+
1324
+ return Seq2SeqLMOutput(
1325
+ loss=loss,
1326
+ logits=lm_logits,
1327
+ past_key_values=outputs.past_key_values,
1328
+ decoder_hidden_states=outputs.decoder_hidden_states,
1329
+ decoder_attentions=outputs.decoder_attentions,
1330
+ cross_attentions=outputs.cross_attentions,
1331
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1332
+ encoder_hidden_states=outputs.encoder_hidden_states,
1333
+ encoder_attentions=outputs.encoder_attentions,
1334
+ )
1335
+
1336
+ def prepare_inputs_for_generation(
1337
+ self,
1338
+ decoder_input_ids,
1339
+ past_key_values=None,
1340
+ attention_mask=None,
1341
+ head_mask=None,
1342
+ decoder_head_mask=None,
1343
+ cross_attn_head_mask=None,
1344
+ use_cache=None,
1345
+ encoder_outputs=None,
1346
+ **kwargs,
1347
+ ):
1348
+ # cut decoder_input_ids if past is used
1349
+ if past_key_values is not None:
1350
+ decoder_input_ids = decoder_input_ids[:, -1:]
1351
+
1352
+ return {
1353
+ "encoder_outputs": encoder_outputs,
1354
+ "past_key_values": past_key_values,
1355
+ "decoder_input_ids": decoder_input_ids,
1356
+ "attention_mask": attention_mask,
1357
+ "head_mask": head_mask,
1358
+ "decoder_head_mask": decoder_head_mask,
1359
+ "cross_attn_head_mask": cross_attn_head_mask,
1360
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1361
+ }
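+ # Editor's note (illustrative, not part of the original file): once `past_key_values` is
+ # populated, only the most recently generated token is fed back in (`decoder_input_ids[:, -1:]`);
+ # the key/value states of earlier tokens are already cached and are not re-projected each step.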
1362
+
1363
+ @staticmethod
1364
+ def _reorder_cache(past_key_values, beam_idx):
1365
+ reordered_past = ()
1366
+ for layer_past in past_key_values:
1367
+ reordered_past += (
1368
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1369
+ )
1370
+ return reordered_past
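+ # Editor's sketch (illustrative, not part of the original file): during beam search, `generate()`
+ # calls `_reorder_cache` with `beam_idx` so that every layer's cached key/value tensors follow the
+ # surviving beams. For a cache entry of shape (batch_size * num_beams, heads, seq_len, head_dim):
+ #
+ #   beam_idx = torch.tensor([1, 0, 3, 2])             # hypothetical permutation of 4 beams
+ #   reordered = past_state.index_select(0, beam_idx)  # rows now follow the selected beams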
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_tf_speech_to_text.py ADDED
@@ -0,0 +1,1607 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TensorFlow Speech2Text model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import random
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation, glu
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFBaseModelOutputWithPastAndCrossAttentions,
30
+ TFSeq2SeqLMOutput,
31
+ TFSeq2SeqModelOutput,
32
+ )
33
+ from ...modeling_tf_utils import (
34
+ TFCausalLanguageModelingLoss,
35
+ TFModelInputType,
36
+ TFPreTrainedModel,
37
+ TFSharedEmbeddings,
38
+ keras,
39
+ keras_serializable,
40
+ unpack_inputs,
41
+ )
42
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
43
+ from ...utils import (
44
+ add_code_sample_docstrings,
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from .configuration_speech_to_text import Speech2TextConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CONFIG_FOR_DOC = "Speech2TextConfig"
56
+ _CHECKPOINT_FOR_DOC = "facebook/s2t-small-librispeech-asr"
57
+
58
+
59
+ from ..deprecated._archive_maps import TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
60
+
61
+
62
+ LARGE_NEGATIVE = -1e8
63
+
64
+
65
+ # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
66
+ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
67
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
68
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
69
+ start_tokens = tf.fill(
70
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
71
+ )
72
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
73
+ # replace possible -100 values in labels by `pad_token_id`
74
+ shifted_input_ids = tf.where(
75
+ shifted_input_ids == -100,
76
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
77
+ shifted_input_ids,
78
+ )
79
+
80
+ # "Verify that `labels` has only positive values and -100"
81
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
82
+
83
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
84
+ with tf.control_dependencies([assert_gte0]):
85
+ shifted_input_ids = tf.identity(shifted_input_ids)
86
+
87
+ return shifted_input_ids
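+ # Editor's sketch (illustrative, not part of the original file): with pad_token_id=1 and
+ # decoder_start_token_id=2, labels [[5, 6, 2, -100, -100]] become decoder inputs
+ # [[2, 5, 6, 2, 1]]: the start token is prepended, the sequence is shifted right by one, and
+ # any remaining -100 entries are replaced by the pad token.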
88
+
89
+
90
+ # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
91
+ def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
92
+ """
93
+ Make causal mask used for bi-directional self-attention.
94
+ """
95
+ bsz = input_ids_shape[0]
96
+ tgt_len = input_ids_shape[1]
97
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
98
+ mask_cond = tf.range(shape_list(mask)[-1])
99
+
100
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
101
+
102
+ if past_key_values_length > 0:
103
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
104
+
105
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
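+ # Editor's sketch (illustrative): for tgt_len=3 and past_key_values_length=0 the per-example mask is
+ #   [[0., -1e8, -1e8],
+ #    [0.,   0., -1e8],
+ #    [0.,   0.,   0.]]
+ # i.e. position i may only attend to positions <= i; the mask is then tiled to (bsz, 1, tgt_len, tgt_len).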
106
+
107
+
108
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
109
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
110
+ """
111
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
112
+ """
113
+ src_len = shape_list(mask)[1]
114
+ tgt_len = tgt_len if tgt_len is not None else src_len
115
+ one_cst = tf.constant(1.0)
116
+ mask = tf.cast(mask, dtype=one_cst.dtype)
117
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
118
+
119
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
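+ # Editor's sketch (illustrative): a padding mask [[1, 1, 0]] becomes [[0., 0., -1e8]] after
+ # `(1 - mask) * LARGE_NEGATIVE` and is broadcast to (bsz, 1, tgt_len, src_len), so padded
+ # source positions receive ~zero probability after the softmax.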
120
+
121
+
122
+ class TFConv1dSubsampler(keras.layers.Layer):
123
+ """
124
+ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
125
+ via gated linear units (https://arxiv.org/abs/1911.08460)
126
+ """
127
+
128
+ def __init__(self, config: Speech2TextConfig, **kwargs):
129
+ super().__init__(**kwargs)
130
+ self.config = config
131
+ self.num_layers = config.num_conv_layers
132
+ self.in_channels = config.input_feat_per_channel * config.input_channels
133
+ self.mid_channels = config.conv_channels
134
+ self.out_channels = config.d_model
135
+ self.kernel_sizes = config.conv_kernel_sizes
136
+
137
+ self.conv_layers = [
138
+ keras.layers.Conv1D(
139
+ filters=self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
140
+ kernel_size=k,
141
+ strides=2,
142
+ name=f"conv_layers.{i}",
143
+ )
144
+ for i, k in enumerate(self.kernel_sizes)
145
+ ]
146
+
147
+ def call(self, input_features: tf.Tensor) -> tf.Tensor:
148
+ # TF Conv1D assumes Batch x Time x Channels, same as the input
149
+ hidden_states = tf.cast(input_features, tf.float32)
150
+ for i, conv in enumerate(self.conv_layers):
151
+ # equivalent to `padding=k // 2` on PT's `nn.Conv1d`
152
+ pad_len = self.kernel_sizes[i] // 2
153
+ hidden_shapes = shape_list(hidden_states)
154
+ hidden_states = tf.concat(
155
+ (
156
+ tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
157
+ hidden_states,
158
+ tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
159
+ ),
160
+ axis=1,
161
+ )
162
+
163
+ hidden_states = conv(hidden_states)
164
+ hidden_states = glu(hidden_states, axis=2) # GLU over the Channel dimension
165
+ return hidden_states
166
+
167
+ def build(self, input_shape=None):
168
+ if self.built:
169
+ return
170
+ self.built = True
171
+ if getattr(self, "conv_layers", None) is not None:
172
+ for i, layer in enumerate(self.conv_layers):
173
+ with tf.name_scope(layer.name):
174
+ layer.build([None, None, self.in_channels] if i == 0 else [None, None, self.mid_channels // 2])
175
+
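+ # Editor's sketch (illustrative; the concrete numbers assume the s2t-small configuration with
+ # 80 fbank features, conv_channels=1024, d_model=256 and kernel sizes (5, 5)): each stride-2
+ # convolution roughly halves the time axis and the GLU halves the channel axis, e.g.
+ #   (batch, 584, 80) -> conv+GLU -> (batch, 292, 512) -> conv+GLU -> (batch, 146, 256)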
176
+
177
+ class TFSpeech2TextSinusoidalPositionalEmbedding(keras.layers.Layer):
178
+ """This module produces sinusoidal positional embeddings of any length."""
179
+
180
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None, **kwargs):
181
+ super().__init__(**kwargs)
182
+ self.offset = 2
183
+ self.embedding_dim = embedding_dim
184
+ self.padding_idx = padding_idx
185
+ self.embedding_weights = self._get_embedding(num_positions + self.offset, embedding_dim, padding_idx)
186
+
187
+ @staticmethod
188
+ def _get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None) -> tf.Tensor:
189
+ """
190
+ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
191
+ description in Section 3.5 of "Attention Is All You Need".
192
+ """
193
+ half_dim = embedding_dim // 2
194
+ emb = tf.math.log(10000.0) / (half_dim - 1)
195
+ emb = tf.math.exp(tf.range(half_dim, dtype=tf.float32) * -emb)
196
+ emb = tf.expand_dims(tf.range(num_embeddings, dtype=tf.float32), axis=1) * tf.expand_dims(emb, axis=0)
197
+ emb = tf.reshape(tf.concat([tf.math.sin(emb), tf.math.cos(emb)], axis=1), shape=[num_embeddings, -1])
198
+ if embedding_dim % 2 == 1:
199
+ # zero pad
200
+ emb = tf.concat([emb, tf.zeros((num_embeddings, 1))], axis=1)
201
+ if padding_idx is not None:
202
+ emb = tf.concat([emb[:padding_idx, :], tf.zeros((1, tf.shape(emb)[1])), emb[padding_idx + 1 :, :]], axis=0)
203
+ return emb
204
+
205
+ def call(self, input_ids: tf.Tensor, past_key_values_length: int = 0) -> tf.Tensor:
206
+ bsz, seq_len = shape_list(input_ids)
207
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
208
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
209
+
210
+ # Matt: The PyTorch code does a lot of work to cache the embeddings, setting the cached values as a
211
+ # model attribute in the forward pass. This is extremely forbidden in TF, which wants forward calls to be
212
+ # idempotent. TF doesn't need that caching anyway, since it can just store constants during compilation,
213
+ # so we just remove all of that code.
214
+ embeddings = self._get_embedding(
215
+ self.padding_idx + 1 + seq_len + self.offset + past_key_values_length, self.embedding_dim, self.padding_idx
216
+ )
217
+ return tf.reshape(tf.gather(embeddings, tf.reshape(position_ids, (-1,)), axis=0), (bsz, seq_len, -1))
218
+
219
+ @staticmethod
220
+ def create_position_ids_from_input_ids(
221
+ input_ids: tf.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
222
+ ) -> tf.Tensor:
223
+ """
224
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
225
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
226
+
227
+ Args:
228
+ input_ids: tf.Tensor
+ padding_idx: int
+ past_key_values_length: Optional[int]
229
+ Returns: tf.Tensor
230
+ """
231
+ mask = tf.cast(tf.math.not_equal(input_ids, padding_idx), dtype=tf.int32)
232
+ incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
233
+ return tf.cast(incremental_indices, dtype=tf.int64) + padding_idx
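+ # Editor's sketch (illustrative): with padding_idx=1, input_ids [[7, 9, 1, 1]] gives
+ # mask [1, 1, 0, 0], masked cumulative sums [1, 2, 0, 0] and position ids [2, 3, 1, 1]:
+ # real tokens are numbered from padding_idx + 1, padded slots keep padding_idx.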
234
+
235
+
236
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Speech2Text
237
+ class TFSpeech2TextAttention(keras.layers.Layer):
238
+ """Multi-headed attention from "Attention Is All You Need"""
239
+
240
+ def __init__(
241
+ self,
242
+ embed_dim: int,
243
+ num_heads: int,
244
+ dropout: float = 0.0,
245
+ is_decoder: bool = False,
246
+ bias: bool = True,
247
+ **kwargs,
248
+ ):
249
+ super().__init__(**kwargs)
250
+ self.embed_dim = embed_dim
251
+
252
+ self.num_heads = num_heads
253
+ self.dropout = keras.layers.Dropout(dropout)
254
+ self.head_dim = embed_dim // num_heads
255
+ if (self.head_dim * num_heads) != self.embed_dim:
256
+ raise ValueError(
257
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
258
+ f" and `num_heads`: {num_heads})."
259
+ )
260
+ self.scaling = self.head_dim**-0.5
261
+ self.is_decoder = is_decoder
262
+
263
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
264
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
265
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
266
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
267
+
268
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
269
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
270
+
271
+ def call(
272
+ self,
273
+ hidden_states: tf.Tensor,
274
+ key_value_states: tf.Tensor | None = None,
275
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
276
+ attention_mask: tf.Tensor | None = None,
277
+ layer_head_mask: tf.Tensor | None = None,
278
+ training: Optional[bool] = False,
279
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
280
+ """Input shape: Batch x Time x Channel"""
281
+
282
+ # if key_value_states are provided this layer is used as a cross-attention layer
283
+ # for the decoder
284
+ is_cross_attention = key_value_states is not None
285
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
286
+
287
+ # get query proj
288
+ query_states = self.q_proj(hidden_states) * self.scaling
289
+ # get key, value proj
290
+ if is_cross_attention and past_key_value is not None:
291
+ # reuse k,v, cross_attentions
292
+ key_states = past_key_value[0]
293
+ value_states = past_key_value[1]
294
+ elif is_cross_attention:
295
+ # cross_attentions
296
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
297
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
298
+ elif past_key_value is not None:
299
+ # reuse k, v, self_attention
300
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
301
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
302
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
303
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
304
+ else:
305
+ # self_attention
306
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
307
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
308
+
309
+ if self.is_decoder:
310
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
311
+ # Further calls to cross_attention layer can then reuse all cross-attention
312
+ # key/value_states (first "if" case)
313
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
314
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
315
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
316
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
317
+ past_key_value = (key_states, value_states)
318
+
319
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
320
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
321
+ key_states = tf.reshape(key_states, proj_shape)
322
+ value_states = tf.reshape(value_states, proj_shape)
323
+
324
+ src_len = shape_list(key_states)[1]
325
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
326
+
327
+ tf.debugging.assert_equal(
328
+ shape_list(attn_weights),
329
+ [bsz * self.num_heads, tgt_len, src_len],
330
+ message=(
331
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
332
+ f" {shape_list(attn_weights)}"
333
+ ),
334
+ )
335
+
336
+ if attention_mask is not None:
337
+ tf.debugging.assert_equal(
338
+ shape_list(attention_mask),
339
+ [bsz, 1, tgt_len, src_len],
340
+ message=(
341
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
342
+ f" {shape_list(attention_mask)}"
343
+ ),
344
+ )
345
+
346
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
347
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
348
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
349
+
350
+ attn_weights = stable_softmax(attn_weights, axis=-1)
351
+
352
+ if layer_head_mask is not None:
353
+ tf.debugging.assert_equal(
354
+ shape_list(layer_head_mask),
355
+ [self.num_heads],
356
+ message=(
357
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
358
+ f" {shape_list(layer_head_mask)}"
359
+ ),
360
+ )
361
+
362
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
363
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
364
+ )
365
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
366
+
367
+ attn_probs = self.dropout(attn_weights, training=training)
368
+ attn_output = tf.matmul(attn_probs, value_states)
369
+
370
+ tf.debugging.assert_equal(
371
+ shape_list(attn_output),
372
+ [bsz * self.num_heads, tgt_len, self.head_dim],
373
+ message=(
374
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
375
+ f" {shape_list(attn_output)}"
376
+ ),
377
+ )
378
+
379
+ attn_output = tf.transpose(
380
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
381
+ )
382
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
383
+
384
+ attn_output = self.out_proj(attn_output)
385
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
386
+
387
+ return attn_output, attn_weights, past_key_value
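+ # Editor's sketch (illustrative shape trace, not part of the original file):
+ #   hidden_states              (bsz, tgt_len, embed_dim)
+ #   per-head query/key/value   (bsz * num_heads, seq_len, head_dim)
+ #   attn_weights               (bsz * num_heads, tgt_len, src_len), returned as (bsz, num_heads, tgt_len, src_len)
+ #   attn_output                (bsz, tgt_len, embed_dim) after the output projection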
388
+
389
+ def build(self, input_shape=None):
390
+ if self.built:
391
+ return
392
+ self.built = True
393
+ if getattr(self, "k_proj", None) is not None:
394
+ with tf.name_scope(self.k_proj.name):
395
+ self.k_proj.build([None, None, self.embed_dim])
396
+ if getattr(self, "q_proj", None) is not None:
397
+ with tf.name_scope(self.q_proj.name):
398
+ self.q_proj.build([None, None, self.embed_dim])
399
+ if getattr(self, "v_proj", None) is not None:
400
+ with tf.name_scope(self.v_proj.name):
401
+ self.v_proj.build([None, None, self.embed_dim])
402
+ if getattr(self, "out_proj", None) is not None:
403
+ with tf.name_scope(self.out_proj.name):
404
+ self.out_proj.build([None, None, self.embed_dim])
405
+
406
+
407
+ class TFSpeech2TextEncoderLayer(keras.layers.Layer):
408
+ def __init__(self, config: Speech2TextConfig, **kwargs):
409
+ super().__init__(**kwargs)
410
+ self.embed_dim = config.d_model
411
+ self.self_attn = TFSpeech2TextAttention(
412
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
413
+ )
414
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
415
+ self.dropout = keras.layers.Dropout(config.dropout)
416
+ self.activation_fn = get_tf_activation(config.activation_function)
417
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
418
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
419
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
420
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
421
+ self.config = config
422
+
423
+ def call(
424
+ self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False
425
+ ):
426
+ """
427
+ Args:
428
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
429
+ attention_mask (`tf.Tensor`): attention mask of size
430
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
431
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
432
+ `(encoder_attention_heads,)`
433
+ """
434
+ residual = hidden_states
435
+ hidden_states = self.self_attn_layer_norm(hidden_states)
436
+ hidden_states, self_attn_weights, _ = self.self_attn(
437
+ hidden_states=hidden_states,
438
+ attention_mask=attention_mask,
439
+ layer_head_mask=layer_head_mask,
440
+ training=training,
441
+ )
442
+
443
+ tf.debugging.assert_equal(
444
+ shape_list(hidden_states),
445
+ shape_list(residual),
446
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
447
+ )
448
+
449
+ hidden_states = self.dropout(hidden_states, training=training)
450
+ hidden_states = residual + hidden_states
451
+
452
+ residual = hidden_states
453
+ hidden_states = self.final_layer_norm(hidden_states)
454
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
455
+ hidden_states = self.activation_dropout(hidden_states, training=training)
456
+ hidden_states = self.fc2(hidden_states)
457
+ hidden_states = self.dropout(hidden_states, training=training)
458
+ hidden_states = residual + hidden_states
459
+
460
+ return hidden_states, self_attn_weights
461
+
462
+ def build(self, input_shape=None):
463
+ if self.built:
464
+ return
465
+ self.built = True
466
+ if getattr(self, "self_attn", None) is not None:
467
+ with tf.name_scope(self.self_attn.name):
468
+ self.self_attn.build(None)
469
+ if getattr(self, "self_attn_layer_norm", None) is not None:
470
+ with tf.name_scope(self.self_attn_layer_norm.name):
471
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
472
+ if getattr(self, "fc1", None) is not None:
473
+ with tf.name_scope(self.fc1.name):
474
+ self.fc1.build([None, None, self.embed_dim])
475
+ if getattr(self, "fc2", None) is not None:
476
+ with tf.name_scope(self.fc2.name):
477
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
478
+ if getattr(self, "final_layer_norm", None) is not None:
479
+ with tf.name_scope(self.final_layer_norm.name):
480
+ self.final_layer_norm.build([None, None, self.embed_dim])
481
+
482
+
483
+ class TFSpeech2TextDecoderLayer(keras.layers.Layer):
484
+ def __init__(self, config: Speech2TextConfig, **kwargs):
485
+ super().__init__(**kwargs)
486
+ self.embed_dim = config.d_model
487
+
488
+ self.self_attn = TFSpeech2TextAttention(
489
+ embed_dim=self.embed_dim,
490
+ num_heads=config.decoder_attention_heads,
491
+ dropout=config.attention_dropout,
492
+ name="self_attn",
493
+ is_decoder=True,
494
+ )
495
+ self.dropout = keras.layers.Dropout(config.dropout)
496
+ self.activation_fn = get_tf_activation(config.activation_function)
497
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
498
+
499
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
500
+ self.encoder_attn = TFSpeech2TextAttention(
501
+ self.embed_dim,
502
+ config.decoder_attention_heads,
503
+ dropout=config.attention_dropout,
504
+ name="encoder_attn",
505
+ is_decoder=True,
506
+ )
507
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
508
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
509
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
510
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
511
+ self.config = config
512
+
513
+ def call(
514
+ self,
515
+ hidden_states,
516
+ attention_mask: tf.Tensor | None = None,
517
+ encoder_hidden_states: tf.Tensor | None = None,
518
+ encoder_attention_mask: tf.Tensor | None = None,
519
+ layer_head_mask: tf.Tensor | None = None,
520
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
521
+ past_key_value: Tuple[tf.Tensor] | None = None,
522
+ training=False,
523
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
524
+ """
525
+ Args:
526
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
527
+ attention_mask (`tf.Tensor`): attention mask of size
528
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
529
+ encoder_hidden_states (`tf.Tensor`):
530
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
531
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
532
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
533
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
534
+ `(decoder_attention_heads,)`
535
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
536
+ `(decoder_attention_heads,)`
537
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
538
+ """
539
+ residual = hidden_states
540
+ hidden_states = self.self_attn_layer_norm(hidden_states)
541
+
542
+ # Self Attention
543
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
544
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
545
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
546
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
547
+ hidden_states=hidden_states,
548
+ past_key_value=self_attn_past_key_value,
549
+ attention_mask=attention_mask,
550
+ layer_head_mask=layer_head_mask,
551
+ training=training,
552
+ )
553
+ hidden_states = self.dropout(hidden_states, training=training)
554
+ hidden_states = residual + hidden_states
555
+
556
+ # Cross-Attention Block
557
+ cross_attn_present_key_value = None
558
+ cross_attn_weights = None
559
+ if encoder_hidden_states is not None:
560
+ residual = hidden_states
561
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
562
+
563
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
564
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
565
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
566
+ hidden_states=hidden_states,
567
+ key_value_states=encoder_hidden_states,
568
+ attention_mask=encoder_attention_mask,
569
+ layer_head_mask=cross_attn_layer_head_mask,
570
+ past_key_value=cross_attn_past_key_value,
571
+ training=training,
572
+ )
573
+ hidden_states = self.dropout(hidden_states, training=training)
574
+ hidden_states = residual + hidden_states
575
+
576
+ # add cross-attn to positions 3,4 of present_key_value tuple
577
+ present_key_value = present_key_value + cross_attn_present_key_value
578
+
579
+ # Fully Connected
580
+ residual = hidden_states
581
+ hidden_states = self.final_layer_norm(hidden_states)
582
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
583
+ hidden_states = self.activation_dropout(hidden_states, training=training)
584
+ hidden_states = self.fc2(hidden_states)
585
+ hidden_states = self.dropout(hidden_states, training=training)
586
+ hidden_states = residual + hidden_states
587
+
588
+ return (
589
+ hidden_states,
590
+ self_attn_weights,
591
+ cross_attn_weights,
592
+ present_key_value,
593
+ )
594
+
595
+ def build(self, input_shape=None):
596
+ if self.built:
597
+ return
598
+ self.built = True
599
+ if getattr(self, "self_attn", None) is not None:
600
+ with tf.name_scope(self.self_attn.name):
601
+ self.self_attn.build(None)
602
+ if getattr(self, "self_attn_layer_norm", None) is not None:
603
+ with tf.name_scope(self.self_attn_layer_norm.name):
604
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
605
+ if getattr(self, "encoder_attn", None) is not None:
606
+ with tf.name_scope(self.encoder_attn.name):
607
+ self.encoder_attn.build(None)
608
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
609
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
610
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
611
+ if getattr(self, "fc1", None) is not None:
612
+ with tf.name_scope(self.fc1.name):
613
+ self.fc1.build([None, None, self.embed_dim])
614
+ if getattr(self, "fc2", None) is not None:
615
+ with tf.name_scope(self.fc2.name):
616
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
617
+ if getattr(self, "final_layer_norm", None) is not None:
618
+ with tf.name_scope(self.final_layer_norm.name):
619
+ self.final_layer_norm.build([None, None, self.embed_dim])
620
+
621
+
622
+ class TFSpeech2TextPreTrainedModel(TFPreTrainedModel):
623
+ config_class = Speech2TextConfig
624
+ base_model_prefix = "model"
625
+ main_input_name = "input_features"
626
+ _keys_to_ignore_on_load_unexpected = [r"encoder.embed_positions.weights"]
627
+
628
+ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
629
+ """
630
+ Computes the output length of the convolutional layers
631
+ """
632
+ for _ in range(self.config.num_conv_layers):
633
+ input_lengths = (input_lengths - 1) // 2 + 1
634
+
635
+ return input_lengths
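+ # Editor's sketch (illustrative): with config.num_conv_layers = 2, an input of 584 feature frames
+ # maps to (584 - 1) // 2 + 1 = 292 and then (292 - 1) // 2 + 1 = 146 encoder positions, matching
+ # the output length of the convolutional subsampler above.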
636
+
637
+ @property
638
+ def input_signature(self):
639
+ return {
640
+ "input_features": tf.TensorSpec(
641
+ (None, None, self.config.input_feat_per_channel * self.config.input_channels),
642
+ tf.float32,
643
+ name="input_features",
644
+ ),
645
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
646
+ "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
647
+ "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
648
+ }
649
+
650
+
651
+ SPEECH_TO_TEXT_START_DOCSTRING = r"""
652
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
653
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
654
+ etc.)
655
+
656
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
657
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
658
+ behavior.
659
+
660
+ <Tip>
661
+
662
+ TensorFlow models and layers in `transformers` accept two formats as input:
663
+
664
+ - having all inputs as keyword arguments (like PyTorch models), or
665
+ - having all inputs as a list, tuple or dict in the first positional argument.
666
+
667
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
668
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
669
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
670
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
671
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
672
+ positional argument:
673
+
674
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
675
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
676
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
677
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
678
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
679
+
680
+ Note that when creating models and layers with
681
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
682
+ about any of this, as you can just pass inputs like you would to any other Python function!
683
+
684
+ </Tip>
685
+
686
+ Parameters:
687
+ config ([`Speech2TextConfig`]):
688
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
689
+ load the weights associated with the model, only the configuration. Check out the
690
+ [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
691
+ """
692
+
693
+
694
+ SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
695
+ Args:
696
+ input_features (`tf.Tensor` of shape `(batch_size, sequence_length, feature_size)`):
697
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
698
+ by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
699
+ via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
700
+ [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a
701
+ tensor of floats. See [`~Speech2TextFeatureExtractor.__call__`]
702
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
703
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
704
+
705
+ - 1 for tokens that are **not masked**,
706
+ - 0 for tokens that are **masked**.
707
+
708
+ [What are attention masks?](../glossary#attention-mask)
709
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
710
+ Indices of decoder input sequence tokens in the vocabulary.
711
+
712
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
713
+ [`PreTrainedTokenizer.__call__`] for details.
714
+
715
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
716
+
717
+ SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
718
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
719
+ `past_key_values`).
720
+
721
+ For training, `decoder_input_ids` should be provided. If no `decoder_input_ids` are provided,
722
+ the model creates this tensor by shifting the `labels` to the right: the `decoder_start_token_id`
723
+ is prepended and any -100 values are replaced with the `pad_token_id`.
724
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
725
+ If not provided, a default mask that ignores pad tokens in `decoder_input_ids` is created. It is not recommended to set this for most use cases.
726
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
727
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
728
+
729
+ - 1 indicates the head is **not masked**,
730
+ - 0 indicates the head is **masked**.
731
+
732
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
733
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
734
+
735
+ - 1 indicates the head is **not masked**,
736
+ - 0 indicates the head is **masked**.
737
+
738
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
739
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
740
+
741
+ - 1 indicates the head is **not masked**,
742
+ - 0 indicates the head is **masked**.
743
+
744
+ encoder_outputs (`tf.FloatTensor`, *optional*):
745
+ Sequence of hidden-states at the output of the last layer of the encoder, of shape
746
+ `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
747
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
748
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
749
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
750
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
751
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
752
+ decoder_inputs_embeds (`tf.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
753
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
754
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
755
+ input (see `past_key_values`). This is useful if you want more control over how to convert
756
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
757
+ use_cache (`bool`, *optional*):
758
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
759
+ `past_key_values`).
760
+ output_attentions (`bool`, *optional*):
761
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
762
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
763
+ config will be used instead.
764
+ output_hidden_states (`bool`, *optional*):
765
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
766
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
767
+ used instead.
768
+ return_dict (`bool`, *optional*):
769
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
770
+ eager mode, in graph mode the value will always be set to True.
771
+ training (`bool`, *optional*, defaults to `False`):
772
+ Whether or not to use the model in training mode (some modules like dropout modules have different
773
+ behaviors between training and evaluation).
774
+ """
775
+
776
+
777
+ @keras_serializable
778
+ class TFSpeech2TextEncoder(keras.layers.Layer):
779
+ config_class = Speech2TextConfig
780
+ """
781
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
782
+ [`TFSpeech2TextEncoderLayer`].
783
+
784
+ Args:
785
+ config: Speech2TextConfig
786
+ """
787
+
788
+ def __init__(self, config: Speech2TextConfig, **kwargs):
789
+ super().__init__(**kwargs)
790
+ self.config = config
791
+
792
+ self.dropout = keras.layers.Dropout(config.dropout)
793
+ self.layerdrop = config.encoder_layerdrop
794
+
795
+ embed_dim = config.d_model
796
+ self.padding_idx = config.pad_token_id
797
+ self.max_source_positions = config.max_source_positions
798
+ self.embed_scale = tf.math.sqrt(float(embed_dim)) if config.scale_embedding else 1.0
799
+
800
+ self.conv = TFConv1dSubsampler(config, name="conv")
801
+
802
+ self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
803
+ num_positions=config.max_source_positions,
804
+ embedding_dim=embed_dim,
805
+ padding_idx=self.padding_idx,
806
+ name="embed_positions",
807
+ )
808
+ self.layers = [TFSpeech2TextEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
809
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
810
+
811
+ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
812
+ """
813
+ Computes the output length of the convolutional layers
814
+ """
815
+ for _ in range(self.config.num_conv_layers):
816
+ input_lengths = (input_lengths - 1) // 2 + 1
817
+
818
+ return input_lengths
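As a rough worked example of the formula above (assuming the default of two convolutional layers), each stride-2 convolution maps a length `L` to `(L - 1) // 2 + 1`, so the mel-filter-bank frames are subsampled by roughly a factor of four:

```python
def conv_out_len(length: int, num_conv_layers: int = 2) -> int:
    # Plain-Python mirror of _get_feat_extract_output_lengths above.
    for _ in range(num_conv_layers):
        length = (length - 1) // 2 + 1
    return length

print(conv_out_len(584))  # 584 -> 292 -> 146
```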
819
+
820
+ def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
821
+ # generate creates 3D attention mask, because of the shape of input_features
822
+ # convert it to 2D if thats the case
823
+ if len(attention_mask.shape) > 2:
824
+ attention_mask = attention_mask[:, :, -1]
825
+
826
+ subsampled_lengths = self._get_feat_extract_output_lengths(tf.math.reduce_sum(attention_mask, -1))
827
+ bsz = shape_list(attention_mask)[0]
828
+ indices = tf.concat(
829
+ (
830
+ tf.expand_dims(tf.range(bsz, dtype=attention_mask.dtype), -1),
831
+ tf.expand_dims(subsampled_lengths - 1, -1),
832
+ ),
833
+ axis=-1,
834
+ )
835
+ attention_mask = tf.scatter_nd(indices=indices, updates=tf.ones(bsz), shape=[bsz, feature_vector_length])
836
+ attention_mask = tf.cast(tf.reverse(tf.math.cumsum(tf.reverse(attention_mask, [-1]), -1), [-1]), tf.int64)
837
+ return attention_mask
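The scatter-plus-reversed-cumsum construction above is easier to follow on a toy input; a self-contained sketch with made-up lengths:

```python
import tensorflow as tf

bsz, feature_vector_length = 2, 6
subsampled_lengths = tf.constant([4, 2])  # valid frames per example after subsampling

# Put a single 1 at the last valid position of each row ...
indices = tf.stack([tf.range(bsz), subsampled_lengths - 1], axis=-1)
mask = tf.scatter_nd(indices, tf.ones(bsz), shape=[bsz, feature_vector_length])

# ... then a reversed cumulative sum turns it into a prefix mask of ones.
mask = tf.reverse(tf.math.cumsum(tf.reverse(mask, [-1]), -1), [-1])
print(mask.numpy())
# [[1. 1. 1. 1. 0. 0.]
#  [1. 1. 0. 0. 0. 0.]]
```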
838
+
839
+ @unpack_inputs
840
+ def call(
841
+ self,
842
+ input_features=None,
843
+ attention_mask=None,
844
+ head_mask=None,
845
+ output_attentions=None,
846
+ output_hidden_states=None,
847
+ return_dict=None,
848
+ training=False,
849
+ ):
850
+ """
851
+ Args:
852
+ input_features (`tf.Tensor` of shape `(batch_size, sequence_length, feature_size)`):
853
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
854
+ obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
855
+ `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
856
+ `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
857
+ padding and conversion into a tensor of floats. See [`~Speech2TextFeatureExtractor.__call__`]
858
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
859
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
860
+
861
+ - 1 for tokens that are **not masked**,
862
+ - 0 for tokens that are **masked**.
863
+
864
+ [What are attention masks?](../glossary#attention-mask)
865
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
866
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
867
+
868
+ - 1 indicates the head is **not masked**,
869
+ - 0 indicates the head is **masked**.
870
+
871
+ output_attentions (`bool`, *optional*):
872
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
873
+ returned tensors for more detail.
874
+ output_hidden_states (`bool`, *optional*):
875
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
876
+ for more detail.
877
+ return_dict (`bool`, *optional*):
878
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
879
+ """
880
+ if input_features is None:
881
+ raise ValueError("You have to specify input_features")
882
+
883
+ inputs_embeds = self.conv(input_features)
884
+ inputs_embeds = self.embed_scale * inputs_embeds
885
+
886
+ # subsample attention mask if necessary
887
+ if attention_mask is not None:
888
+ attention_mask = self._get_feature_vector_attention_mask(tf.shape(inputs_embeds)[1], attention_mask)
889
+ padding_mask = tf.cast(tf.math.not_equal(attention_mask, 1), tf.int64)
890
+ else:
891
+ padding_mask = tf.zeros(tf.shape(inputs_embeds)[:-1], dtype=tf.int64)
892
+
893
+ embed_pos = self.embed_positions(padding_mask)
894
+
895
+ hidden_states = inputs_embeds + embed_pos
896
+ hidden_states = self.dropout(hidden_states, training=training)
897
+
898
+ # check attention mask and invert
899
+ if attention_mask is not None:
900
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
901
+ attention_mask = _expand_mask(attention_mask)
902
+
903
+ encoder_states = () if output_hidden_states else None
904
+ all_attentions = () if output_attentions else None
905
+
906
+ # check if head_mask has a correct number of layers specified if desired
907
+ if head_mask is not None:
908
+ tf.debugging.assert_equal(
909
+ shape_list(head_mask)[0],
910
+ len(self.layers),
911
+ message=(
912
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
913
+ f" {shape_list(head_mask)[0]}."
914
+ ),
915
+ )
916
+
917
+ for idx, encoder_layer in enumerate(self.layers):
918
+ if output_hidden_states:
919
+ encoder_states = encoder_states + (hidden_states,)
920
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
921
+ dropout_probability = random.uniform(0, 1)
922
+ if training and (dropout_probability < self.layerdrop): # skip the layer
923
+ continue
924
+
925
+ hidden_states, attn = encoder_layer(
926
+ hidden_states,
927
+ attention_mask,
928
+ head_mask[idx] if head_mask is not None else None,
929
+ training=training,
930
+ )
931
+
932
+ if output_attentions:
933
+ all_attentions += (attn,)
934
+
935
+ hidden_states = self.layer_norm(hidden_states)
936
+ if output_hidden_states:
937
+ encoder_states = encoder_states + (hidden_states,)
938
+
939
+ if not return_dict:
940
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
941
+ return TFBaseModelOutput(
942
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
943
+ )
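The LayerDrop check in the loop above only ever skips layers when `training=True`; a stripped-down sketch of that control flow (the drop probability is illustrative, the real one comes from `config.encoder_layerdrop`):

```python
import random

layerdrop = 0.1  # illustrative value

def run_layers(num_layers: int, training: bool) -> int:
    executed = 0
    for _ in range(num_layers):
        if training and random.uniform(0, 1) < layerdrop:
            continue  # the whole layer is skipped for this forward pass
        executed += 1
    return executed

print(run_layers(12, training=False))  # always 12 at inference time
print(run_layers(12, training=True))   # usually 10-12, varying from call to call
```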
944
+
945
+ def build(self, input_shape=None):
946
+ if self.built:
947
+ return
948
+ self.built = True
949
+ if getattr(self, "conv", None) is not None:
950
+ with tf.name_scope(self.conv.name):
951
+ self.conv.build(None)
952
+ if getattr(self, "embed_positions", None) is not None:
953
+ with tf.name_scope(self.embed_positions.name):
954
+ self.embed_positions.build(None)
955
+ if getattr(self, "layer_norm", None) is not None:
956
+ with tf.name_scope(self.layer_norm.name):
957
+ self.layer_norm.build([None, None, self.config.d_model])
958
+ if getattr(self, "layers", None) is not None:
959
+ for layer in self.layers:
960
+ with tf.name_scope(layer.name):
961
+ layer.build(None)
962
+
963
+
964
+ @keras_serializable
965
+ class TFSpeech2TextDecoder(keras.layers.Layer):
966
+ config_class = Speech2TextConfig
967
+ """
968
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFSpeech2TextDecoderLayer`]
969
+
970
+ Args:
971
+ config: Speech2TextConfig
972
+ """
973
+
974
+ def __init__(self, config: Speech2TextConfig, **kwargs):
975
+ super().__init__(**kwargs)
976
+ self.config = config
977
+ self.layerdrop = config.decoder_layerdrop
978
+ self.padding_idx = config.pad_token_id
979
+ self.max_target_positions = config.max_target_positions
980
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
981
+
982
+ self.embed_tokens = TFSharedEmbeddings(config.vocab_size, config.d_model, name="embed_tokens")
983
+
984
+ self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
985
+ num_positions=config.max_target_positions,
986
+ embedding_dim=config.d_model,
987
+ padding_idx=self.padding_idx,
988
+ name="embed_positions",
989
+ )
990
+
991
+ self.layers = [TFSpeech2TextDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
992
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
993
+
994
+ self.dropout = keras.layers.Dropout(config.dropout)
995
+
996
+ def get_embed_tokens(self):
997
+ return self.embed_tokens
998
+
999
+ def set_embed_tokens(self, embed_tokens):
1000
+ self.embed_tokens = embed_tokens
1001
+
1002
+ @unpack_inputs
1003
+ def call(
1004
+ self,
1005
+ input_ids=None,
1006
+ inputs_embeds=None,
1007
+ attention_mask=None,
1008
+ encoder_hidden_states=None,
1009
+ encoder_attention_mask=None,
1010
+ head_mask=None,
1011
+ cross_attn_head_mask=None,
1012
+ past_key_values=None,
1013
+ use_cache=None,
1014
+ output_attentions=None,
1015
+ output_hidden_states=None,
1016
+ return_dict=None,
1017
+ training=False,
1018
+ ):
1019
+ r"""
1020
+ Args:
1021
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
1022
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1023
+ provide it.
1024
+
1025
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1026
+ [`PreTrainedTokenizer.__call__`] for details.
1027
+
1028
+ [What are input IDs?](../glossary#input-ids)
1029
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1030
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1031
+
1032
+ - 1 for tokens that are **not masked**,
1033
+ - 0 for tokens that are **masked**.
1034
+
1035
+ [What are attention masks?](../glossary#attention-mask)
1036
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
1037
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1038
+ of the decoder.
1039
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
1040
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
1041
+ selected in `[0, 1]`:
1042
+
1043
+ - 1 for tokens that are **not masked**,
1044
+ - 0 for tokens that are **masked**.
1045
+
1046
+ [What are attention masks?](../glossary#attention-mask)
1047
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1048
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1049
+
1050
+ - 1 indicates the head is **not masked**,
1051
+ - 0 indicates the head is **masked**.
1052
+
1053
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1054
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
1055
+
1056
+ - 1 indicates the head is **not masked**,
1057
+ - 0 indicates the head is **masked**.
1058
+
1059
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1060
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
1061
+ decoding.
1062
+
1063
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1064
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1065
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1066
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1067
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
1068
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
1069
+ than the model's internal embedding lookup matrix.
1070
+ output_attentions (`bool`, *optional*):
1071
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1072
+ returned tensors for more detail.
1073
+ output_hidden_states (`bool`, *optional*):
1074
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1075
+ for more detail.
1076
+ return_dict (`bool`, *optional*):
1077
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1078
+ """
1079
+
1080
+ if input_ids is not None and inputs_embeds is not None:
1081
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1082
+ elif input_ids is not None:
1083
+ input_shape = shape_list(input_ids)
1084
+ elif inputs_embeds is not None:
1085
+ input_shape = shape_list(inputs_embeds)[:-1]
1086
+ else:
1087
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1088
+
1089
+ # past_key_values_length
1090
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
1091
+
1092
+ if inputs_embeds is None:
1093
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size)
1094
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
1097
+
1098
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1099
+ if input_shape[-1] > 1:
1100
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
1101
+ else:
1102
+ combined_attention_mask = _expand_mask(
1103
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
1104
+ )
1105
+
1106
+ if attention_mask is not None:
1107
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
1108
+
1109
+ # expand encoder attention mask
1110
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1111
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1112
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
1113
+
1114
+ # embed positions
1115
+ positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
1116
+
1117
+ hidden_states = inputs_embeds + positions
1118
+ hidden_states = self.dropout(hidden_states, training=training)
1119
+
1120
+ # decoder layers
1121
+ all_hidden_states = () if output_hidden_states else None
1122
+ all_self_attns = () if output_attentions else None
1123
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
1124
+ next_decoder_cache = () if use_cache else None
1125
+
1126
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
1127
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
1128
+ if attn_mask is not None:
1129
+ tf.debugging.assert_equal(
1130
+ shape_list(attn_mask)[0],
1131
+ len(self.layers),
1132
+ message=(
1133
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
1134
+ f" {shape_list(attn_mask)[0]}."
1135
+ ),
1136
+ )
1137
+
1138
+ for idx, decoder_layer in enumerate(self.layers):
1139
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1140
+ if output_hidden_states:
1141
+ all_hidden_states += (hidden_states,)
1142
+ dropout_probability = random.uniform(0, 1)
1143
+ if training and (dropout_probability < self.layerdrop):
1144
+ continue
1145
+
1146
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1147
+ cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1148
+
1149
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
1150
+ hidden_states,
1151
+ attention_mask=combined_attention_mask,
1152
+ encoder_hidden_states=encoder_hidden_states,
1153
+ encoder_attention_mask=encoder_attention_mask,
1154
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
1155
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
1156
+ past_key_value=past_key_value,
1157
+ )
1158
+
1159
+ if use_cache:
1160
+ next_decoder_cache += (present_key_value,)
1161
+
1162
+ if output_attentions:
1163
+ all_self_attns += (layer_self_attn,)
1164
+
1165
+ if encoder_hidden_states is not None:
1166
+ all_cross_attns += (layer_cross_attn,)
1167
+
1168
+ hidden_states = self.layer_norm(hidden_states)
1169
+ if output_hidden_states:
1170
+ all_hidden_states += (hidden_states,)
1171
+
1172
+ next_cache = next_decoder_cache if use_cache else None
1173
+
1174
+ if not return_dict:
1175
+ return hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attns
1176
+ else:
1177
+ return TFBaseModelOutputWithPastAndCrossAttentions(
1178
+ last_hidden_state=hidden_states,
1179
+ past_key_values=next_cache,
1180
+ hidden_states=all_hidden_states,
1181
+ attentions=all_self_attns,
1182
+ cross_attentions=all_cross_attns,
1183
+ )
1184
+
1185
+ def build(self, input_shape=None):
1186
+ if self.built:
1187
+ return
1188
+ self.built = True
1189
+ if getattr(self, "embed_tokens", None) is not None:
1190
+ with tf.name_scope(self.embed_tokens.name):
1191
+ self.embed_tokens.build(None)
1192
+ if getattr(self, "embed_positions", None) is not None:
1193
+ with tf.name_scope(self.embed_positions.name):
1194
+ self.embed_positions.build(None)
1195
+ if getattr(self, "layer_norm", None) is not None:
1196
+ with tf.name_scope(self.layer_norm.name):
1197
+ self.layer_norm.build([None, None, self.config.d_model])
1198
+ if getattr(self, "layers", None) is not None:
1199
+ for layer in self.layers:
1200
+ with tf.name_scope(layer.name):
1201
+ layer.build(None)
1202
+
1203
+
1204
+ @keras_serializable
1205
+ class TFSpeech2TextMainLayer(keras.layers.Layer):
1206
+ config_class = Speech2TextConfig
1207
+
1208
+ def __init__(self, config: Speech2TextConfig, **kwargs):
1209
+ super().__init__(**kwargs)
1210
+ self.config = config
1211
+
1212
+ self.encoder = TFSpeech2TextEncoder(config, name="encoder")
1213
+ self.decoder = TFSpeech2TextDecoder(config, name="decoder")
1214
+
1215
+ def get_input_embeddings(self):
1216
+ return self.decoder.embed_tokens
1217
+
1218
+ def set_input_embeddings(self, new_embeddings):
1219
+ self.decoder.embed_tokens = new_embeddings
1220
+
1221
+ @unpack_inputs
1222
+ def call(
1223
+ self,
1224
+ input_features=None,
1225
+ attention_mask=None,
1226
+ decoder_input_ids=None,
1227
+ decoder_attention_mask=None,
1228
+ head_mask=None,
1229
+ decoder_head_mask=None,
1230
+ cross_attn_head_mask=None,
1231
+ encoder_outputs=None,
1232
+ past_key_values=None,
1233
+ decoder_inputs_embeds=None,
1234
+ use_cache=None,
1235
+ output_attentions=None,
1236
+ output_hidden_states=None,
1237
+ return_dict=None,
1238
+ training=False,
1239
+ **kwargs,
1240
+ ):
1241
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1242
+ output_hidden_states = (
1243
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1244
+ )
1245
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1246
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1247
+
1248
+ if encoder_outputs is None:
1249
+ encoder_outputs = self.encoder(
1250
+ input_features=input_features,
1251
+ attention_mask=attention_mask,
1252
+ head_mask=head_mask,
1253
+ output_attentions=output_attentions,
1254
+ output_hidden_states=output_hidden_states,
1255
+ return_dict=return_dict,
1256
+ training=training,
1257
+ )
1258
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
1259
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
1260
+ encoder_outputs = TFBaseModelOutput(
1261
+ last_hidden_state=encoder_outputs[0],
1262
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1263
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1264
+ )
1265
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
1266
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
1267
+ encoder_outputs = encoder_outputs.to_tuple()
1268
+
1269
+ # downsample encoder attention mask
1270
+ if attention_mask is not None:
1271
+ encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
1272
+ tf.shape(encoder_outputs[0])[1], attention_mask
1273
+ )
1274
+ else:
1275
+ encoder_attention_mask = None
1276
+
1277
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
1278
+ decoder_outputs = self.decoder(
1279
+ input_ids=decoder_input_ids,
1280
+ attention_mask=decoder_attention_mask,
1281
+ encoder_hidden_states=encoder_outputs[0],
1282
+ encoder_attention_mask=encoder_attention_mask,
1283
+ head_mask=decoder_head_mask,
1284
+ cross_attn_head_mask=cross_attn_head_mask,
1285
+ past_key_values=past_key_values,
1286
+ inputs_embeds=decoder_inputs_embeds,
1287
+ use_cache=use_cache,
1288
+ output_attentions=output_attentions,
1289
+ output_hidden_states=output_hidden_states,
1290
+ return_dict=return_dict,
1291
+ training=training,
1292
+ )
1293
+
1294
+ if not return_dict:
1295
+ return decoder_outputs + encoder_outputs
1296
+
1297
+ return TFSeq2SeqModelOutput(
1298
+ last_hidden_state=decoder_outputs.last_hidden_state,
1299
+ past_key_values=decoder_outputs.past_key_values,
1300
+ decoder_hidden_states=decoder_outputs.hidden_states,
1301
+ decoder_attentions=decoder_outputs.attentions,
1302
+ cross_attentions=decoder_outputs.cross_attentions,
1303
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1304
+ encoder_hidden_states=encoder_outputs.hidden_states,
1305
+ encoder_attentions=encoder_outputs.attentions,
1306
+ )
1307
+
1308
+ def build(self, input_shape=None):
1309
+ if self.built:
1310
+ return
1311
+ self.built = True
1312
+ if getattr(self, "encoder", None) is not None:
1313
+ with tf.name_scope(self.encoder.name):
1314
+ self.encoder.build(None)
1315
+ if getattr(self, "decoder", None) is not None:
1316
+ with tf.name_scope(self.decoder.name):
1317
+ self.decoder.build(None)
1318
+
1319
+
1320
+ @add_start_docstrings(
1321
+ "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
1322
+ SPEECH_TO_TEXT_START_DOCSTRING,
1323
+ )
1324
+ class TFSpeech2TextModel(TFSpeech2TextPreTrainedModel):
1325
+ def __init__(self, config: Speech2TextConfig, *inputs, **kwargs):
1326
+ super().__init__(config, *inputs, **kwargs)
1327
+
1328
+ self.model = TFSpeech2TextMainLayer(config, name="model")
1329
+
1330
+ def get_encoder(self):
1331
+ return self.model.encoder
1332
+
1333
+ def get_decoder(self):
1334
+ return self.model.decoder
1335
+
1336
+ @unpack_inputs
1337
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
1338
+ @add_code_sample_docstrings(
1339
+ checkpoint=_CHECKPOINT_FOR_DOC,
1340
+ output_type=TFSeq2SeqModelOutput,
1341
+ config_class=_CONFIG_FOR_DOC,
1342
+ )
1343
+ def call(
1344
+ self,
1345
+ input_features: TFModelInputType | None = None,
1346
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1347
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
1348
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1349
+ head_mask: np.ndarray | tf.Tensor | None = None,
1350
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
1351
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
1352
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
1353
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1354
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
1355
+ use_cache: Optional[bool] = None,
1356
+ output_attentions: Optional[bool] = None,
1357
+ output_hidden_states: Optional[bool] = None,
1358
+ return_dict: Optional[bool] = None,
1359
+ training: bool = False,
1360
+ **kwargs,
1361
+ ) -> Union[Tuple, TFSeq2SeqModelOutput]:
1362
+ outputs = self.model(
1363
+ input_features=input_features,
1364
+ attention_mask=attention_mask,
1365
+ decoder_input_ids=decoder_input_ids,
1366
+ decoder_attention_mask=decoder_attention_mask,
1367
+ head_mask=head_mask,
1368
+ decoder_head_mask=decoder_head_mask,
1369
+ cross_attn_head_mask=cross_attn_head_mask,
1370
+ encoder_outputs=encoder_outputs,
1371
+ past_key_values=past_key_values,
1372
+ decoder_inputs_embeds=decoder_inputs_embeds,
1373
+ use_cache=use_cache,
1374
+ output_attentions=output_attentions,
1375
+ output_hidden_states=output_hidden_states,
1376
+ return_dict=return_dict,
1377
+ training=training,
1378
+ )
1379
+
1380
+ return outputs
1381
+
1382
+ def serving_output(self, output):
1383
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1384
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1385
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1386
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1387
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1388
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1389
+
1390
+ return TFSeq2SeqModelOutput(
1391
+ last_hidden_state=output.last_hidden_state,
1392
+ past_key_values=pkv,
1393
+ decoder_hidden_states=dec_hs,
1394
+ decoder_attentions=dec_attns,
1395
+ cross_attentions=cross_attns,
1396
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1397
+ encoder_hidden_states=enc_hs,
1398
+ encoder_attentions=enc_attns,
1399
+ )
1400
+
1401
+ def build(self, input_shape=None):
1402
+ if self.built:
1403
+ return
1404
+ self.built = True
1405
+ if getattr(self, "model", None) is not None:
1406
+ with tf.name_scope(self.model.name):
1407
+ self.model.build(None)
1408
+
1409
+
1410
+ @add_start_docstrings(
1411
+ "The Speech2Text Model with a language modeling head. Can be used for summarization.",
1412
+ SPEECH_TO_TEXT_START_DOCSTRING,
1413
+ )
1414
+ class TFSpeech2TextForConditionalGeneration(TFSpeech2TextPreTrainedModel, TFCausalLanguageModelingLoss):
1415
+ def __init__(self, config: Speech2TextConfig):
1416
+ super().__init__(config)
1417
+ self.model = TFSpeech2TextMainLayer(config, name="model")
1418
+ self.lm_head = keras.layers.Dense(self.config.vocab_size, use_bias=False, name="lm_head")
1419
+ # TODO (Joao): investigate why Speech2Text has numerical issues in XLA generate
1420
+ self.supports_xla_generation = False
1421
+ self.config = config
1422
+
1423
+ def get_encoder(self):
1424
+ return self.model.encoder
1425
+
1426
+ def get_decoder(self):
1427
+ return self.model.decoder
1428
+
1429
+ def resize_token_embeddings(self, new_num_tokens: int) -> tf.Variable:
1430
+ new_embeddings = super().resize_token_embeddings(new_num_tokens)
1431
+ return new_embeddings
1432
+
1433
+ def get_output_embeddings(self):
1434
+ return self.lm_head
1435
+
1436
+ def set_output_embeddings(self, new_embeddings):
1437
+ self.lm_head = new_embeddings
1438
+
1439
+ @unpack_inputs
1440
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
1441
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1442
+ def call(
1443
+ self,
1444
+ input_features: TFModelInputType | None = None,
1445
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1446
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
1447
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1448
+ head_mask: np.ndarray | tf.Tensor | None = None,
1449
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
1450
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
1451
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
1452
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1453
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
1454
+ labels: np.ndarray | tf.Tensor | None = None,
1455
+ use_cache: Optional[bool] = None,
1456
+ output_attentions: Optional[bool] = None,
1457
+ output_hidden_states: Optional[bool] = None,
1458
+ return_dict: Optional[bool] = None,
1459
+ training: Optional[bool] = False,
1460
+ **kwargs,
1461
+ ) -> Union[Tuple, TFSeq2SeqLMOutput]:
1462
+ r"""
1463
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1464
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1465
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1466
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1467
+
1468
+ Returns:
1469
+
1470
+ Example:
1471
+
1472
+ ```python
1473
+ >>> import tensorflow as tf
1474
+ >>> from transformers import Speech2TextProcessor, TFSpeech2TextForConditionalGeneration
1475
+ >>> from datasets import load_dataset
1476
+ >>> import soundfile as sf
1477
+
1478
+ >>> model = TFSpeech2TextForConditionalGeneration.from_pretrained(
1479
+ ... "facebook/s2t-small-librispeech-asr", from_pt=True
1480
+ ... )
1481
+ >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
1482
+
1483
+
1484
+ >>> def map_to_array(batch):
1485
+ ... speech, _ = sf.read(batch["file"])
1486
+ ... batch["speech"] = speech
1487
+ ... return batch
1488
+
1489
+
1490
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1491
+ >>> ds = ds.map(map_to_array)
1492
+ >>> ds.set_format(type="tf")
1493
+
1494
+ >>> input_features = processor(
1495
+ ... ds["speech"][0], sampling_rate=16000, return_tensors="tf"
1496
+ ... ).input_features # Batch size 1
1497
+ >>> generated_ids = model.generate(input_features)
1498
+
1499
+ >>> transcription = processor.batch_decode(generated_ids)
1500
+ ```"""
1501
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1502
+
1503
+ if labels is not None:
1504
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1505
+ decoder_input_ids = shift_tokens_right(
1506
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1507
+ )
1508
+
1509
+ outputs = self.model(
1510
+ input_features=input_features,
1511
+ attention_mask=attention_mask,
1512
+ decoder_input_ids=decoder_input_ids,
1513
+ encoder_outputs=encoder_outputs,
1514
+ decoder_attention_mask=decoder_attention_mask,
1515
+ head_mask=head_mask,
1516
+ decoder_head_mask=decoder_head_mask,
1517
+ cross_attn_head_mask=cross_attn_head_mask,
1518
+ past_key_values=past_key_values,
1519
+ decoder_inputs_embeds=decoder_inputs_embeds,
1520
+ use_cache=use_cache,
1521
+ output_attentions=output_attentions,
1522
+ output_hidden_states=output_hidden_states,
1523
+ return_dict=return_dict,
1524
+ training=training,
1525
+ )
1526
+ lm_logits = self.lm_head(outputs[0])
1527
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
1528
+
1529
+ if not return_dict:
1530
+ output = (lm_logits,) + outputs[1:]
1531
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1532
+
1533
+ return TFSeq2SeqLMOutput(
1534
+ loss=masked_lm_loss,
1535
+ logits=lm_logits,
1536
+ past_key_values=outputs.past_key_values,
1537
+ decoder_hidden_states=outputs.decoder_hidden_states,
1538
+ decoder_attentions=outputs.decoder_attentions,
1539
+ cross_attentions=outputs.cross_attentions,
1540
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1541
+ encoder_hidden_states=outputs.encoder_hidden_states,
1542
+ encoder_attentions=outputs.encoder_attentions,
1543
+ )
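For readers tracing the `labels` branch near the top of `call`: when only `labels` are given, the decoder inputs are derived by the usual sequence-to-sequence shift (start token prepended, last position dropped). A hedged, self-contained illustration of that relationship, not using the module's own `shift_tokens_right` helper and with made-up token ids:

```python
import tensorflow as tf

decoder_start_token_id = 2  # illustrative; the real value comes from the config
labels = tf.constant([[45, 67, 89, 2]])  # a tokenized transcription ending in </s>

start_tokens = tf.fill((1, 1), decoder_start_token_id)
decoder_input_ids = tf.concat([start_tokens, labels[:, :-1]], axis=-1)
print(decoder_input_ids.numpy())  # [[ 2 45 67 89]] -> teacher forcing shifted by one step
```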
1544
+
1545
+ def serving_output(self, output):
1546
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1547
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1548
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1549
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1550
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1551
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1552
+
1553
+ return TFSeq2SeqLMOutput(
1554
+ logits=output.logits,
1555
+ past_key_values=pkv,
1556
+ decoder_hidden_states=dec_hs,
1557
+ decoder_attentions=dec_attns,
1558
+ cross_attentions=cross_attns,
1559
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1560
+ encoder_hidden_states=enc_hs,
1561
+ encoder_attentions=enc_attns,
1562
+ )
1563
+
1564
+ def prepare_inputs_for_generation(
1565
+ self,
1566
+ decoder_input_ids,
1567
+ past_key_values=None,
1568
+ attention_mask=None,
1569
+ head_mask=None,
1570
+ decoder_head_mask=None,
1571
+ cross_attn_head_mask=None,
1572
+ use_cache=None,
1573
+ encoder_outputs=None,
1574
+ **kwargs,
1575
+ ):
1576
+ # cut decoder_input_ids if past is used
1577
+ if past_key_values is not None:
1578
+ decoder_input_ids = decoder_input_ids[:, -1:]
1579
+
1580
+ return {
1581
+ "input_features": None, # needs to be passed to make Keras.layer.__call__ happy
1582
+ "encoder_outputs": encoder_outputs,
1583
+ "past_key_values": past_key_values,
1584
+ "decoder_input_ids": decoder_input_ids,
1585
+ "attention_mask": attention_mask,
1586
+ "head_mask": head_mask,
1587
+ "decoder_head_mask": decoder_head_mask,
1588
+ "cross_attn_head_mask": cross_attn_head_mask,
1589
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1590
+ }
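A tiny illustration of the slicing above: once `past_key_values` holds the cache, only the newest token needs to be fed back on each generation step:

```python
import tensorflow as tf

decoder_input_ids = tf.constant([[2, 45, 67, 89]])  # everything generated so far (illustrative ids)
past_key_values = object()  # stands in for a non-empty cache

if past_key_values is not None:
    decoder_input_ids = decoder_input_ids[:, -1:]

print(decoder_input_ids.numpy())  # [[89]] -> only the last token is embedded on this step
```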
1591
+
1592
+ def build(self, input_shape=None):
1593
+ if self.built:
1594
+ return
1595
+ self.built = True
1596
+ if getattr(self, "model", None) is not None:
1597
+ with tf.name_scope(self.model.name):
1598
+ self.model.build(None)
1599
+ if getattr(self, "lm_head", None) is not None:
1600
+ with tf.name_scope(self.lm_head.name):
1601
+ self.lm_head.build([None, None, self.config.d_model])
1602
+
1603
+ def tf_to_pt_weight_rename(self, tf_weight):
1604
+ if tf_weight == "lm_head.weight":
1605
+ return tf_weight, "model.decoder.embed_tokens.weight"
1606
+ else:
1607
+ return (tf_weight,)
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/processing_speech_to_text.py ADDED
@@ -0,0 +1,116 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Speech processor class for Speech2Text
17
+ """
18
+ import warnings
19
+ from contextlib import contextmanager
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+
23
+
24
+ class Speech2TextProcessor(ProcessorMixin):
25
+ r"""
26
+ Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a
27
+ single processor.
28
+
29
+ [`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and
30
+ [`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more
31
+ information.
32
+
33
+ Args:
34
+ feature_extractor (`Speech2TextFeatureExtractor`):
35
+ An instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input.
36
+ tokenizer (`Speech2TextTokenizer`):
37
+ An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input.
38
+ """
39
+
40
+ feature_extractor_class = "Speech2TextFeatureExtractor"
41
+ tokenizer_class = "Speech2TextTokenizer"
42
+
43
+ def __init__(self, feature_extractor, tokenizer):
44
+ super().__init__(feature_extractor, tokenizer)
45
+ self.current_processor = self.feature_extractor
46
+ self._in_target_context_manager = False
47
+
48
+ def __call__(self, *args, **kwargs):
49
+ """
50
+ When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor's
51
+ [`~Speech2TextFeatureExtractor.__call__`] and returns its output. If used in the context
52
+ [`~Speech2TextProcessor.as_target_processor`] this method forwards all its arguments to Speech2TextTokenizer's
53
+ [`~Speech2TextTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more
54
+ information.
55
+ """
56
+ # For backward compatibility
57
+ if self._in_target_context_manager:
58
+ return self.current_processor(*args, **kwargs)
59
+
60
+ if "raw_speech" in kwargs:
61
+ warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
62
+ audio = kwargs.pop("raw_speech")
63
+ else:
64
+ audio = kwargs.pop("audio", None)
65
+ sampling_rate = kwargs.pop("sampling_rate", None)
66
+ text = kwargs.pop("text", None)
67
+ if len(args) > 0:
68
+ audio = args[0]
69
+ args = args[1:]
70
+
71
+ if audio is None and text is None:
72
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
73
+
74
+ if audio is not None:
75
+ inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
76
+ if text is not None:
77
+ encodings = self.tokenizer(text, **kwargs)
78
+
79
+ if text is None:
80
+ return inputs
81
+ elif audio is None:
82
+ return encodings
83
+ else:
84
+ inputs["labels"] = encodings["input_ids"]
85
+ return inputs
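A brief hedged sketch of the two routing paths handled above (audio only vs. audio plus text in the same call); the waveform is random noise standing in for real audio:

```python
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
waveform = np.random.randn(16000).astype(np.float32)  # one second of fake 16 kHz audio

# Audio only -> feature-extractor output (input_features and, typically, attention_mask).
features = processor(audio=waveform, sampling_rate=16000, return_tensors="tf")

# Audio and text together -> the tokenized text is attached as `labels`.
batch = processor(audio=waveform, text="a transcription", sampling_rate=16000, return_tensors="tf")
print(sorted(batch.keys()))  # e.g. ['attention_mask', 'input_features', 'labels']
```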
86
+
87
+ def batch_decode(self, *args, **kwargs):
88
+ """
89
+ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
90
+ refer to the docstring of this method for more information.
91
+ """
92
+ return self.tokenizer.batch_decode(*args, **kwargs)
93
+
94
+ def decode(self, *args, **kwargs):
95
+ """
96
+ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
97
+ to the docstring of this method for more information.
98
+ """
99
+ return self.tokenizer.decode(*args, **kwargs)
100
+
101
+ @contextmanager
102
+ def as_target_processor(self):
103
+ """
104
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
105
+ Speech2Text.
106
+ """
107
+ warnings.warn(
108
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
109
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
110
+ "your audio inputs, or in a separate call."
111
+ )
112
+ self._in_target_context_manager = True
113
+ self.current_processor = self.tokenizer
114
+ yield
115
+ self.current_processor = self.feature_extractor
116
+ self._in_target_context_manager = False
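Finally, a short before/after sketch of the migration the deprecation warning asks for:

```python
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")

# Deprecated: the context manager temporarily swaps in the tokenizer.
with processor.as_target_processor():
    labels = processor("a transcription", return_tensors="tf").input_ids

# Recommended replacement per the warning above: pass the transcription via the `text` keyword.
labels = processor(text="a transcription", return_tensors="tf").input_ids
```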