applied-ai-018 commited on
Commit
4ea5aa8
·
verified ·
1 Parent(s): 3b2b052

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__init__.py +138 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py +392 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py +1570 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py +1522 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +1526 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small.py +258 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py +140 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__init__.py +76 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/configuration_clap.py +432 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py +133 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py +363 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py +0 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py +117 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/__init__.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_model_with_hifigan.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/tokenization_fastspeech2_conformer.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/__init__.py +64 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/__init__.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb_fast.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb.py +446 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb_fast.py +359 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/configuration_qdqbert.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/modeling_qdqbert.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/modeling_qdqbert.py +1739 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__init__.py +80 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/__init__.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/configuration_qwen2.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/modeling_qwen2.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/tokenization_qwen2.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/tokenization_qwen2_fast.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/configuration_qwen2.py +144 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/modeling_qwen2.py +1401 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/tokenization_qwen2.py +345 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/tokenization_qwen2_fast.py +143 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__init__.py +79 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/__init__.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/configuration_splinter.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/modeling_splinter.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter_fast.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/configuration_splinter.py +128 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/modeling_splinter.py +1109 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/tokenization_splinter.py +529 -0
env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__init__.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_blenderbot_small": [
28
+ "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
29
+ "BlenderbotSmallConfig",
30
+ "BlenderbotSmallOnnxConfig",
31
+ ],
32
+ "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
33
+ }
34
+
35
+ try:
36
+ if not is_tokenizers_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
42
+
43
+ try:
44
+ if not is_torch_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ _import_structure["modeling_blenderbot_small"] = [
50
+ "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
51
+ "BlenderbotSmallForCausalLM",
52
+ "BlenderbotSmallForConditionalGeneration",
53
+ "BlenderbotSmallModel",
54
+ "BlenderbotSmallPreTrainedModel",
55
+ ]
56
+
57
+ try:
58
+ if not is_tf_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ _import_structure["modeling_tf_blenderbot_small"] = [
64
+ "TFBlenderbotSmallForConditionalGeneration",
65
+ "TFBlenderbotSmallModel",
66
+ "TFBlenderbotSmallPreTrainedModel",
67
+ ]
68
+
69
+ try:
70
+ if not is_flax_available():
71
+ raise OptionalDependencyNotAvailable()
72
+ except OptionalDependencyNotAvailable:
73
+ pass
74
+ else:
75
+ _import_structure["modeling_flax_blenderbot_small"] = [
76
+ "FlaxBlenderbotSmallForConditionalGeneration",
77
+ "FlaxBlenderbotSmallModel",
78
+ "FlaxBlenderbotSmallPreTrainedModel",
79
+ ]
80
+
81
+ if TYPE_CHECKING:
82
+ from .configuration_blenderbot_small import (
83
+ BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
84
+ BlenderbotSmallConfig,
85
+ BlenderbotSmallOnnxConfig,
86
+ )
87
+ from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
88
+
89
+ try:
90
+ if not is_tokenizers_available():
91
+ raise OptionalDependencyNotAvailable()
92
+ except OptionalDependencyNotAvailable:
93
+ pass
94
+ else:
95
+ from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
96
+
97
+ try:
98
+ if not is_torch_available():
99
+ raise OptionalDependencyNotAvailable()
100
+ except OptionalDependencyNotAvailable:
101
+ pass
102
+ else:
103
+ from .modeling_blenderbot_small import (
104
+ BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
105
+ BlenderbotSmallForCausalLM,
106
+ BlenderbotSmallForConditionalGeneration,
107
+ BlenderbotSmallModel,
108
+ BlenderbotSmallPreTrainedModel,
109
+ )
110
+
111
+ try:
112
+ if not is_tf_available():
113
+ raise OptionalDependencyNotAvailable()
114
+ except OptionalDependencyNotAvailable:
115
+ pass
116
+ else:
117
+ from .modeling_tf_blenderbot_small import (
118
+ TFBlenderbotSmallForConditionalGeneration,
119
+ TFBlenderbotSmallModel,
120
+ TFBlenderbotSmallPreTrainedModel,
121
+ )
122
+
123
+ try:
124
+ if not is_flax_available():
125
+ raise OptionalDependencyNotAvailable()
126
+ except OptionalDependencyNotAvailable:
127
+ pass
128
+ else:
129
+ from .modeling_flax_blenderbot_small import (
130
+ FlaxBlenderbotSmallForConditionalGeneration,
131
+ FlaxBlenderbotSmallModel,
132
+ FlaxBlenderbotSmallPreTrainedModel,
133
+ )
134
+
135
+ else:
136
+ import sys
137
+
138
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py ADDED
@@ -0,0 +1,392 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BlenderbotSmall model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Any, Mapping, Optional
19
+
20
+ from ... import PreTrainedTokenizer
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...file_utils import TensorType, is_torch_available
23
+ from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
24
+ from ...onnx.utils import compute_effective_axis_dimension
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
31
+ "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
32
+ # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
33
+ }
34
+
35
+
36
+ class BlenderbotSmallConfig(PretrainedConfig):
37
+ r"""
38
+ This is the configuration class to store the configuration of a [`BlenderbotSmallModel`]. It is used to instantiate
39
+ an BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a
40
+ configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall
41
+ [facebook/blenderbot_small-90M](https://huggingface.co/facebook/blenderbot_small-90M) architecture.
42
+
43
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
44
+ documentation from [`PretrainedConfig`] for more information.
45
+
46
+
47
+ Args:
48
+ vocab_size (`int`, *optional*, defaults to 50265):
49
+ Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be
50
+ represented by the `inputs_ids` passed when calling [`BlenderbotSmallModel`] or [`TFBlenderbotSmallModel`].
51
+ d_model (`int`, *optional*, defaults to 512):
52
+ Dimensionality of the layers and the pooler layer.
53
+ encoder_layers (`int`, *optional*, defaults to 8):
54
+ Number of encoder layers.
55
+ decoder_layers (`int`, *optional*, defaults to 8):
56
+ Number of decoder layers.
57
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
58
+ Number of attention heads for each attention layer in the Transformer encoder.
59
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
60
+ Number of attention heads for each attention layer in the Transformer decoder.
61
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
62
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
63
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
64
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
65
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
66
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
67
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
68
+ dropout (`float`, *optional*, defaults to 0.1):
69
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
70
+ attention_dropout (`float`, *optional*, defaults to 0.0):
71
+ The dropout ratio for the attention probabilities.
72
+ activation_dropout (`float`, *optional*, defaults to 0.0):
73
+ The dropout ratio for activations inside the fully connected layer.
74
+ max_position_embeddings (`int`, *optional*, defaults to 512):
75
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
76
+ just in case (e.g., 512 or 1024 or 2048).
77
+ init_std (`float`, *optional*, defaults to 0.02):
78
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
79
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
80
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
81
+ for more details.
82
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
83
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
84
+ for more details.
85
+ scale_embedding (`bool`, *optional*, defaults to `False`):
86
+ Scale embeddings by diving by sqrt(d_model).
87
+ use_cache (`bool`, *optional*, defaults to `True`):
88
+ Whether or not the model should return the last key/values attentions (not used by all models)
89
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
90
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
91
+ `eos_token_id`.
92
+
93
+ Example:
94
+
95
+ ```python
96
+ >>> from transformers import BlenderbotSmallConfig, BlenderbotSmallModel
97
+
98
+ >>> # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration
99
+ >>> configuration = BlenderbotSmallConfig()
100
+
101
+ >>> # Initializing a model (with random weights) from the facebook/blenderbot_small-90M style configuration
102
+ >>> model = BlenderbotSmallModel(configuration)
103
+
104
+ >>> # Accessing the model configuration
105
+ >>> configuration = model.config
106
+ ```"""
107
+
108
+ model_type = "blenderbot-small"
109
+ keys_to_ignore_at_inference = ["past_key_values"]
110
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
111
+
112
+ def __init__(
113
+ self,
114
+ vocab_size=50265,
115
+ max_position_embeddings=512,
116
+ encoder_layers=8,
117
+ encoder_ffn_dim=2048,
118
+ encoder_attention_heads=16,
119
+ decoder_layers=8,
120
+ decoder_ffn_dim=2048,
121
+ decoder_attention_heads=16,
122
+ encoder_layerdrop=0.0,
123
+ decoder_layerdrop=0.0,
124
+ use_cache=True,
125
+ is_encoder_decoder=True,
126
+ activation_function="gelu",
127
+ d_model=512,
128
+ dropout=0.1,
129
+ attention_dropout=0.0,
130
+ activation_dropout=0.0,
131
+ init_std=0.02,
132
+ decoder_start_token_id=1,
133
+ scale_embedding=False,
134
+ pad_token_id=0,
135
+ bos_token_id=1,
136
+ eos_token_id=2,
137
+ forced_eos_token_id=2,
138
+ **kwargs,
139
+ ):
140
+ self.vocab_size = vocab_size
141
+ self.max_position_embeddings = max_position_embeddings
142
+ self.d_model = d_model
143
+ self.encoder_ffn_dim = encoder_ffn_dim
144
+ self.encoder_layers = encoder_layers
145
+ self.encoder_attention_heads = encoder_attention_heads
146
+ self.decoder_ffn_dim = decoder_ffn_dim
147
+ self.decoder_layers = decoder_layers
148
+ self.decoder_attention_heads = decoder_attention_heads
149
+ self.dropout = dropout
150
+ self.attention_dropout = attention_dropout
151
+ self.activation_dropout = activation_dropout
152
+ self.activation_function = activation_function
153
+ self.init_std = init_std
154
+ self.encoder_layerdrop = encoder_layerdrop
155
+ self.decoder_layerdrop = decoder_layerdrop
156
+ self.use_cache = use_cache
157
+ self.num_hidden_layers = encoder_layers
158
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
159
+
160
+ super().__init__(
161
+ pad_token_id=pad_token_id,
162
+ bos_token_id=bos_token_id,
163
+ eos_token_id=eos_token_id,
164
+ is_encoder_decoder=is_encoder_decoder,
165
+ decoder_start_token_id=decoder_start_token_id,
166
+ forced_eos_token_id=forced_eos_token_id,
167
+ **kwargs,
168
+ )
169
+
170
+
171
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig
172
+ class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
173
+ @property
174
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
175
+ if self.task in ["default", "seq2seq-lm"]:
176
+ common_inputs = OrderedDict(
177
+ [
178
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
179
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
180
+ ]
181
+ )
182
+
183
+ if self.use_past:
184
+ common_inputs["decoder_input_ids"] = {0: "batch"}
185
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
186
+ else:
187
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
188
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
189
+
190
+ if self.use_past:
191
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
192
+ elif self.task == "causal-lm":
193
+ # TODO: figure this case out.
194
+ common_inputs = OrderedDict(
195
+ [
196
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
197
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
198
+ ]
199
+ )
200
+ if self.use_past:
201
+ num_encoder_layers, _ = self.num_layers
202
+ for i in range(num_encoder_layers):
203
+ common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
204
+ common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
205
+ else:
206
+ common_inputs = OrderedDict(
207
+ [
208
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
209
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
210
+ ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
211
+ ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
212
+ ]
213
+ )
214
+
215
+ return common_inputs
216
+
217
+ @property
218
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
219
+ if self.task in ["default", "seq2seq-lm"]:
220
+ common_outputs = super().outputs
221
+ else:
222
+ common_outputs = super(OnnxConfigWithPast, self).outputs
223
+ if self.use_past:
224
+ num_encoder_layers, _ = self.num_layers
225
+ for i in range(num_encoder_layers):
226
+ common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
227
+ common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
228
+ return common_outputs
229
+
230
+ def _generate_dummy_inputs_for_default_and_seq2seq_lm(
231
+ self,
232
+ tokenizer: PreTrainedTokenizer,
233
+ batch_size: int = -1,
234
+ seq_length: int = -1,
235
+ is_pair: bool = False,
236
+ framework: Optional[TensorType] = None,
237
+ ) -> Mapping[str, Any]:
238
+ encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
239
+ tokenizer, batch_size, seq_length, is_pair, framework
240
+ )
241
+
242
+ # Generate decoder inputs
243
+ decoder_seq_length = seq_length if not self.use_past else 1
244
+ decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
245
+ tokenizer, batch_size, decoder_seq_length, is_pair, framework
246
+ )
247
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
248
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
249
+
250
+ if self.use_past:
251
+ if not is_torch_available():
252
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
253
+ else:
254
+ import torch
255
+ batch, encoder_seq_length = common_inputs["input_ids"].shape
256
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
257
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
258
+ encoder_shape = (
259
+ batch,
260
+ num_encoder_attention_heads,
261
+ encoder_seq_length,
262
+ self._config.hidden_size // num_encoder_attention_heads,
263
+ )
264
+ decoder_past_length = decoder_seq_length + 3
265
+ decoder_shape = (
266
+ batch,
267
+ num_decoder_attention_heads,
268
+ decoder_past_length,
269
+ self._config.hidden_size // num_decoder_attention_heads,
270
+ )
271
+
272
+ common_inputs["decoder_attention_mask"] = torch.cat(
273
+ [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
274
+ )
275
+
276
+ common_inputs["past_key_values"] = []
277
+ # If the number of encoder and decoder layers are present in the model configuration, both are considered
278
+ num_encoder_layers, num_decoder_layers = self.num_layers
279
+ min_num_layers = min(num_encoder_layers, num_decoder_layers)
280
+ max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
281
+ remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
282
+
283
+ for _ in range(min_num_layers):
284
+ common_inputs["past_key_values"].append(
285
+ (
286
+ torch.zeros(decoder_shape),
287
+ torch.zeros(decoder_shape),
288
+ torch.zeros(encoder_shape),
289
+ torch.zeros(encoder_shape),
290
+ )
291
+ )
292
+ # TODO: test this.
293
+ shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
294
+ for _ in range(min_num_layers, max_num_layers):
295
+ common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
296
+ return common_inputs
297
+
298
+ def _generate_dummy_inputs_for_causal_lm(
299
+ self,
300
+ tokenizer: PreTrainedTokenizer,
301
+ batch_size: int = -1,
302
+ seq_length: int = -1,
303
+ is_pair: bool = False,
304
+ framework: Optional[TensorType] = None,
305
+ ) -> Mapping[str, Any]:
306
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
307
+ tokenizer, batch_size, seq_length, is_pair, framework
308
+ )
309
+
310
+ if self.use_past:
311
+ if not is_torch_available():
312
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
313
+ else:
314
+ import torch
315
+ batch, seqlen = common_inputs["input_ids"].shape
316
+ # Not using the same length for past_key_values
317
+ past_key_values_length = seqlen + 2
318
+ num_encoder_layers, _ = self.num_layers
319
+ num_encoder_attention_heads, _ = self.num_attention_heads
320
+ past_shape = (
321
+ batch,
322
+ num_encoder_attention_heads,
323
+ past_key_values_length,
324
+ self._config.hidden_size // num_encoder_attention_heads,
325
+ )
326
+
327
+ mask_dtype = common_inputs["attention_mask"].dtype
328
+ common_inputs["attention_mask"] = torch.cat(
329
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
330
+ )
331
+ common_inputs["past_key_values"] = [
332
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
333
+ ]
334
+ return common_inputs
335
+
336
+ def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
337
+ self,
338
+ tokenizer: PreTrainedTokenizer,
339
+ batch_size: int = -1,
340
+ seq_length: int = -1,
341
+ is_pair: bool = False,
342
+ framework: Optional[TensorType] = None,
343
+ ) -> Mapping[str, Any]:
344
+ # Copied from OnnxConfig.generate_dummy_inputs
345
+ # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
346
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
347
+ batch_size = compute_effective_axis_dimension(
348
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
349
+ )
350
+
351
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
352
+ token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
353
+ seq_length = compute_effective_axis_dimension(
354
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
355
+ )
356
+
357
+ # Generate dummy inputs according to compute batch and sequence
358
+ dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
359
+ common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
360
+ return common_inputs
361
+
362
+ def generate_dummy_inputs(
363
+ self,
364
+ tokenizer: PreTrainedTokenizer,
365
+ batch_size: int = -1,
366
+ seq_length: int = -1,
367
+ is_pair: bool = False,
368
+ framework: Optional[TensorType] = None,
369
+ ) -> Mapping[str, Any]:
370
+ if self.task in ["default", "seq2seq-lm"]:
371
+ common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
372
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
373
+ )
374
+
375
+ elif self.task == "causal-lm":
376
+ common_inputs = self._generate_dummy_inputs_for_causal_lm(
377
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
378
+ )
379
+ else:
380
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
381
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
382
+ )
383
+
384
+ return common_inputs
385
+
386
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
387
+ if self.task in ["default", "seq2seq-lm"]:
388
+ flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
389
+ else:
390
+ flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
391
+ flattened_output, name, idx, t
392
+ )
env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_blenderbot_small.py ADDED
@@ -0,0 +1,1570 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BlenderbotSmall model."""
16
+
17
+
18
+ import copy
19
+ import math
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
29
+ from ...modeling_outputs import (
30
+ BaseModelOutput,
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ CausalLMOutputWithCrossAttentions,
33
+ Seq2SeqLMOutput,
34
+ Seq2SeqModelOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import (
38
+ add_end_docstrings,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ )
44
+ from .configuration_blenderbot_small import BlenderbotSmallConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CONFIG_FOR_DOC = "BlenderbotSmallConfig"
50
+
51
+
52
+ BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = [
53
+ "facebook/blenderbot_small-90M",
54
+ # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
55
+ ]
56
+
57
+
58
+ # Copied from transformers.models.bart.modeling_bart.shift_tokens_right
59
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
60
+ """
61
+ Shift input ids one token to the right.
62
+ """
63
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
64
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
65
+ shifted_input_ids[:, 0] = decoder_start_token_id
66
+
67
+ if pad_token_id is None:
68
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
69
+ # replace possible -100 values in labels by `pad_token_id`
70
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
71
+
72
+ return shifted_input_ids
73
+
74
+
75
+ # Copied from transformers.models.blenderbot.modeling_blenderbot.BlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
76
+ class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding):
77
+ """
78
+ This module learns positional embeddings up to a fixed maximum size.
79
+ """
80
+
81
+ def __init__(self, num_embeddings: int, embedding_dim: int):
82
+ super().__init__(num_embeddings, embedding_dim)
83
+
84
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
85
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
86
+ bsz, seq_len = input_ids_shape[:2]
87
+ positions = torch.arange(
88
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
89
+ )
90
+ return super().forward(positions)
91
+
92
+
93
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BlenderbotSmall
94
+ class BlenderbotSmallAttention(nn.Module):
95
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
96
+
97
+ def __init__(
98
+ self,
99
+ embed_dim: int,
100
+ num_heads: int,
101
+ dropout: float = 0.0,
102
+ is_decoder: bool = False,
103
+ bias: bool = True,
104
+ is_causal: bool = False,
105
+ config: Optional[BlenderbotSmallConfig] = None,
106
+ ):
107
+ super().__init__()
108
+ self.embed_dim = embed_dim
109
+ self.num_heads = num_heads
110
+ self.dropout = dropout
111
+ self.head_dim = embed_dim // num_heads
112
+ self.config = config
113
+
114
+ if (self.head_dim * num_heads) != self.embed_dim:
115
+ raise ValueError(
116
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
117
+ f" and `num_heads`: {num_heads})."
118
+ )
119
+ self.scaling = self.head_dim**-0.5
120
+ self.is_decoder = is_decoder
121
+ self.is_causal = is_causal
122
+
123
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
124
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
125
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
126
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
127
+
128
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
129
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
130
+
131
+ def forward(
132
+ self,
133
+ hidden_states: torch.Tensor,
134
+ key_value_states: Optional[torch.Tensor] = None,
135
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
136
+ attention_mask: Optional[torch.Tensor] = None,
137
+ layer_head_mask: Optional[torch.Tensor] = None,
138
+ output_attentions: bool = False,
139
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
140
+ """Input shape: Batch x Time x Channel"""
141
+
142
+ # if key_value_states are provided this layer is used as a cross-attention layer
143
+ # for the decoder
144
+ is_cross_attention = key_value_states is not None
145
+
146
+ bsz, tgt_len, _ = hidden_states.size()
147
+
148
+ # get query proj
149
+ query_states = self.q_proj(hidden_states) * self.scaling
150
+ # get key, value proj
151
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
152
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
153
+ # the provided `key_value_states` to support prefix tuning
154
+ if (
155
+ is_cross_attention
156
+ and past_key_value is not None
157
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
158
+ ):
159
+ # reuse k,v, cross_attentions
160
+ key_states = past_key_value[0]
161
+ value_states = past_key_value[1]
162
+ elif is_cross_attention:
163
+ # cross_attentions
164
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
165
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
166
+ elif past_key_value is not None:
167
+ # reuse k, v, self_attention
168
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
169
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
170
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
171
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
172
+ else:
173
+ # self_attention
174
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
175
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
176
+
177
+ if self.is_decoder:
178
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
179
+ # Further calls to cross_attention layer can then reuse all cross-attention
180
+ # key/value_states (first "if" case)
181
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
182
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
183
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
184
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
185
+ past_key_value = (key_states, value_states)
186
+
187
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
188
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
189
+ key_states = key_states.reshape(*proj_shape)
190
+ value_states = value_states.reshape(*proj_shape)
191
+
192
+ src_len = key_states.size(1)
193
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
194
+
195
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
196
+ raise ValueError(
197
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
198
+ f" {attn_weights.size()}"
199
+ )
200
+
201
+ if attention_mask is not None:
202
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
203
+ raise ValueError(
204
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
205
+ )
206
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
207
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
208
+
209
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
210
+
211
+ if layer_head_mask is not None:
212
+ if layer_head_mask.size() != (self.num_heads,):
213
+ raise ValueError(
214
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
215
+ f" {layer_head_mask.size()}"
216
+ )
217
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
218
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
219
+
220
+ if output_attentions:
221
+ # this operation is a bit awkward, but it's required to
222
+ # make sure that attn_weights keeps its gradient.
223
+ # In order to do so, attn_weights have to be reshaped
224
+ # twice and have to be reused in the following
225
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
226
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
227
+ else:
228
+ attn_weights_reshaped = None
229
+
230
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
231
+
232
+ attn_output = torch.bmm(attn_probs, value_states)
233
+
234
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
235
+ raise ValueError(
236
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
237
+ f" {attn_output.size()}"
238
+ )
239
+
240
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
241
+ attn_output = attn_output.transpose(1, 2)
242
+
243
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
244
+ # partitioned across GPUs when using tensor-parallelism.
245
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
246
+
247
+ attn_output = self.out_proj(attn_output)
248
+
249
+ return attn_output, attn_weights_reshaped, past_key_value
250
+
251
+
252
+ # Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
253
+ class BlenderbotSmallEncoderLayer(nn.Module):
254
+ def __init__(self, config: BlenderbotSmallConfig):
255
+ super().__init__()
256
+ self.embed_dim = config.d_model
257
+
258
+ self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](
259
+ embed_dim=self.embed_dim,
260
+ num_heads=config.encoder_attention_heads,
261
+ dropout=config.attention_dropout,
262
+ config=config,
263
+ )
264
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
265
+ self.dropout = config.dropout
266
+ self.activation_fn = ACT2FN[config.activation_function]
267
+ self.activation_dropout = config.activation_dropout
268
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
269
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
270
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
271
+
272
+ def forward(
273
+ self,
274
+ hidden_states: torch.FloatTensor,
275
+ attention_mask: torch.FloatTensor,
276
+ layer_head_mask: torch.FloatTensor,
277
+ output_attentions: Optional[bool] = False,
278
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
279
+ """
280
+ Args:
281
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
282
+ attention_mask (`torch.FloatTensor`): attention mask of size
283
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
284
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
285
+ `(encoder_attention_heads,)`.
286
+ output_attentions (`bool`, *optional*):
287
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
288
+ returned tensors for more detail.
289
+ """
290
+ residual = hidden_states
291
+ hidden_states, attn_weights, _ = self.self_attn(
292
+ hidden_states=hidden_states,
293
+ attention_mask=attention_mask,
294
+ layer_head_mask=layer_head_mask,
295
+ output_attentions=output_attentions,
296
+ )
297
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
298
+ hidden_states = residual + hidden_states
299
+ hidden_states = self.self_attn_layer_norm(hidden_states)
300
+
301
+ residual = hidden_states
302
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
303
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
304
+ hidden_states = self.fc2(hidden_states)
305
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
306
+ hidden_states = residual + hidden_states
307
+ hidden_states = self.final_layer_norm(hidden_states)
308
+
309
+ if hidden_states.dtype == torch.float16 and (
310
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
311
+ ):
312
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
313
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
314
+
315
+ outputs = (hidden_states,)
316
+
317
+ if output_attentions:
318
+ outputs += (attn_weights,)
319
+
320
+ return outputs
321
+
322
+
323
+ # TODO: Implement attention with SDPA for TimeSeriesTransformer.
324
+ BLENDERBOT_SMALL_ATTENTION_CLASSES = {
325
+ "eager": BlenderbotSmallAttention,
326
+ }
327
+
328
+
329
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
330
+ class BlenderbotSmallDecoderLayer(nn.Module):
331
+ def __init__(self, config: BlenderbotSmallConfig):
332
+ super().__init__()
333
+ self.embed_dim = config.d_model
334
+
335
+ self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](
336
+ embed_dim=self.embed_dim,
337
+ num_heads=config.decoder_attention_heads,
338
+ dropout=config.attention_dropout,
339
+ is_decoder=True,
340
+ is_causal=True,
341
+ config=config,
342
+ )
343
+ self.dropout = config.dropout
344
+ self.activation_fn = ACT2FN[config.activation_function]
345
+ self.activation_dropout = config.activation_dropout
346
+
347
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
348
+ self.encoder_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](
349
+ self.embed_dim,
350
+ config.decoder_attention_heads,
351
+ dropout=config.attention_dropout,
352
+ is_decoder=True,
353
+ config=config,
354
+ )
355
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
356
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
357
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
358
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
359
+
360
+ def forward(
361
+ self,
362
+ hidden_states: torch.Tensor,
363
+ attention_mask: Optional[torch.Tensor] = None,
364
+ encoder_hidden_states: Optional[torch.Tensor] = None,
365
+ encoder_attention_mask: Optional[torch.Tensor] = None,
366
+ layer_head_mask: Optional[torch.Tensor] = None,
367
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
368
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
369
+ output_attentions: Optional[bool] = False,
370
+ use_cache: Optional[bool] = True,
371
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
372
+ """
373
+ Args:
374
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
375
+ attention_mask (`torch.FloatTensor`): attention mask of size
376
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
377
+ encoder_hidden_states (`torch.FloatTensor`):
378
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
379
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
380
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
381
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
382
+ `(encoder_attention_heads,)`.
383
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
384
+ size `(decoder_attention_heads,)`.
385
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
386
+ output_attentions (`bool`, *optional*):
387
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
388
+ returned tensors for more detail.
389
+ """
390
+ residual = hidden_states
391
+
392
+ # Self Attention
393
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
394
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
395
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
396
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
397
+ hidden_states=hidden_states,
398
+ past_key_value=self_attn_past_key_value,
399
+ attention_mask=attention_mask,
400
+ layer_head_mask=layer_head_mask,
401
+ output_attentions=output_attentions,
402
+ )
403
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
404
+ hidden_states = residual + hidden_states
405
+ hidden_states = self.self_attn_layer_norm(hidden_states)
406
+
407
+ # Cross-Attention Block
408
+ cross_attn_present_key_value = None
409
+ cross_attn_weights = None
410
+ if encoder_hidden_states is not None:
411
+ residual = hidden_states
412
+
413
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
414
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
415
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
416
+ hidden_states=hidden_states,
417
+ key_value_states=encoder_hidden_states,
418
+ attention_mask=encoder_attention_mask,
419
+ layer_head_mask=cross_attn_layer_head_mask,
420
+ past_key_value=cross_attn_past_key_value,
421
+ output_attentions=output_attentions,
422
+ )
423
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
424
+ hidden_states = residual + hidden_states
425
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
426
+
427
+ # add cross-attn to positions 3,4 of present_key_value tuple
428
+ present_key_value = present_key_value + cross_attn_present_key_value
429
+
430
+ # Fully Connected
431
+ residual = hidden_states
432
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
433
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
434
+ hidden_states = self.fc2(hidden_states)
435
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
436
+ hidden_states = residual + hidden_states
437
+ hidden_states = self.final_layer_norm(hidden_states)
438
+
439
+ outputs = (hidden_states,)
440
+
441
+ if output_attentions:
442
+ outputs += (self_attn_weights, cross_attn_weights)
443
+
444
+ if use_cache:
445
+ outputs += (present_key_value,)
446
+
447
+ return outputs
448
+
449
+
450
+ class BlenderbotSmallPreTrainedModel(PreTrainedModel):
451
+ config_class = BlenderbotSmallConfig
452
+ base_model_prefix = "model"
453
+ supports_gradient_checkpointing = True
454
+
455
+ def _init_weights(self, module):
456
+ std = self.config.init_std
457
+ if isinstance(module, nn.Linear):
458
+ module.weight.data.normal_(mean=0.0, std=std)
459
+ if module.bias is not None:
460
+ module.bias.data.zero_()
461
+ elif isinstance(module, nn.Embedding):
462
+ module.weight.data.normal_(mean=0.0, std=std)
463
+ if module.padding_idx is not None:
464
+ module.weight.data[module.padding_idx].zero_()
465
+
466
+ @property
467
+ def dummy_inputs(self):
468
+ pad_token = self.config.pad_token_id
469
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
470
+ dummy_inputs = {
471
+ "attention_mask": input_ids.ne(pad_token),
472
+ "input_ids": input_ids,
473
+ "decoder_input_ids": input_ids,
474
+ }
475
+ return dummy_inputs
476
+
477
+
478
+ BLENDERBOT_SMALL_START_DOCSTRING = r"""
479
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
480
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
481
+ etc.)
482
+
483
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
484
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
485
+ and behavior.
486
+
487
+ Parameters:
488
+ config ([`BlenderbotSmallConfig`]):
489
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
490
+ load the weights associated with the model, only the configuration. Check out the
491
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
492
+ """
493
+
494
+ BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
495
+ Conversation example:
496
+
497
+ ```python
498
+ >>> from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration
499
+
500
+ >>> mname = "facebook/blenderbot_small-90M"
501
+ >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)
502
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
503
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
504
+ >>> print("Human: ", UTTERANCE)
505
+ Human: My friends are cool but they eat too many carbs.
506
+
507
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
508
+ >>> reply_ids = model.generate(**inputs)
509
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
510
+ Bot: what kind of carbs do they eat? i don't know much about carbs.
511
+
512
+ >>> REPLY = "I'm not sure"
513
+ >>> print("Human: ", REPLY)
514
+ Human: I'm not sure
515
+
516
+ >>> NEXT_UTTERANCE = (
517
+ ... "My friends are cool but they eat too many carbs.__end__ __start__what kind of carbs do they eat? "
518
+ ... "i don't know much about carbs__end__ "
519
+ ... "__start__ I'm not sure."
520
+ ... )
521
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")
522
+ >>> next_reply_ids = model.generate(**inputs)
523
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
524
+ Bot: they eat a lot of carbs. carbs are high in fat, protein, and fats.
525
+ ```
526
+ """
527
+
528
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
529
+ Args:
530
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
531
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
532
+ it.
533
+
534
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
535
+ [`PreTrainedTokenizer.__call__`] for details.
536
+
537
+ [What are input IDs?](../glossary#input-ids)
538
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
539
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
540
+
541
+ - 1 for tokens that are **not masked**,
542
+ - 0 for tokens that are **masked**.
543
+
544
+ [What are attention masks?](../glossary#attention-mask)
545
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
546
+ Indices of decoder input sequence tokens in the vocabulary.
547
+
548
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
549
+ [`PreTrainedTokenizer.__call__`] for details.
550
+
551
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
552
+
553
+ BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
554
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
555
+ `past_key_values`).
556
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
557
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
558
+ be used by default.
559
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
560
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
561
+
562
+ - 1 indicates the head is **not masked**,
563
+ - 0 indicates the head is **masked**.
564
+
565
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
566
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
567
+
568
+ - 1 indicates the head is **not masked**,
569
+ - 0 indicates the head is **masked**.
570
+
571
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
572
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
573
+ 1]`:
574
+
575
+ - 1 indicates the head is **not masked**,
576
+ - 0 indicates the head is **masked**.
577
+
578
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
579
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
580
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
581
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
582
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
583
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
584
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
585
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
586
+
587
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
588
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
589
+
590
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
591
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
592
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
593
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
594
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
595
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
596
+ than the model's internal embedding lookup matrix.
597
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
598
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
599
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
600
+ input (see `past_key_values`). This is useful if you want more control over how to convert
601
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
602
+
603
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
604
+ of `inputs_embeds`.
605
+ use_cache (`bool`, *optional*):
606
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
607
+ `past_key_values`).
608
+ output_attentions (`bool`, *optional*):
609
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
610
+ tensors for more detail.
611
+ output_hidden_states (`bool`, *optional*):
612
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
613
+ more detail.
614
+ return_dict (`bool`, *optional*):
615
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
616
+ """
617
+
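The `past_key_values` / `use_cache` mechanics documented above can be exercised directly. A minimal sketch of two greedy decoding steps, where only the newest token is fed once a cache exists (checkpoint name taken from the examples in this file):

```python
import torch
from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")

enc = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="pt")
decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])

# first step: full forward pass, key/value states are cached
out = model(**enc, decoder_input_ids=decoder_input_ids, use_cache=True)
next_token = out.logits[:, -1].argmax(dim=-1, keepdim=True)

# later steps: only the newest decoder token plus past_key_values are needed
out = model(**enc, decoder_input_ids=next_token, past_key_values=out.past_key_values, use_cache=True)
```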
618
+
619
+ class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel):
620
+ """
621
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
622
+ [`BlenderbotSmallEncoderLayer`].
623
+
624
+ Args:
625
+ config: BlenderbotSmallConfig
626
+ embed_tokens (nn.Embedding): output embedding
627
+ """
628
+
629
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
630
+ super().__init__(config)
631
+
632
+ self.dropout = config.dropout
633
+ self.layerdrop = config.encoder_layerdrop
634
+
635
+ embed_dim = config.d_model
636
+ self.padding_idx = config.pad_token_id
637
+ self.max_source_positions = config.max_position_embeddings
638
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
639
+
640
+ if embed_tokens is not None:
641
+ self.embed_tokens = embed_tokens
642
+ else:
643
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
644
+
645
+ self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
646
+ config.max_position_embeddings,
647
+ embed_dim,
648
+ )
649
+ self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)])
650
+ self.layernorm_embedding = nn.LayerNorm(embed_dim)
651
+
652
+ self.gradient_checkpointing = False
653
+ # Initialize weights and apply final processing
654
+ self.post_init()
655
+
656
+ def forward(
657
+ self,
658
+ input_ids=None,
659
+ attention_mask=None,
660
+ head_mask=None,
661
+ inputs_embeds=None,
662
+ output_attentions=None,
663
+ output_hidden_states=None,
664
+ return_dict=None,
665
+ ):
666
+ r"""
667
+ Args:
668
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
669
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
670
+ provide it.
671
+
672
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
673
+ [`PreTrainedTokenizer.__call__`] for details.
674
+
675
+ [What are input IDs?](../glossary#input-ids)
676
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
677
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
678
+
679
+ - 1 for tokens that are **not masked**,
680
+ - 0 for tokens that are **masked**.
681
+
682
+ [What are attention masks?](../glossary#attention-mask)
683
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
684
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
685
+
686
+ - 1 indicates the head is **not masked**,
687
+ - 0 indicates the head is **masked**.
688
+
689
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
690
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
691
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
692
+ than the model's internal embedding lookup matrix.
693
+ output_attentions (`bool`, *optional*):
694
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
695
+ returned tensors for more detail.
696
+ output_hidden_states (`bool`, *optional*):
697
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
698
+ for more detail.
699
+ return_dict (`bool`, *optional*):
700
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
701
+ """
702
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
703
+ output_hidden_states = (
704
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
705
+ )
706
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
707
+
708
+ # retrieve input_ids and inputs_embeds
709
+ if input_ids is not None and inputs_embeds is not None:
710
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
711
+ elif input_ids is not None:
712
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
713
+ input_shape = input_ids.size()
714
+ input_ids = input_ids.view(-1, input_shape[-1])
715
+ elif inputs_embeds is not None:
716
+ input_shape = inputs_embeds.size()[:-1]
717
+ else:
718
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
719
+
720
+ if inputs_embeds is None:
721
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
722
+
723
+ embed_pos = self.embed_positions(input_shape)
724
+
725
+ hidden_states = inputs_embeds + embed_pos
726
+ hidden_states = self.layernorm_embedding(hidden_states)
727
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
728
+
729
+ # expand attention_mask
730
+ if attention_mask is not None:
731
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
732
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
733
+
734
+ encoder_states = () if output_hidden_states else None
735
+ all_attentions = () if output_attentions else None
736
+
737
+ # check if head_mask has a correct number of layers specified if desired
738
+ if head_mask is not None:
739
+ if head_mask.size()[0] != len(self.layers):
740
+ raise ValueError(
741
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
742
+ f" {head_mask.size()[0]}."
743
+ )
744
+ for idx, encoder_layer in enumerate(self.layers):
745
+ if output_hidden_states:
746
+ encoder_states = encoder_states + (hidden_states,)
747
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
748
+ to_drop = False
749
+ if self.training:
750
+ dropout_probability = torch.rand([])
751
+ if dropout_probability < self.layerdrop: # skip the layer
752
+ to_drop = True
753
+
754
+ if to_drop:
755
+ layer_outputs = (None, None)
756
+ else:
757
+ if self.gradient_checkpointing and self.training:
758
+ layer_outputs = self._gradient_checkpointing_func(
759
+ encoder_layer.__call__,
760
+ hidden_states,
761
+ attention_mask,
762
+ (head_mask[idx] if head_mask is not None else None),
763
+ output_attentions,
764
+ )
765
+ else:
766
+ layer_outputs = encoder_layer(
767
+ hidden_states,
768
+ attention_mask,
769
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
770
+ output_attentions=output_attentions,
771
+ )
772
+
773
+ hidden_states = layer_outputs[0]
774
+
775
+ if output_attentions:
776
+ all_attentions = all_attentions + (layer_outputs[1],)
777
+
778
+ if output_hidden_states:
779
+ encoder_states = encoder_states + (hidden_states,)
780
+
781
+ if not return_dict:
782
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
783
+ return BaseModelOutput(
784
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
785
+ )
786
+
787
+
788
+ class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel):
789
+ """
790
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotSmallDecoderLayer`]
791
+
792
+ Args:
793
+ config: BlenderbotSmallConfig
794
+ embed_tokens (nn.Embedding): output embedding
795
+ """
796
+
797
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
798
+ super().__init__(config)
799
+ self.dropout = config.dropout
800
+ self.layerdrop = config.decoder_layerdrop
801
+ self.padding_idx = config.pad_token_id
802
+ self.max_target_positions = config.max_position_embeddings
803
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
804
+
805
+ if embed_tokens is not None:
806
+ self.embed_tokens = embed_tokens
807
+ else:
808
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
809
+
810
+ self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
811
+ config.max_position_embeddings,
812
+ config.d_model,
813
+ )
814
+ self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)])
815
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
816
+
817
+ self.gradient_checkpointing = False
818
+ # Initialize weights and apply final processing
819
+ self.post_init()
820
+
821
+ def get_input_embeddings(self):
822
+ return self.embed_tokens
823
+
824
+ def set_input_embeddings(self, value):
825
+ self.embed_tokens = value
826
+
827
+ def forward(
828
+ self,
829
+ input_ids=None,
830
+ attention_mask=None,
831
+ encoder_hidden_states=None,
832
+ encoder_attention_mask=None,
833
+ head_mask=None,
834
+ cross_attn_head_mask=None,
835
+ past_key_values=None,
836
+ inputs_embeds=None,
837
+ use_cache=None,
838
+ output_attentions=None,
839
+ output_hidden_states=None,
840
+ return_dict=None,
841
+ ):
842
+ r"""
843
+ Args:
844
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
845
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
846
+ provide it.
847
+
848
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
849
+ [`PreTrainedTokenizer.__call__`] for details.
850
+
851
+ [What are input IDs?](../glossary#input-ids)
852
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
853
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
854
+
855
+ - 1 for tokens that are **not masked**,
856
+ - 0 for tokens that are **masked**.
857
+
858
+ [What are attention masks?](../glossary#attention-mask)
859
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
860
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
861
+ of the decoder.
862
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
863
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
864
+ selected in `[0, 1]`:
865
+
866
+ - 1 for tokens that are **not masked**,
867
+ - 0 for tokens that are **masked**.
868
+
869
+ [What are attention masks?](../glossary#attention-mask)
870
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
871
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
872
+
873
+ - 1 indicates the head is **not masked**,
874
+ - 0 indicates the head is **masked**.
875
+
876
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
877
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
878
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
879
+
880
+ - 1 indicates the head is **not masked**,
881
+ - 0 indicates the head is **masked**.
882
+
883
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
884
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
885
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
886
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
887
+
888
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
889
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
890
+
891
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
892
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
893
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
894
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
895
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
896
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
897
+ than the model's internal embedding lookup matrix.
898
+ output_attentions (`bool`, *optional*):
899
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
900
+ returned tensors for more detail.
901
+ output_hidden_states (`bool`, *optional*):
902
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
903
+ for more detail.
904
+ return_dict (`bool`, *optional*):
905
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
906
+ """
907
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
908
+ output_hidden_states = (
909
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
910
+ )
911
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
912
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
913
+
914
+ # retrieve input_ids and inputs_embeds
915
+ if input_ids is not None and inputs_embeds is not None:
916
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
917
+ elif input_ids is not None:
918
+ input_shape = input_ids.size()
919
+ input_ids = input_ids.view(-1, input_shape[-1])
920
+ elif inputs_embeds is not None:
921
+ input_shape = inputs_embeds.size()[:-1]
922
+ else:
923
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
924
+
925
+ # past_key_values_length
926
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
927
+
928
+ if inputs_embeds is None:
929
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
930
+
931
+ attention_mask = _prepare_4d_causal_attention_mask(
932
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
933
+ )
934
+
935
+ # expand encoder attention mask
936
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
937
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
938
+ encoder_attention_mask = _prepare_4d_attention_mask(
939
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
940
+ )
941
+
942
+ # embed positions
943
+ positions = self.embed_positions(input_shape, past_key_values_length)
944
+
945
+ # BlenderbotSmall applies layer norm on hidden_states
946
+ inputs_embeds = self.layernorm_embedding(inputs_embeds)
947
+ hidden_states = inputs_embeds + positions
948
+
949
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
950
+
951
+ if self.gradient_checkpointing and self.training:
952
+ if use_cache:
953
+ logger.warning_once(
954
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
955
+ )
956
+ use_cache = False
957
+
958
+ # decoder layers
959
+ all_hidden_states = () if output_hidden_states else None
960
+ all_self_attns = () if output_attentions else None
961
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
962
+ next_decoder_cache = () if use_cache else None
963
+
964
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
965
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
966
+ if attn_mask is not None:
967
+ if attn_mask.size()[0] != len(self.layers):
968
+ raise ValueError(
969
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
970
+ f" {head_mask.size()[0]}."
971
+ )
972
+ for idx, decoder_layer in enumerate(self.layers):
973
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
974
+ if output_hidden_states:
975
+ all_hidden_states += (hidden_states,)
976
+ if self.training:
977
+ dropout_probability = torch.rand([])
978
+ if dropout_probability < self.layerdrop:
979
+ continue
980
+
981
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
982
+
983
+ if self.gradient_checkpointing and self.training:
984
+ layer_outputs = self._gradient_checkpointing_func(
985
+ decoder_layer.__call__,
986
+ hidden_states,
987
+ attention_mask,
988
+ encoder_hidden_states,
989
+ encoder_attention_mask,
990
+ head_mask[idx] if head_mask is not None else None,
991
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
992
+ None,
993
+ output_attentions,
994
+ use_cache,
995
+ )
996
+ else:
997
+ layer_outputs = decoder_layer(
998
+ hidden_states,
999
+ attention_mask=attention_mask,
1000
+ encoder_hidden_states=encoder_hidden_states,
1001
+ encoder_attention_mask=encoder_attention_mask,
1002
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1003
+ cross_attn_layer_head_mask=(
1004
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1005
+ ),
1006
+ past_key_value=past_key_value,
1007
+ output_attentions=output_attentions,
1008
+ use_cache=use_cache,
1009
+ )
1010
+ hidden_states = layer_outputs[0]
1011
+
1012
+ if use_cache:
1013
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
1014
+
1015
+ if output_attentions:
1016
+ all_self_attns += (layer_outputs[1],)
1017
+
1018
+ if encoder_hidden_states is not None:
1019
+ all_cross_attentions += (layer_outputs[2],)
1020
+
1021
+ # add hidden states from the last decoder layer
1022
+ if output_hidden_states:
1023
+ all_hidden_states += (hidden_states,)
1024
+
1025
+ next_cache = next_decoder_cache if use_cache else None
1026
+ if not return_dict:
1027
+ return tuple(
1028
+ v
1029
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
1030
+ if v is not None
1031
+ )
1032
+ return BaseModelOutputWithPastAndCrossAttentions(
1033
+ last_hidden_state=hidden_states,
1034
+ past_key_values=next_cache,
1035
+ hidden_states=all_hidden_states,
1036
+ attentions=all_self_attns,
1037
+ cross_attentions=all_cross_attentions,
1038
+ )
1039
+
1040
+
1041
+ @add_start_docstrings(
1042
+ "The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.",
1043
+ BLENDERBOT_SMALL_START_DOCSTRING,
1044
+ )
1045
+ class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel):
1046
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]
1047
+
1048
+ def __init__(self, config: BlenderbotSmallConfig):
1049
+ super().__init__(config)
1050
+
1051
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
1052
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
1053
+
1054
+ self.encoder = BlenderbotSmallEncoder(config, self.shared)
1055
+ self.decoder = BlenderbotSmallDecoder(config, self.shared)
1056
+
1057
+ # Initialize weights and apply final processing
1058
+ self.post_init()
1059
+
1060
+ def get_input_embeddings(self):
1061
+ return self.shared
1062
+
1063
+ def set_input_embeddings(self, value):
1064
+ self.shared = value
1065
+ self.encoder.embed_tokens = self.shared
1066
+ self.decoder.embed_tokens = self.shared
1067
+
1068
+ def get_encoder(self):
1069
+ return self.encoder
1070
+
1071
+ def get_decoder(self):
1072
+ return self.decoder
1073
+
1074
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
1075
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
1076
+ def forward(
1077
+ self,
1078
+ input_ids: Optional[torch.LongTensor] = None,
1079
+ attention_mask: Optional[torch.Tensor] = None,
1080
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1081
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1082
+ head_mask: Optional[torch.Tensor] = None,
1083
+ decoder_head_mask: Optional[torch.Tensor] = None,
1084
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1085
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
1086
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1087
+ inputs_embeds: Optional[torch.Tensor] = None,
1088
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1089
+ use_cache: Optional[bool] = None,
1090
+ output_attentions: Optional[bool] = None,
1091
+ output_hidden_states: Optional[bool] = None,
1092
+ return_dict: Optional[bool] = None,
1093
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
1094
+ r"""
1095
+ Returns:
1096
+
1097
+ Example:
1098
+
1099
+ ```python
1100
+ >>> from transformers import AutoTokenizer, BlenderbotSmallModel
1101
+
1102
+ >>> model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")
1103
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1104
+
1105
+ >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
1106
+ >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt") # Batch size 1
1107
+ >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
1108
+
1109
+ >>> last_hidden_states = outputs.last_hidden_state
1110
+ >>> list(last_hidden_states.shape)
1111
+ [1, 3, 512]
1112
+ ```"""
1113
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1114
+ output_hidden_states = (
1115
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1116
+ )
1117
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1118
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1119
+
1120
+ if encoder_outputs is None:
1121
+ encoder_outputs = self.encoder(
1122
+ input_ids=input_ids,
1123
+ attention_mask=attention_mask,
1124
+ head_mask=head_mask,
1125
+ inputs_embeds=inputs_embeds,
1126
+ output_attentions=output_attentions,
1127
+ output_hidden_states=output_hidden_states,
1128
+ return_dict=return_dict,
1129
+ )
1130
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
1131
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1132
+ encoder_outputs = BaseModelOutput(
1133
+ last_hidden_state=encoder_outputs[0],
1134
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1135
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1136
+ )
1137
+
1138
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
1139
+ decoder_outputs = self.decoder(
1140
+ input_ids=decoder_input_ids,
1141
+ attention_mask=decoder_attention_mask,
1142
+ encoder_hidden_states=encoder_outputs[0],
1143
+ encoder_attention_mask=attention_mask,
1144
+ head_mask=decoder_head_mask,
1145
+ cross_attn_head_mask=cross_attn_head_mask,
1146
+ past_key_values=past_key_values,
1147
+ inputs_embeds=decoder_inputs_embeds,
1148
+ use_cache=use_cache,
1149
+ output_attentions=output_attentions,
1150
+ output_hidden_states=output_hidden_states,
1151
+ return_dict=return_dict,
1152
+ )
1153
+
1154
+ if not return_dict:
1155
+ return decoder_outputs + encoder_outputs
1156
+
1157
+ return Seq2SeqModelOutput(
1158
+ last_hidden_state=decoder_outputs.last_hidden_state,
1159
+ past_key_values=decoder_outputs.past_key_values,
1160
+ decoder_hidden_states=decoder_outputs.hidden_states,
1161
+ decoder_attentions=decoder_outputs.attentions,
1162
+ cross_attentions=decoder_outputs.cross_attentions,
1163
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1164
+ encoder_hidden_states=encoder_outputs.hidden_states,
1165
+ encoder_attentions=encoder_outputs.attentions,
1166
+ )
1167
+
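As documented for `encoder_outputs` above, the encoder can be run once and its output reused for several decoder passes. A minimal sketch with the bare model:

```python
from transformers import AutoTokenizer, BlenderbotSmallModel

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")

enc = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
encoder_outputs = model.get_encoder()(**enc)  # run the encoder a single time

for prompt in ["Studies show that", "Dogs are"]:
    dec = tokenizer(prompt, return_tensors="pt")
    out = model(
        encoder_outputs=encoder_outputs,
        attention_mask=enc.attention_mask,
        decoder_input_ids=dec.input_ids,
    )
    print(out.last_hidden_state.shape)  # (1, decoder_length, 512)
```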
1168
+
1169
+ @add_start_docstrings(
1170
+ "The BlenderbotSmall Model with a language modeling head. Can be used for summarization.",
1171
+ BLENDERBOT_SMALL_START_DOCSTRING,
1172
+ )
1173
+ class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel):
1174
+ base_model_prefix = "model"
1175
+ _keys_to_ignore_on_load_missing = ["final_logits_bias"]
1176
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"]
1177
+
1178
+ def __init__(self, config: BlenderbotSmallConfig):
1179
+ super().__init__(config)
1180
+ self.model = BlenderbotSmallModel(config)
1181
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
1182
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
1183
+
1184
+ # Initialize weights and apply final processing
1185
+ self.post_init()
1186
+
1187
+ def get_encoder(self):
1188
+ return self.model.get_encoder()
1189
+
1190
+ def get_decoder(self):
1191
+ return self.model.get_decoder()
1192
+
1193
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
1194
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1195
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
1196
+ return new_embeddings
1197
+
1198
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
1199
+ old_num_tokens = self.final_logits_bias.shape[-1]
1200
+ if new_num_tokens <= old_num_tokens:
1201
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
1202
+ else:
1203
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
1204
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
1205
+ self.register_buffer("final_logits_bias", new_bias)
1206
+
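`resize_token_embeddings` keeps `final_logits_bias` in sync through `_resize_final_logits_bias` above, zero-padding any new positions. A small sketch; the added token is purely hypothetical:

```python
from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")

tokenizer.add_tokens(["<new_token>"])          # hypothetical extra token
model.resize_token_embeddings(len(tokenizer))  # embeddings, lm_head and final_logits_bias grow together
print(model.final_logits_bias.shape)           # (1, len(tokenizer))
```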
1207
+ def get_output_embeddings(self):
1208
+ return self.lm_head
1209
+
1210
+ def set_output_embeddings(self, new_embeddings):
1211
+ self.lm_head = new_embeddings
1212
+
1213
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
1214
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1215
+ @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
1216
+ def forward(
1217
+ self,
1218
+ input_ids: Optional[torch.LongTensor] = None,
1219
+ attention_mask: Optional[torch.Tensor] = None,
1220
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1221
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1222
+ head_mask: Optional[torch.Tensor] = None,
1223
+ decoder_head_mask: Optional[torch.Tensor] = None,
1224
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1225
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
1226
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1227
+ inputs_embeds: Optional[torch.Tensor] = None,
1228
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1229
+ labels: Optional[torch.LongTensor] = None,
1230
+ use_cache: Optional[bool] = None,
1231
+ output_attentions: Optional[bool] = None,
1232
+ output_hidden_states: Optional[bool] = None,
1233
+ return_dict: Optional[bool] = None,
1234
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
1235
+ r"""
1236
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1237
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1238
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1239
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1240
+
1241
+ Returns:
1242
+ """
1243
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1244
+
1245
+ if labels is not None:
1246
+ if use_cache:
1247
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
1248
+ use_cache = False
1249
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1250
+ decoder_input_ids = shift_tokens_right(
1251
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1252
+ )
1253
+
1254
+ outputs = self.model(
1255
+ input_ids,
1256
+ attention_mask=attention_mask,
1257
+ decoder_input_ids=decoder_input_ids,
1258
+ encoder_outputs=encoder_outputs,
1259
+ decoder_attention_mask=decoder_attention_mask,
1260
+ head_mask=head_mask,
1261
+ decoder_head_mask=decoder_head_mask,
1262
+ cross_attn_head_mask=cross_attn_head_mask,
1263
+ past_key_values=past_key_values,
1264
+ inputs_embeds=inputs_embeds,
1265
+ decoder_inputs_embeds=decoder_inputs_embeds,
1266
+ use_cache=use_cache,
1267
+ output_attentions=output_attentions,
1268
+ output_hidden_states=output_hidden_states,
1269
+ return_dict=return_dict,
1270
+ )
1271
+ lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
1272
+
1273
+ masked_lm_loss = None
1274
+ if labels is not None:
1275
+ loss_fct = CrossEntropyLoss()
1276
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
1277
+
1278
+ if not return_dict:
1279
+ output = (lm_logits,) + outputs[1:]
1280
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1281
+
1282
+ return Seq2SeqLMOutput(
1283
+ loss=masked_lm_loss,
1284
+ logits=lm_logits,
1285
+ past_key_values=outputs.past_key_values,
1286
+ decoder_hidden_states=outputs.decoder_hidden_states,
1287
+ decoder_attentions=outputs.decoder_attentions,
1288
+ cross_attentions=outputs.cross_attentions,
1289
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1290
+ encoder_hidden_states=outputs.encoder_hidden_states,
1291
+ encoder_attentions=outputs.encoder_attentions,
1292
+ )
1293
+
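When only `labels` are passed, `decoder_input_ids` are derived with `shift_tokens_right` as in the forward above, and a cross-entropy loss is returned. A minimal training-step sketch:

```python
from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")

inputs = tokenizer("i love pizza", return_tensors="pt")
labels = tokenizer("me too! what toppings do you like?", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # decoder_input_ids built internally from labels
print(float(outputs.loss))                # scalar cross-entropy loss
outputs.loss.backward()
```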
1294
+ def prepare_inputs_for_generation(
1295
+ self,
1296
+ decoder_input_ids,
1297
+ past_key_values=None,
1298
+ attention_mask=None,
1299
+ head_mask=None,
1300
+ decoder_head_mask=None,
1301
+ cross_attn_head_mask=None,
1302
+ use_cache=None,
1303
+ encoder_outputs=None,
1304
+ **kwargs,
1305
+ ):
1306
+ # cut decoder_input_ids if past is used
1307
+ if past_key_values is not None:
1308
+ past_length = past_key_values[0][0].shape[2]
1309
+
1310
+ # Some generation methods already pass only the last input ID
1311
+ if decoder_input_ids.shape[1] > past_length:
1312
+ remove_prefix_length = past_length
1313
+ else:
1314
+ # Default to old behavior: keep only final ID
1315
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
1316
+
1317
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
1318
+
1319
+ return {
1320
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1321
+ "encoder_outputs": encoder_outputs,
1322
+ "past_key_values": past_key_values,
1323
+ "decoder_input_ids": decoder_input_ids,
1324
+ "attention_mask": attention_mask,
1325
+ "head_mask": head_mask,
1326
+ "decoder_head_mask": decoder_head_mask,
1327
+ "cross_attn_head_mask": cross_attn_head_mask,
1328
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1329
+ }
1330
+
1331
+ @staticmethod
1332
+ def _reorder_cache(past_key_values, beam_idx):
1333
+ reordered_past = ()
1334
+ for layer_past in past_key_values:
1335
+ # cached cross_attention states don't have to be reordered -> they are always the same
1336
+ reordered_past += (
1337
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1338
+ + layer_past[2:],
1339
+ )
1340
+ return reordered_past
1341
+
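A toy illustration of what `_reorder_cache` does to one cached tensor during beam search: `beam_idx` selects, per batch slot, which beam's key/value states survive to the next decoding step.

```python
import torch

past_state = torch.arange(6.0).reshape(3, 1, 1, 2)  # (num_beams, heads, seq_len, head_dim)
beam_idx = torch.tensor([2, 0, 0])                   # keep beam 2 and two copies of beam 0
print(past_state.index_select(0, beam_idx).reshape(3, 2))
# tensor([[4., 5.],
#         [0., 1.],
#         [0., 1.]])
```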
1342
+
1343
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->BlenderbotSmall
1344
+ class BlenderbotSmallDecoderWrapper(BlenderbotSmallPreTrainedModel):
1345
+ """
1346
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
1347
+ used in combination with the [`EncoderDecoderModel`] framework.
1348
+ """
1349
+
1350
+ def __init__(self, config):
1351
+ super().__init__(config)
1352
+ self.decoder = BlenderbotSmallDecoder(config)
1353
+
1354
+ def forward(self, *args, **kwargs):
1355
+ return self.decoder(*args, **kwargs)
1356
+
1357
+
1358
+ # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->BlenderbotSmall, facebook/bart-base->facebook/blenderbot_small-90M
1359
+ class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel):
1360
+ _tied_weights_keys = ["lm_head.weight"]
1361
+
1362
+ def __init__(self, config):
1363
+ config = copy.deepcopy(config)
1364
+ config.is_decoder = True
1365
+ config.is_encoder_decoder = False
1366
+ super().__init__(config)
1367
+ self.model = BlenderbotSmallDecoderWrapper(config)
1368
+
1369
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1370
+
1371
+ # Initialize weights and apply final processing
1372
+ self.post_init()
1373
+
1374
+ def get_input_embeddings(self):
1375
+ return self.model.decoder.embed_tokens
1376
+
1377
+ def set_input_embeddings(self, value):
1378
+ self.model.decoder.embed_tokens = value
1379
+
1380
+ def get_output_embeddings(self):
1381
+ return self.lm_head
1382
+
1383
+ def set_output_embeddings(self, new_embeddings):
1384
+ self.lm_head = new_embeddings
1385
+
1386
+ def set_decoder(self, decoder):
1387
+ self.model.decoder = decoder
1388
+
1389
+ def get_decoder(self):
1390
+ return self.model.decoder
1391
+
1392
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1393
+ def forward(
1394
+ self,
1395
+ input_ids: torch.LongTensor = None,
1396
+ attention_mask: Optional[torch.Tensor] = None,
1397
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1398
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1399
+ head_mask: Optional[torch.Tensor] = None,
1400
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1401
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1402
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1403
+ labels: Optional[torch.LongTensor] = None,
1404
+ use_cache: Optional[bool] = None,
1405
+ output_attentions: Optional[bool] = None,
1406
+ output_hidden_states: Optional[bool] = None,
1407
+ return_dict: Optional[bool] = None,
1408
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1409
+ r"""
1410
+ Args:
1411
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1412
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1413
+ provide it.
1414
+
1415
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1416
+ [`PreTrainedTokenizer.__call__`] for details.
1417
+
1418
+ [What are input IDs?](../glossary#input-ids)
1419
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1420
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1421
+
1422
+ - 1 for tokens that are **not masked**,
1423
+ - 0 for tokens that are **masked**.
1424
+
1425
+ [What are attention masks?](../glossary#attention-mask)
1426
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1427
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1428
+ if the model is configured as a decoder.
1429
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1430
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
1431
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1432
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1433
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1434
+
1435
+ - 1 indicates the head is **not masked**,
1436
+ - 0 indicates the head is **masked**.
1437
+
1438
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1439
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
1440
+
1441
+ - 1 indicates the head is **not masked**,
1442
+ - 0 indicates the head is **masked**.
1443
+
1444
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1445
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1446
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
1447
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
1448
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
1449
+
1450
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
1451
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1452
+
1453
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1454
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1455
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1456
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1457
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1458
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1459
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1460
+ use_cache (`bool`, *optional*):
1461
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1462
+ (see `past_key_values`).
1466
+ output_attentions (`bool`, *optional*):
1467
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1468
+ returned tensors for more detail.
1469
+ output_hidden_states (`bool`, *optional*):
1470
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1471
+ for more detail.
1472
+ return_dict (`bool`, *optional*):
1473
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1474
+
1475
+ Returns:
1476
+
1477
+ Example:
1478
+
1479
+ ```python
1480
+ >>> from transformers import AutoTokenizer, BlenderbotSmallForCausalLM
1481
+
1482
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1483
+ >>> model = BlenderbotSmallForCausalLM.from_pretrained("facebook/blenderbot_small-90M", add_cross_attention=False)
1484
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
1485
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1486
+ >>> outputs = model(**inputs)
1487
+
1488
+ >>> logits = outputs.logits
1489
+ >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
1490
+ >>> list(logits.shape) == expected_shape
1491
+ True
1492
+ ```"""
1493
+
1494
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1495
+ output_hidden_states = (
1496
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1497
+ )
1498
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1499
+
1500
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1501
+ outputs = self.model.decoder(
1502
+ input_ids=input_ids,
1503
+ attention_mask=attention_mask,
1504
+ encoder_hidden_states=encoder_hidden_states,
1505
+ encoder_attention_mask=encoder_attention_mask,
1506
+ head_mask=head_mask,
1507
+ cross_attn_head_mask=cross_attn_head_mask,
1508
+ past_key_values=past_key_values,
1509
+ inputs_embeds=inputs_embeds,
1510
+ use_cache=use_cache,
1511
+ output_attentions=output_attentions,
1512
+ output_hidden_states=output_hidden_states,
1513
+ return_dict=return_dict,
1514
+ )
1515
+
1516
+ logits = self.lm_head(outputs[0])
1517
+
1518
+ loss = None
1519
+ if labels is not None:
1520
+ labels = labels.to(logits.device)
1521
+ loss_fct = CrossEntropyLoss()
1522
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
1523
+
1524
+ if not return_dict:
1525
+ output = (logits,) + outputs[1:]
1526
+ return (loss,) + output if loss is not None else output
1527
+
1528
+ return CausalLMOutputWithCrossAttentions(
1529
+ loss=loss,
1530
+ logits=logits,
1531
+ past_key_values=outputs.past_key_values,
1532
+ hidden_states=outputs.hidden_states,
1533
+ attentions=outputs.attentions,
1534
+ cross_attentions=outputs.cross_attentions,
1535
+ )
1536
+
1537
+ def prepare_inputs_for_generation(
1538
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
1539
+ ):
1540
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1541
+ if attention_mask is None:
1542
+ attention_mask = input_ids.new_ones(input_ids.shape)
1543
+
1544
+ if past_key_values:
1545
+ past_length = past_key_values[0][0].shape[2]
1546
+
1547
+ # Some generation methods already pass only the last input ID
1548
+ if input_ids.shape[1] > past_length:
1549
+ remove_prefix_length = past_length
1550
+ else:
1551
+ # Default to old behavior: keep only final ID
1552
+ remove_prefix_length = input_ids.shape[1] - 1
1553
+
1554
+ input_ids = input_ids[:, remove_prefix_length:]
1555
+ # first step, decoder_cached_states are empty
1556
+ return {
1557
+ "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
1558
+ "attention_mask": attention_mask,
1559
+ "past_key_values": past_key_values,
1560
+ "use_cache": use_cache,
1561
+ }
1562
+
1563
+ @staticmethod
1564
+ def _reorder_cache(past_key_values, beam_idx):
1565
+ reordered_past = ()
1566
+ for layer_past in past_key_values:
1567
+ reordered_past += (
1568
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1569
+ )
1570
+ return reordered_past
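`BlenderbotSmallForCausalLM` also works with `generate` as a standalone decoder. A minimal sketch; since the 90M checkpoint was trained as an encoder-decoder, the generated text is only illustrative:

```python
from transformers import AutoTokenizer, BlenderbotSmallForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = BlenderbotSmallForCausalLM.from_pretrained("facebook/blenderbot_small-90M", add_cross_attention=False)

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
generated = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```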
env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py ADDED
@@ -0,0 +1,1522 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flax BlenderbotSmall model."""
16
+
17
+
18
+ import math
19
+ import random
20
+ from functools import partial
21
+ from typing import Callable, Optional, Tuple
22
+
23
+ import flax.linen as nn
24
+ import jax
25
+ import jax.numpy as jnp
26
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
27
+ from flax.linen import combine_masks, make_causal_mask
28
+ from flax.linen.attention import dot_product_attention_weights
29
+ from flax.traverse_util import flatten_dict, unflatten_dict
30
+ from jax import lax
31
+ from jax.random import PRNGKey
32
+
33
+ from ...modeling_flax_outputs import (
34
+ FlaxBaseModelOutput,
35
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
36
+ FlaxCausalLMOutputWithCrossAttentions,
37
+ FlaxSeq2SeqLMOutput,
38
+ FlaxSeq2SeqModelOutput,
39
+ )
40
+ from ...modeling_flax_utils import (
41
+ ACT2FN,
42
+ FlaxPreTrainedModel,
43
+ append_call_sample_docstring,
44
+ append_replace_return_docstrings,
45
+ overwrite_call_docstring,
46
+ )
47
+ from ...utils import add_start_docstrings, logging, replace_return_docstrings
48
+ from .configuration_blenderbot_small import BlenderbotSmallConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M"
54
+ _CONFIG_FOR_DOC = "BlenderbotSmallConfig"
55
+
56
+ BLENDERBOT_SMALL_START_DOCSTRING = r"""
57
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
58
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
59
+ etc.)
60
+
61
+ This model is also a Flax Linen
62
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
63
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
64
+
65
+ Finally, this model supports inherent JAX features such as:
66
+
67
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
68
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
69
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
70
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
71
+
72
+ Parameters:
73
+ config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.
74
+ Initializing with a config file does not load the weights associated with the model, only the
75
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
76
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
77
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
78
+ `jax.numpy.bfloat16` (on TPUs).
79
+
80
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
81
+ specified all the computation will be performed with the given `dtype`.
82
+
83
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
84
+ parameters.**
85
+
86
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
87
+ [`~FlaxPreTrainedModel.to_bf16`].
88
+ """
89
+
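A minimal sketch of the `dtype` behaviour described above (half-precision computation while the parameters keep their own dtype); `from_pt=True` is an assumption in case only PyTorch weights are published for this checkpoint:

```python
import jax.numpy as jnp
from transformers import FlaxBlenderbotSmallForConditionalGeneration

model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(
    "facebook/blenderbot_small-90M", dtype=jnp.bfloat16, from_pt=True
)
# parameters stay float32; use model.to_bf16(model.params) to convert them as well
```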
90
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
91
+ Args:
92
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
93
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
94
+ it.
95
+
96
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
97
+ [`PreTrainedTokenizer.__call__`] for details.
98
+
99
+ [What are input IDs?](../glossary#input-ids)
100
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
101
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
102
+
103
+ - 1 for tokens that are **not masked**,
104
+ - 0 for tokens that are **masked**.
105
+
106
+ [What are attention masks?](../glossary#attention-mask)
107
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
108
+ Indices of decoder input sequence tokens in the vocabulary.
109
+
110
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
111
+ [`PreTrainedTokenizer.__call__`] for details.
112
+
113
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
114
+
115
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
116
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
117
+ for denoising pre-training following the paper.
118
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
119
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
120
+ be used by default.
121
+
122
+ If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
123
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
124
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
125
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
126
+ config.max_position_embeddings - 1]`.
127
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
128
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
129
+ range `[0, config.max_position_embeddings - 1]`.
130
+ output_attentions (`bool`, *optional*):
131
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
132
+ tensors for more detail.
133
+ output_hidden_states (`bool`, *optional*):
134
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
135
+ more detail.
136
+ return_dict (`bool`, *optional*):
137
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
138
+ """
139
+
140
+
141
+ BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING = r"""
142
+ Args:
143
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
144
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
145
+ it.
146
+
147
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
148
+ [`PreTrainedTokenizer.__call__`] for details.
149
+
150
+ [What are input IDs?](../glossary#input-ids)
151
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
152
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
153
+
154
+ - 1 for tokens that are **not masked**,
155
+ - 0 for tokens that are **masked**.
156
+
157
+ [What are attention masks?](../glossary#attention-mask)
158
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
159
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
160
+ config.max_position_embeddings - 1]`.
161
+ output_attentions (`bool`, *optional*):
162
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
163
+ tensors for more detail.
164
+ output_hidden_states (`bool`, *optional*):
165
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
166
+ more detail.
167
+ return_dict (`bool`, *optional*):
168
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
169
+ """
170
+
171
+ BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING = r"""
172
+ Args:
173
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
174
+ Indices of decoder input sequence tokens in the vocabulary.
175
+
176
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
177
+ [`PreTrainedTokenizer.__call__`] for details.
178
+
179
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
180
+
181
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
182
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
183
+ for denoising pre-training following the paper.
184
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
185
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
186
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
187
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
188
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
189
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
190
+
191
+ - 1 for tokens that are **not masked**,
192
+ - 0 for tokens that are **masked**.
193
+
194
+ [What are attention masks?](../glossary#attention-mask)
195
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
196
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
197
+ be used by default.
198
+
199
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
200
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
201
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
202
+ Indices of positions of each decoder input sequence token in the position embeddings. Selected in the
203
+ range `[0, config.max_position_embeddings - 1]`.
204
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
205
+ Dictionary of pre-computed hidden-states (keys and values in the attention blocks) that can be used for fast
206
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
207
+ output_attentions (`bool`, *optional*):
208
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
209
+ tensors for more detail.
210
+ output_hidden_states (`bool`, *optional*):
211
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
212
+ more detail.
213
+ return_dict (`bool`, *optional*):
214
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
215
+ """
216
+
217
+
218
+ # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
219
+ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
220
+ """
221
+ Shift input ids one token to the right.
222
+ """
223
+ shifted_input_ids = jnp.zeros_like(input_ids)
224
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
225
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
226
+
227
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
228
+ return shifted_input_ids
229
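+
+
+ # Illustrative sketch of the shift above (hypothetical values, shown as a comment only): with pad_token_id=0 and
+ # decoder_start_token_id=2, an input of [[5, 6, 1]] is shifted to [[2, 5, 6]]; any label positions equal to -100
+ # in the shifted ids are then replaced by the pad token.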
+
230
+
231
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->BlenderbotSmall
232
+ class FlaxBlenderbotSmallAttention(nn.Module):
233
+ config: BlenderbotSmallConfig
234
+ embed_dim: int
235
+ num_heads: int
236
+ dropout: float = 0.0
237
+ causal: bool = False
238
+ bias: bool = True
239
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
240
+
241
+ def setup(self) -> None:
242
+ self.head_dim = self.embed_dim // self.num_heads
243
+ if self.head_dim * self.num_heads != self.embed_dim:
244
+ raise ValueError(
245
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
246
+ f" and `num_heads`: {self.num_heads})."
247
+ )
248
+
249
+ dense = partial(
250
+ nn.Dense,
251
+ self.embed_dim,
252
+ use_bias=self.bias,
253
+ dtype=self.dtype,
254
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
255
+ )
256
+
257
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
258
+ self.out_proj = dense()
259
+
260
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
261
+
262
+ if self.causal:
263
+ self.causal_mask = make_causal_mask(
264
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
265
+ )
266
+
267
+ def _split_heads(self, hidden_states):
268
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
269
+
270
+ def _merge_heads(self, hidden_states):
271
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
272
+
273
+ @nn.compact
274
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
275
+ """
276
+ This function takes projected key, value states from a single input token and concatenates the states to cached
277
+ states from previous steps. This function is slightly adapted from the official Flax repository:
278
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
279
+ """
280
+ # detect if we're initializing by absence of existing cache data.
281
+ is_initialized = self.has_variable("cache", "cached_key")
282
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
283
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
284
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
285
+
286
+ if is_initialized:
287
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
288
+ # update key, value caches with our new 1d spatial slices
289
+ cur_index = cache_index.value
290
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
291
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
292
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
293
+ cached_key.value = key
294
+ cached_value.value = value
295
+ num_updated_cache_vectors = query.shape[1]
296
+ cache_index.value = cache_index.value + num_updated_cache_vectors
297
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
298
+ pad_mask = jnp.broadcast_to(
299
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
300
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
301
+ )
302
+ attention_mask = combine_masks(pad_mask, attention_mask)
303
+ return key, value, attention_mask
304
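+
+ # Illustrative note: for a cache initialized with maximum length L, cached_key/cached_value have shape
+ # (batch, L, num_heads, head_dim); at each decoding step the new key/value slice is written at position
+ # cache_index via lax.dynamic_update_slice, and cache_index is advanced by the number of query positions processed.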
+
305
+ def __call__(
306
+ self,
307
+ hidden_states: jnp.ndarray,
308
+ key_value_states: Optional[jnp.ndarray] = None,
309
+ attention_mask: Optional[jnp.ndarray] = None,
310
+ init_cache: bool = False,
311
+ deterministic: bool = True,
312
+ ) -> Tuple[jnp.ndarray]:
313
+ """Input shape: Batch x Time x Channel"""
314
+
315
+ # if key_value_states are provided this layer is used as a cross-attention layer
316
+ # for the decoder
317
+ is_cross_attention = key_value_states is not None
318
+ batch_size = hidden_states.shape[0]
319
+
320
+ # get query proj
321
+ query_states = self.q_proj(hidden_states)
322
+ # get key, value proj
323
+ if is_cross_attention:
324
+ # cross_attentions
325
+ key_states = self.k_proj(key_value_states)
326
+ value_states = self.v_proj(key_value_states)
327
+ else:
328
+ # self_attention
329
+ key_states = self.k_proj(hidden_states)
330
+ value_states = self.v_proj(hidden_states)
331
+
332
+ query_states = self._split_heads(query_states)
333
+ key_states = self._split_heads(key_states)
334
+ value_states = self._split_heads(value_states)
335
+
336
+ # handle cache / prepare causal attention mask
337
+ if self.causal:
338
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
339
+ if self.has_variable("cache", "cached_key"):
340
+ mask_shift = self.variables["cache"]["cache_index"]
341
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
342
+ causal_mask = lax.dynamic_slice(
343
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
344
+ )
345
+ else:
346
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
347
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
348
+
349
+ # combine masks if needed
350
+ if attention_mask is not None and self.causal:
351
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
352
+ attention_mask = combine_masks(attention_mask, causal_mask)
353
+ elif self.causal:
354
+ attention_mask = causal_mask
355
+ elif attention_mask is not None:
356
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
357
+
358
+ # During fast autoregressive decoding, we feed one position at a time,
359
+ # and cache the keys and values step by step.
360
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
361
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
362
+ key_states, value_states, query_states, attention_mask
363
+ )
364
+
365
+ # Convert the boolean attention mask to an attention bias.
366
+ if attention_mask is not None:
367
+ # attention mask in the form of attention bias
368
+ attention_bias = lax.select(
369
+ attention_mask > 0,
370
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
371
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
372
+ )
373
+ else:
374
+ attention_bias = None
375
+
376
+ dropout_rng = None
377
+ if not deterministic and self.dropout > 0.0:
378
+ dropout_rng = self.make_rng("dropout")
379
+
380
+ attn_weights = dot_product_attention_weights(
381
+ query_states,
382
+ key_states,
383
+ bias=attention_bias,
384
+ dropout_rng=dropout_rng,
385
+ dropout_rate=self.dropout,
386
+ broadcast_dropout=True,
387
+ deterministic=deterministic,
388
+ dtype=self.dtype,
389
+ precision=None,
390
+ )
391
+
392
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
393
+ attn_output = self._merge_heads(attn_output)
394
+ attn_output = self.out_proj(attn_output)
395
+
396
+ return attn_output, attn_weights
397
+
398
+
399
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayer with Bart->BlenderbotSmall
400
+ class FlaxBlenderbotSmallEncoderLayer(nn.Module):
401
+ config: BlenderbotSmallConfig
402
+ dtype: jnp.dtype = jnp.float32
403
+
404
+ def setup(self) -> None:
405
+ self.embed_dim = self.config.d_model
406
+ self.self_attn = FlaxBlenderbotSmallAttention(
407
+ config=self.config,
408
+ embed_dim=self.embed_dim,
409
+ num_heads=self.config.encoder_attention_heads,
410
+ dropout=self.config.attention_dropout,
411
+ dtype=self.dtype,
412
+ )
413
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
414
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
415
+ self.activation_fn = ACT2FN[self.config.activation_function]
416
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
417
+ self.fc1 = nn.Dense(
418
+ self.config.encoder_ffn_dim,
419
+ dtype=self.dtype,
420
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
421
+ )
422
+ self.fc2 = nn.Dense(
423
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
424
+ )
425
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
426
+
427
+ def __call__(
428
+ self,
429
+ hidden_states: jnp.ndarray,
430
+ attention_mask: jnp.ndarray,
431
+ output_attentions: bool = True,
432
+ deterministic: bool = True,
433
+ ) -> Tuple[jnp.ndarray]:
434
+ residual = hidden_states
435
+ hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
436
+
437
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
438
+ hidden_states = residual + hidden_states
439
+ hidden_states = self.self_attn_layer_norm(hidden_states)
440
+
441
+ residual = hidden_states
442
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
443
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
444
+ hidden_states = self.fc2(hidden_states)
445
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
446
+ hidden_states = residual + hidden_states
447
+ hidden_states = self.final_layer_norm(hidden_states)
448
+
449
+ outputs = (hidden_states,)
450
+
451
+ if output_attentions:
452
+ outputs += (attn_weights,)
453
+
454
+ return outputs
455
+
456
+
457
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->BlenderbotSmall
458
+ class FlaxBlenderbotSmallEncoderLayerCollection(nn.Module):
459
+ config: BlenderbotSmallConfig
460
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
461
+
462
+ def setup(self):
463
+ self.layers = [
464
+ FlaxBlenderbotSmallEncoderLayer(self.config, name=str(i), dtype=self.dtype)
465
+ for i in range(self.config.encoder_layers)
466
+ ]
467
+ self.layerdrop = self.config.encoder_layerdrop
468
+
469
+ def __call__(
470
+ self,
471
+ hidden_states,
472
+ attention_mask,
473
+ deterministic: bool = True,
474
+ output_attentions: bool = False,
475
+ output_hidden_states: bool = False,
476
+ return_dict: bool = True,
477
+ ):
478
+ all_attentions = () if output_attentions else None
479
+ all_hidden_states = () if output_hidden_states else None
480
+
481
+ for encoder_layer in self.layers:
482
+ if output_hidden_states:
483
+ all_hidden_states = all_hidden_states + (hidden_states,)
484
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
485
+ dropout_probability = random.uniform(0, 1)
486
+ if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
487
+ layer_outputs = (None, None)
488
+ else:
489
+ layer_outputs = encoder_layer(
490
+ hidden_states,
491
+ attention_mask,
492
+ output_attentions,
493
+ deterministic,
494
+ )
495
+ hidden_states = layer_outputs[0]
496
+ if output_attentions:
497
+ all_attentions = all_attentions + (layer_outputs[1],)
498
+
499
+ if output_hidden_states:
500
+ all_hidden_states += (hidden_states,)
501
+
502
+ outputs = (hidden_states, all_hidden_states, all_attentions)
503
+
504
+ if not return_dict:
505
+ return tuple(v for v in outputs if v is not None)
506
+
507
+ return FlaxBaseModelOutput(
508
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
509
+ )
510
+
511
+
512
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayer with Bart->BlenderbotSmall
513
+ class FlaxBlenderbotSmallDecoderLayer(nn.Module):
514
+ config: BlenderbotSmallConfig
515
+ dtype: jnp.dtype = jnp.float32
516
+
517
+ def setup(self) -> None:
518
+ self.embed_dim = self.config.d_model
519
+ self.self_attn = FlaxBlenderbotSmallAttention(
520
+ config=self.config,
521
+ embed_dim=self.embed_dim,
522
+ num_heads=self.config.decoder_attention_heads,
523
+ dropout=self.config.attention_dropout,
524
+ causal=True,
525
+ dtype=self.dtype,
526
+ )
527
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
528
+ self.activation_fn = ACT2FN[self.config.activation_function]
529
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
530
+
531
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
532
+ self.encoder_attn = FlaxBlenderbotSmallAttention(
533
+ config=self.config,
534
+ embed_dim=self.embed_dim,
535
+ num_heads=self.config.decoder_attention_heads,
536
+ dropout=self.config.attention_dropout,
537
+ dtype=self.dtype,
538
+ )
539
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
540
+ self.fc1 = nn.Dense(
541
+ self.config.decoder_ffn_dim,
542
+ dtype=self.dtype,
543
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
544
+ )
545
+ self.fc2 = nn.Dense(
546
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
547
+ )
548
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
549
+
550
+ def __call__(
551
+ self,
552
+ hidden_states: jnp.ndarray,
553
+ attention_mask: jnp.ndarray,
554
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
555
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
556
+ init_cache: bool = False,
557
+ output_attentions: bool = True,
558
+ deterministic: bool = True,
559
+ ) -> Tuple[jnp.ndarray]:
560
+ residual = hidden_states
561
+
562
+ # Self Attention
563
+ hidden_states, self_attn_weights = self.self_attn(
564
+ hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
565
+ )
566
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
567
+ hidden_states = residual + hidden_states
568
+ hidden_states = self.self_attn_layer_norm(hidden_states)
569
+
570
+ # Cross-Attention Block
571
+ cross_attn_weights = None
572
+ if encoder_hidden_states is not None:
573
+ residual = hidden_states
574
+
575
+ hidden_states, cross_attn_weights = self.encoder_attn(
576
+ hidden_states=hidden_states,
577
+ key_value_states=encoder_hidden_states,
578
+ attention_mask=encoder_attention_mask,
579
+ )
580
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
581
+ hidden_states = residual + hidden_states
582
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
583
+
584
+ # Fully Connected
585
+ residual = hidden_states
586
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
587
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
588
+ hidden_states = self.fc2(hidden_states)
589
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
590
+ hidden_states = residual + hidden_states
591
+ hidden_states = self.final_layer_norm(hidden_states)
592
+
593
+ outputs = (hidden_states,)
594
+
595
+ if output_attentions:
596
+ outputs += (self_attn_weights, cross_attn_weights)
597
+
598
+ return outputs
599
+
600
+
601
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->BlenderbotSmall
602
+ class FlaxBlenderbotSmallDecoderLayerCollection(nn.Module):
603
+ config: BlenderbotSmallConfig
604
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
605
+
606
+ def setup(self):
607
+ self.layers = [
608
+ FlaxBlenderbotSmallDecoderLayer(self.config, name=str(i), dtype=self.dtype)
609
+ for i in range(self.config.decoder_layers)
610
+ ]
611
+ self.layerdrop = self.config.decoder_layerdrop
612
+
613
+ def __call__(
614
+ self,
615
+ hidden_states,
616
+ attention_mask,
617
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
618
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
619
+ deterministic: bool = True,
620
+ init_cache: bool = False,
621
+ output_attentions: bool = False,
622
+ output_hidden_states: bool = False,
623
+ return_dict: bool = True,
624
+ ):
625
+ # decoder layers
626
+ all_hidden_states = () if output_hidden_states else None
627
+ all_self_attns = () if output_attentions else None
628
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
629
+
630
+ for decoder_layer in self.layers:
631
+ if output_hidden_states:
632
+ all_hidden_states += (hidden_states,)
633
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
634
+ dropout_probability = random.uniform(0, 1)
635
+ if not deterministic and (dropout_probability < self.layerdrop):
636
+ layer_outputs = (None, None, None)
637
+ else:
638
+ layer_outputs = decoder_layer(
639
+ hidden_states,
640
+ attention_mask=attention_mask,
641
+ encoder_hidden_states=encoder_hidden_states,
642
+ encoder_attention_mask=encoder_attention_mask,
643
+ init_cache=init_cache,
644
+ output_attentions=output_attentions,
645
+ deterministic=deterministic,
646
+ )
647
+
648
+ hidden_states = layer_outputs[0]
649
+ if output_attentions:
650
+ all_self_attns += (layer_outputs[1],)
651
+
652
+ if encoder_hidden_states is not None:
653
+ all_cross_attentions += (layer_outputs[2],)
654
+
655
+ # add hidden states from the last decoder layer
656
+ if output_hidden_states:
657
+ all_hidden_states += (hidden_states,)
658
+
659
+ outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
660
+
661
+ if not return_dict:
662
+ return tuple(v for v in outputs if v is not None)
663
+
664
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
665
+ last_hidden_state=hidden_states,
666
+ hidden_states=all_hidden_states,
667
+ attentions=all_self_attns,
668
+ cross_attentions=all_cross_attentions,
669
+ )
670
+
671
+
672
+ class FlaxBlenderbotSmallEncoder(nn.Module):
673
+ config: BlenderbotSmallConfig
674
+ embed_tokens: nn.Embed
675
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
676
+
677
+ def setup(self):
678
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
679
+
680
+ embed_dim = self.config.d_model
681
+ self.padding_idx = self.config.pad_token_id
682
+ self.max_source_positions = self.config.max_position_embeddings
683
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
684
+
685
+ self.embed_positions = nn.Embed(
686
+ self.config.max_position_embeddings,
687
+ embed_dim,
688
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
689
+ )
690
+ self.layers = FlaxBlenderbotSmallEncoderLayerCollection(self.config, self.dtype)
691
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
692
+
693
+ def __call__(
694
+ self,
695
+ input_ids,
696
+ attention_mask,
697
+ position_ids,
698
+ output_attentions: bool = False,
699
+ output_hidden_states: bool = False,
700
+ return_dict: bool = True,
701
+ deterministic: bool = True,
702
+ ):
703
+ input_shape = input_ids.shape
704
+ input_ids = input_ids.reshape(-1, input_shape[-1])
705
+
706
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
707
+
708
+ embed_pos = self.embed_positions(position_ids)
709
+
710
+ hidden_states = inputs_embeds + embed_pos
711
+ hidden_states = self.layernorm_embedding(hidden_states)
712
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
713
+
714
+ outputs = self.layers(
715
+ hidden_states,
716
+ attention_mask,
717
+ deterministic=deterministic,
718
+ output_attentions=output_attentions,
719
+ output_hidden_states=output_hidden_states,
720
+ return_dict=return_dict,
721
+ )
722
+
723
+ if not return_dict:
724
+ return outputs
725
+
726
+ return FlaxBaseModelOutput(
727
+ last_hidden_state=outputs.last_hidden_state,
728
+ hidden_states=outputs.hidden_states,
729
+ attentions=outputs.attentions,
730
+ )
731
+
732
+
733
+ class FlaxBlenderbotSmallDecoder(nn.Module):
734
+ config: BlenderbotSmallConfig
735
+ embed_tokens: nn.Embed
736
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
737
+
738
+ def setup(self):
739
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
740
+
741
+ embed_dim = self.config.d_model
742
+ self.padding_idx = self.config.pad_token_id
743
+ self.max_target_positions = self.config.max_position_embeddings
744
+ self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
745
+
746
+ self.embed_positions = nn.Embed(
747
+ self.config.max_position_embeddings,
748
+ embed_dim,
749
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
750
+ )
751
+
752
+ self.layers = FlaxBlenderbotSmallDecoderLayerCollection(self.config, self.dtype)
753
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
754
+
755
+ def __call__(
756
+ self,
757
+ input_ids,
758
+ attention_mask,
759
+ position_ids,
760
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
761
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
762
+ init_cache: bool = False,
763
+ output_attentions: bool = False,
764
+ output_hidden_states: bool = False,
765
+ return_dict: bool = True,
766
+ deterministic: bool = True,
767
+ ):
768
+ input_shape = input_ids.shape
769
+ input_ids = input_ids.reshape(-1, input_shape[-1])
770
+
771
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
772
+
773
+ # embed positions
774
+ positions = self.embed_positions(position_ids)
775
+
776
+ # BlenderbotSmall applies layer norm on inputs_embeds in decoder
777
+ inputs_embeds = self.layernorm_embedding(inputs_embeds)
778
+ hidden_states = inputs_embeds + positions
779
+
780
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
781
+
782
+ outputs = self.layers(
783
+ hidden_states,
784
+ attention_mask,
785
+ encoder_hidden_states,
786
+ encoder_attention_mask,
787
+ deterministic=deterministic,
788
+ init_cache=init_cache,
789
+ output_attentions=output_attentions,
790
+ output_hidden_states=output_hidden_states,
791
+ return_dict=return_dict,
792
+ )
793
+
794
+ if not return_dict:
795
+ return outputs
796
+
797
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
798
+ last_hidden_state=outputs.last_hidden_state,
799
+ hidden_states=outputs.hidden_states,
800
+ attentions=outputs.attentions,
801
+ cross_attentions=outputs.cross_attentions,
802
+ )
803
+
804
+
805
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->BlenderbotSmall
806
+ class FlaxBlenderbotSmallModule(nn.Module):
807
+ config: BlenderbotSmallConfig
808
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
809
+
810
+ def setup(self):
811
+ self.shared = nn.Embed(
812
+ self.config.vocab_size,
813
+ self.config.d_model,
814
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
815
+ dtype=self.dtype,
816
+ )
817
+
818
+ self.encoder = FlaxBlenderbotSmallEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
819
+ self.decoder = FlaxBlenderbotSmallDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
820
+
821
+ def _get_encoder_module(self):
822
+ return self.encoder
823
+
824
+ def _get_decoder_module(self):
825
+ return self.decoder
826
+
827
+ def __call__(
828
+ self,
829
+ input_ids,
830
+ attention_mask,
831
+ decoder_input_ids,
832
+ decoder_attention_mask,
833
+ position_ids,
834
+ decoder_position_ids,
835
+ output_attentions: bool = False,
836
+ output_hidden_states: bool = False,
837
+ return_dict: bool = True,
838
+ deterministic: bool = True,
839
+ ):
840
+ encoder_outputs = self.encoder(
841
+ input_ids=input_ids,
842
+ attention_mask=attention_mask,
843
+ position_ids=position_ids,
844
+ output_attentions=output_attentions,
845
+ output_hidden_states=output_hidden_states,
846
+ return_dict=return_dict,
847
+ deterministic=deterministic,
848
+ )
849
+
850
+ decoder_outputs = self.decoder(
851
+ input_ids=decoder_input_ids,
852
+ attention_mask=decoder_attention_mask,
853
+ position_ids=decoder_position_ids,
854
+ encoder_hidden_states=encoder_outputs[0],
855
+ encoder_attention_mask=attention_mask,
856
+ output_attentions=output_attentions,
857
+ output_hidden_states=output_hidden_states,
858
+ return_dict=return_dict,
859
+ deterministic=deterministic,
860
+ )
861
+
862
+ if not return_dict:
863
+ return decoder_outputs + encoder_outputs
864
+
865
+ return FlaxSeq2SeqModelOutput(
866
+ last_hidden_state=decoder_outputs.last_hidden_state,
867
+ decoder_hidden_states=decoder_outputs.hidden_states,
868
+ decoder_attentions=decoder_outputs.attentions,
869
+ cross_attentions=decoder_outputs.cross_attentions,
870
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
871
+ encoder_hidden_states=encoder_outputs.hidden_states,
872
+ encoder_attentions=encoder_outputs.attentions,
873
+ )
874
+
875
+
876
+ class FlaxBlenderbotSmallPreTrainedModel(FlaxPreTrainedModel):
877
+ config_class = BlenderbotSmallConfig
878
+ base_model_prefix: str = "model"
879
+ module_class: nn.Module = None
880
+
881
+ def __init__(
882
+ self,
883
+ config: BlenderbotSmallConfig,
884
+ input_shape: Tuple[int] = (1, 1),
885
+ seed: int = 0,
886
+ dtype: jnp.dtype = jnp.float32,
887
+ _do_init: bool = True,
888
+ **kwargs,
889
+ ):
890
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
891
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
892
+
893
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
894
+ # init input tensors
895
+ input_ids = jnp.zeros(input_shape, dtype="i4")
896
+ # make sure initialization pass will work for FlaxBlenderbotSmallForSequenceClassificationModule
897
+ input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
898
+ attention_mask = jnp.ones_like(input_ids)
899
+ decoder_input_ids = input_ids
900
+ decoder_attention_mask = jnp.ones_like(input_ids)
901
+
902
+ batch_size, sequence_length = input_ids.shape
903
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
904
+ decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
905
+
906
+ params_rng, dropout_rng = jax.random.split(rng)
907
+ rngs = {"params": params_rng, "dropout": dropout_rng}
908
+
909
+ random_params = self.module.init(
910
+ rngs,
911
+ input_ids,
912
+ attention_mask,
913
+ decoder_input_ids,
914
+ decoder_attention_mask,
915
+ position_ids,
916
+ decoder_position_ids,
917
+ )["params"]
918
+
919
+ if params is not None:
920
+ random_params = flatten_dict(unfreeze(random_params))
921
+ params = flatten_dict(unfreeze(params))
922
+ for missing_key in self._missing_keys:
923
+ params[missing_key] = random_params[missing_key]
924
+ self._missing_keys = set()
925
+ return freeze(unflatten_dict(params))
926
+ else:
927
+ return random_params
928
+
929
+ def init_cache(self, batch_size, max_length, encoder_outputs):
930
+ r"""
931
+ Args:
932
+ batch_size (`int`):
933
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
934
+ max_length (`int`):
935
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
936
+ cache.
937
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
938
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
939
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`
940
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
941
+ cross-attention of the decoder.
942
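+
+ Example (a minimal sketch; `model` and `inputs` are assumed to be defined as in the `encode` example, and
+ `max_length=32` is only illustrative):
+
+ ```python
+ >>> encoder_outputs = model.encode(**inputs)
+ >>> past_key_values = model.init_cache(
+ ...     batch_size=inputs["input_ids"].shape[0], max_length=32, encoder_outputs=encoder_outputs
+ ... )
+ ```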
+ """
943
+ # init input variables to retrieve cache
944
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
945
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
946
+ decoder_position_ids = jnp.broadcast_to(
947
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
948
+ )
949
+
950
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
951
+ decoder_module = module._get_decoder_module()
952
+ return decoder_module(
953
+ decoder_input_ids,
954
+ decoder_attention_mask,
955
+ decoder_position_ids,
956
+ **kwargs,
957
+ )
958
+
959
+ init_variables = self.module.init(
960
+ jax.random.PRNGKey(0),
961
+ decoder_input_ids=decoder_input_ids,
962
+ decoder_attention_mask=decoder_attention_mask,
963
+ decoder_position_ids=decoder_position_ids,
964
+ encoder_hidden_states=encoder_outputs[0],
965
+ init_cache=True,
966
+ method=_decoder_forward, # we only need to call the decoder to init the cache
967
+ )
968
+ return unfreeze(init_variables["cache"])
969
+
970
+ @add_start_docstrings(BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING)
971
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotSmallConfig)
972
+ def encode(
973
+ self,
974
+ input_ids: jnp.ndarray,
975
+ attention_mask: Optional[jnp.ndarray] = None,
976
+ position_ids: Optional[jnp.ndarray] = None,
977
+ output_attentions: Optional[bool] = None,
978
+ output_hidden_states: Optional[bool] = None,
979
+ return_dict: Optional[bool] = None,
980
+ train: bool = False,
981
+ params: dict = None,
982
+ dropout_rng: PRNGKey = None,
983
+ ):
984
+ r"""
985
+ Returns:
986
+
987
+ Example:
988
+
989
+ ```python
990
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
991
+
992
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
993
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
994
+
995
+ >>> text = "My friends are cool but they eat too many carbs."
996
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
997
+ >>> encoder_outputs = model.encode(**inputs)
998
+ ```"""
999
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1000
+ output_hidden_states = (
1001
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1002
+ )
1003
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1004
+
1005
+ if attention_mask is None:
1006
+ attention_mask = jnp.ones_like(input_ids)
1007
+ if position_ids is None:
1008
+ batch_size, sequence_length = input_ids.shape
1009
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1010
+
1011
+ # Handle any PRNG if needed
1012
+ rngs = {}
1013
+ if dropout_rng is not None:
1014
+ rngs["dropout"] = dropout_rng
1015
+
1016
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
1017
+ encode_module = module._get_encoder_module()
1018
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
1019
+
1020
+ return self.module.apply(
1021
+ {"params": params or self.params},
1022
+ input_ids=jnp.array(input_ids, dtype="i4"),
1023
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1024
+ position_ids=jnp.array(position_ids, dtype="i4"),
1025
+ output_attentions=output_attentions,
1026
+ output_hidden_states=output_hidden_states,
1027
+ return_dict=return_dict,
1028
+ deterministic=not train,
1029
+ rngs=rngs,
1030
+ method=_encoder_forward,
1031
+ )
1032
+
1033
+ @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING)
1034
+ @replace_return_docstrings(
1035
+ output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotSmallConfig
1036
+ )
1037
+ def decode(
1038
+ self,
1039
+ decoder_input_ids,
1040
+ encoder_outputs,
1041
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1042
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1043
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1044
+ past_key_values: dict = None,
1045
+ output_attentions: Optional[bool] = None,
1046
+ output_hidden_states: Optional[bool] = None,
1047
+ return_dict: Optional[bool] = None,
1048
+ train: bool = False,
1049
+ params: dict = None,
1050
+ dropout_rng: PRNGKey = None,
1051
+ ):
1052
+ r"""
1053
+ Returns:
1054
+
1055
+ Example:
1056
+
1057
+ ```python
1058
+ >>> import jax.numpy as jnp
1059
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1060
+
1061
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1062
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1063
+
1064
+ >>> text = "My friends are cool but they eat too many carbs."
1065
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
1066
+ >>> encoder_outputs = model.encode(**inputs)
1067
+
1068
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1069
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1070
+
1071
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1072
+ >>> last_decoder_hidden_states = outputs.last_hidden_state
1073
+ ```"""
1074
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1075
+ output_hidden_states = (
1076
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1077
+ )
1078
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1079
+
1080
+ encoder_hidden_states = encoder_outputs[0]
1081
+ if encoder_attention_mask is None:
1082
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1083
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1084
+
1085
+ batch_size, sequence_length = decoder_input_ids.shape
1086
+ if decoder_attention_mask is None:
1087
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1088
+
1089
+ if decoder_position_ids is None:
1090
+ if past_key_values is not None:
1091
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1092
+
1093
+ decoder_position_ids = jnp.broadcast_to(
1094
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1095
+ )
1096
+
1097
+ # Handle any PRNG if needed
1098
+ rngs = {}
1099
+ if dropout_rng is not None:
1100
+ rngs["dropout"] = dropout_rng
1101
+
1102
+ inputs = {"params": params or self.params}
1103
+
1104
+ # If past_key_values are passed, the cache is already initialized and the private flag init_cache has to be
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
+ # changed by the FlaxBlenderbotSmallAttention module.
1107
+ if past_key_values:
1108
+ inputs["cache"] = past_key_values
1109
+ mutable = ["cache"]
1110
+ else:
1111
+ mutable = False
1112
+
1113
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1114
+ decoder_module = module._get_decoder_module()
1115
+ return decoder_module(
1116
+ decoder_input_ids,
1117
+ decoder_attention_mask,
1118
+ decoder_position_ids,
1119
+ **kwargs,
1120
+ )
1121
+
1122
+ outputs = self.module.apply(
1123
+ inputs,
1124
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1125
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1126
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1127
+ encoder_hidden_states=encoder_hidden_states,
1128
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1129
+ output_attentions=output_attentions,
1130
+ output_hidden_states=output_hidden_states,
1131
+ return_dict=return_dict,
1132
+ deterministic=not train,
1133
+ rngs=rngs,
1134
+ mutable=mutable,
1135
+ method=_decoder_forward,
1136
+ )
1137
+
1138
+ # add updated cache to model output
1139
+ if past_key_values is not None and return_dict:
1140
+ outputs, past = outputs
1141
+ outputs["past_key_values"] = unfreeze(past["cache"])
1142
+ return outputs
1143
+ elif past_key_values is not None and not return_dict:
1144
+ outputs, past = outputs
1145
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1146
+
1147
+ return outputs
1148
+
1149
+ def __call__(
1150
+ self,
1151
+ input_ids: jnp.ndarray,
1152
+ attention_mask: Optional[jnp.ndarray] = None,
1153
+ decoder_input_ids: Optional[jnp.ndarray] = None,
1154
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1155
+ position_ids: Optional[jnp.ndarray] = None,
1156
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1157
+ output_attentions: Optional[bool] = None,
1158
+ output_hidden_states: Optional[bool] = None,
1159
+ return_dict: Optional[bool] = None,
1160
+ train: bool = False,
1161
+ params: dict = None,
1162
+ dropout_rng: PRNGKey = None,
1163
+ ):
1164
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1165
+ output_hidden_states = (
1166
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1167
+ )
1168
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1169
+
1170
+ # prepare encoder inputs
1171
+ if attention_mask is None:
1172
+ attention_mask = jnp.ones_like(input_ids)
1173
+ if position_ids is None:
1174
+ batch_size, sequence_length = input_ids.shape
1175
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1176
+
1177
+ # prepare decoder inputs
1178
+ if decoder_input_ids is None:
1179
+ decoder_input_ids = shift_tokens_right(
1180
+ input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
1181
+ )
1182
+ if decoder_attention_mask is None:
1183
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
1184
+ if decoder_position_ids is None:
1185
+ batch_size, sequence_length = decoder_input_ids.shape
1186
+ decoder_position_ids = jnp.broadcast_to(
1187
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1188
+ )
1189
+
1190
+ # Handle any PRNG if needed
1191
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
1192
+
1193
+ return self.module.apply(
1194
+ {"params": params or self.params},
1195
+ input_ids=jnp.array(input_ids, dtype="i4"),
1196
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1197
+ position_ids=jnp.array(position_ids, dtype="i4"),
1198
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1199
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1200
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1201
+ output_attentions=output_attentions,
1202
+ output_hidden_states=output_hidden_states,
1203
+ return_dict=return_dict,
1204
+ deterministic=not train,
1205
+ rngs=rngs,
1206
+ )
1207
+
1208
+
1209
+ @add_start_docstrings(
1210
+ "The bare BlenderbotSmall Model transformer outputting raw hidden-states without any specific head on top.",
1211
+ BLENDERBOT_SMALL_START_DOCSTRING,
1212
+ )
1213
+ class FlaxBlenderbotSmallModel(FlaxBlenderbotSmallPreTrainedModel):
1214
+ config: BlenderbotSmallConfig
1215
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
1216
+ module_class = FlaxBlenderbotSmallModule
1217
+
1218
+
1219
+ append_call_sample_docstring(FlaxBlenderbotSmallModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
1220
+
1221
+
1222
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->BlenderbotSmall
1223
+ class FlaxBlenderbotSmallForConditionalGenerationModule(nn.Module):
1224
+ config: BlenderbotSmallConfig
1225
+ dtype: jnp.dtype = jnp.float32
1226
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
1227
+
1228
+ def setup(self):
1229
+ self.model = FlaxBlenderbotSmallModule(config=self.config, dtype=self.dtype)
1230
+ self.lm_head = nn.Dense(
1231
+ self.model.shared.num_embeddings,
1232
+ use_bias=False,
1233
+ dtype=self.dtype,
1234
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
1235
+ )
1236
+ self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
1237
+
1238
+ def _get_encoder_module(self):
1239
+ return self.model.encoder
1240
+
1241
+ def _get_decoder_module(self):
1242
+ return self.model.decoder
1243
+
1244
+ def __call__(
1245
+ self,
1246
+ input_ids,
1247
+ attention_mask,
1248
+ decoder_input_ids,
1249
+ decoder_attention_mask,
1250
+ position_ids,
1251
+ decoder_position_ids,
1252
+ output_attentions: bool = False,
1253
+ output_hidden_states: bool = False,
1254
+ return_dict: bool = True,
1255
+ deterministic: bool = True,
1256
+ ):
1257
+ outputs = self.model(
1258
+ input_ids=input_ids,
1259
+ attention_mask=attention_mask,
1260
+ decoder_input_ids=decoder_input_ids,
1261
+ decoder_attention_mask=decoder_attention_mask,
1262
+ position_ids=position_ids,
1263
+ decoder_position_ids=decoder_position_ids,
1264
+ output_attentions=output_attentions,
1265
+ output_hidden_states=output_hidden_states,
1266
+ return_dict=return_dict,
1267
+ deterministic=deterministic,
1268
+ )
1269
+
1270
+ hidden_states = outputs[0]
1271
+
1272
+ if self.config.tie_word_embeddings:
1273
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
1274
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1275
+ else:
1276
+ lm_logits = self.lm_head(hidden_states)
1277
+
1278
+ lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
1279
+
1280
+ if not return_dict:
1281
+ output = (lm_logits,) + outputs[1:]
1282
+ return output
1283
+
1284
+ return FlaxSeq2SeqLMOutput(
1285
+ logits=lm_logits,
1286
+ decoder_hidden_states=outputs.decoder_hidden_states,
1287
+ decoder_attentions=outputs.decoder_attentions,
1288
+ cross_attentions=outputs.cross_attentions,
1289
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1290
+ encoder_hidden_states=outputs.encoder_hidden_states,
1291
+ encoder_attentions=outputs.encoder_attentions,
1292
+ )
1293
+
1294
+
1295
+ @add_start_docstrings(
1296
+ "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.",
1297
+ BLENDERBOT_SMALL_START_DOCSTRING,
1298
+ )
1299
+ class FlaxBlenderbotSmallForConditionalGeneration(FlaxBlenderbotSmallPreTrainedModel):
1300
+ module_class = FlaxBlenderbotSmallForConditionalGenerationModule
1301
+ dtype: jnp.dtype = jnp.float32
1302
+
1303
+ @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING)
1304
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotSmallConfig)
1305
+ def decode(
1306
+ self,
1307
+ decoder_input_ids,
1308
+ encoder_outputs,
1309
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1310
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1311
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1312
+ past_key_values: dict = None,
1313
+ output_attentions: Optional[bool] = None,
1314
+ output_hidden_states: Optional[bool] = None,
1315
+ return_dict: Optional[bool] = None,
1316
+ deterministic: bool = True,
1317
+ params: dict = None,
1318
+ dropout_rng: PRNGKey = None,
1319
+ ):
1320
+ r"""
1321
+ Returns:
1322
+
1323
+ Example:
1324
+
1325
+ ```python
1326
+ >>> import jax.numpy as jnp
1327
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1328
+
1329
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1330
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1331
+
1332
+ >>> text = "My friends are cool but they eat too many carbs."
1333
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
1334
+ >>> encoder_outputs = model.encode(**inputs)
1335
+
1336
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1337
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1338
+
1339
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1340
+ >>> logits = outputs.logits
1341
+ ```"""
1342
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1343
+ output_hidden_states = (
1344
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1345
+ )
1346
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1347
+
1348
+ encoder_hidden_states = encoder_outputs[0]
1349
+ if encoder_attention_mask is None:
1350
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1351
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1352
+
1353
+ batch_size, sequence_length = decoder_input_ids.shape
1354
+ if decoder_attention_mask is None:
1355
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1356
+
1357
+ if decoder_position_ids is None:
1358
+ if past_key_values is not None:
1359
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1360
+
1361
+ decoder_position_ids = jnp.broadcast_to(
1362
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1363
+ )
1364
+
1365
+ # Handle any PRNG if needed
1366
+ rngs = {}
1367
+ if dropout_rng is not None:
1368
+ rngs["dropout"] = dropout_rng
1369
+
1370
+ inputs = {"params": params or self.params}
1371
+
1372
+ # If past_key_values are passed, the cache is already initialized and the private flag init_cache has to be
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
+ # changed by the FlaxBlenderbotSmallAttention module.
1375
+ if past_key_values:
1376
+ inputs["cache"] = past_key_values
1377
+ mutable = ["cache"]
1378
+ else:
1379
+ mutable = False
1380
+
1381
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1382
+ decoder_module = module._get_decoder_module()
1383
+ outputs = decoder_module(
1384
+ decoder_input_ids,
1385
+ decoder_attention_mask,
1386
+ decoder_position_ids,
1387
+ **kwargs,
1388
+ )
1389
+ hidden_states = outputs[0]
1390
+
1391
+ if self.config.tie_word_embeddings:
1392
+ shared_embedding = module.model.variables["params"]["shared"]["embedding"]
1393
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1394
+ else:
1395
+ lm_logits = module.lm_head(hidden_states)
1396
+
1397
+ lm_logits += module.final_logits_bias.astype(self.dtype)
1398
+ return lm_logits, outputs
1399
+
1400
+ outputs = self.module.apply(
1401
+ inputs,
1402
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1403
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1404
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1405
+ encoder_hidden_states=encoder_hidden_states,
1406
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1407
+ output_attentions=output_attentions,
1408
+ output_hidden_states=output_hidden_states,
1409
+ return_dict=return_dict,
1410
+ deterministic=deterministic,
1411
+ rngs=rngs,
1412
+ mutable=mutable,
1413
+ method=_decoder_forward,
1414
+ )
1415
+
1416
+ if past_key_values is None:
1417
+ lm_logits, decoder_outputs = outputs
1418
+ else:
1419
+ (lm_logits, decoder_outputs), past = outputs
1420
+
1421
+ if return_dict:
1422
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
1423
+ logits=lm_logits,
1424
+ hidden_states=decoder_outputs.hidden_states,
1425
+ attentions=decoder_outputs.attentions,
1426
+ cross_attentions=decoder_outputs.cross_attentions,
1427
+ )
1428
+ else:
1429
+ outputs = (lm_logits,) + decoder_outputs[1:]
1430
+
1431
+ # add updated cache to model output
1432
+ if past_key_values is not None and return_dict:
1433
+ outputs["past_key_values"] = unfreeze(past["cache"])
1434
+ return outputs
1435
+ elif past_key_values is not None and not return_dict:
1436
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1437
+
1438
+ return outputs
1439
+
1440
+ def prepare_inputs_for_generation(
1441
+ self,
1442
+ decoder_input_ids,
1443
+ max_length,
1444
+ attention_mask: Optional[jax.Array] = None,
1445
+ decoder_attention_mask: Optional[jax.Array] = None,
1446
+ encoder_outputs=None,
1447
+ **kwargs,
1448
+ ):
1449
+ # initializing the cache
1450
+ batch_size, seq_length = decoder_input_ids.shape
1451
+
1452
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
1453
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1454
+ # But since the decoder uses a causal mask, those positions are masked anyways.
1455
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
1456
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1457
+ if decoder_attention_mask is not None:
1458
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
1459
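+ # e.g. (illustrative values) a decoder_attention_mask of [[1, 1, 0, 0]] yields position_ids [[0, 1, 1, 1]]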
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
1460
+ else:
1461
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1462
+
1463
+ return {
1464
+ "past_key_values": past_key_values,
1465
+ "encoder_outputs": encoder_outputs,
1466
+ "encoder_attention_mask": attention_mask,
1467
+ "decoder_attention_mask": extended_attention_mask,
1468
+ "decoder_position_ids": position_ids,
1469
+ }
1470
+
1471
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1472
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1473
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
1474
+ return model_kwargs
1475
+
1476
+
1477
+ FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING = """
1478
+ Returns:
1479
+
1480
+ Summarization example:
1481
+
1482
+ ```py
1483
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1484
+
1485
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1486
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1487
+
1488
+ >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
1489
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np")
1490
+
1491
+ >>> # Generate Summary
1492
+ >>> summary_ids = model.generate(inputs["input_ids"]).sequences
1493
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
1494
+ ```
1495
+
1496
+ Mask filling example:
1497
+
1498
+ ```py
1499
+ >>> import jax
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration
1500
+
1501
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
1502
+ >>> TXT = "My friends are <mask> but they eat too many carbs."
1503
+
1504
+ >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
1505
+ >>> input_ids = tokenizer([TXT], return_tensors="np")["input_ids"]
1506
+ >>> logits = model(input_ids).logits
1507
+
1508
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()
1509
+ >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)
1510
+ >>> values, predictions = jax.lax.top_k(probs, k=1)
1511
+
1512
+ >>> tokenizer.decode(predictions).split()
1513
+ ```
1514
+ """
1515
+
1516
+ overwrite_call_docstring(
1517
+ FlaxBlenderbotSmallForConditionalGeneration,
1518
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING + FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING,
1519
+ )
1520
+ append_replace_return_docstrings(
1521
+ FlaxBlenderbotSmallForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
1522
+ )
env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py ADDED
@@ -0,0 +1,1526 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook, Inc and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 BlenderbotSmall model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import random
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFBaseModelOutputWithPastAndCrossAttentions,
30
+ TFSeq2SeqLMOutput,
31
+ TFSeq2SeqModelOutput,
32
+ )
33
+
34
+ # Public API
35
+ from ...modeling_tf_utils import (
36
+ TFCausalLanguageModelingLoss,
37
+ TFPreTrainedModel,
38
+ keras,
39
+ keras_serializable,
40
+ unpack_inputs,
41
+ )
42
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
43
+ from ...utils import (
44
+ add_code_sample_docstrings,
45
+ add_end_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from .configuration_blenderbot_small import BlenderbotSmallConfig
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+ _CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M"
57
+ _CONFIG_FOR_DOC = "BlenderbotSmallConfig"
58
+
59
+
60
+ LARGE_NEGATIVE = -1e8
61
+
62
+
63
+ # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
64
+ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
65
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
66
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
67
+ start_tokens = tf.fill(
68
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
69
+ )
70
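+ # prepend the decoder start token and drop the last token so the targets are shifted one position to the right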
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
71
+ # replace possible -100 values in labels by `pad_token_id`
72
+ shifted_input_ids = tf.where(
73
+ shifted_input_ids == -100,
74
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
75
+ shifted_input_ids,
76
+ )
77
+
78
+ # "Verify that `labels` has only positive values and -100"
79
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
80
+
81
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
82
+ with tf.control_dependencies([assert_gte0]):
83
+ shifted_input_ids = tf.identity(shifted_input_ids)
84
+
85
+ return shifted_input_ids
86
+
87
+
88
+ # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
89
+ def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
90
+ """
91
+ Make causal mask used for uni-directional (decoder) self-attention.
92
+ """
93
+ bsz = input_ids_shape[0]
94
+ tgt_len = input_ids_shape[1]
95
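+ # start from an all-LARGE_NEGATIVE square matrix and zero out the lower triangle so each position attends only to itself and the past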
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
96
+ mask_cond = tf.range(shape_list(mask)[-1])
97
+
98
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
99
+
100
+ if past_key_values_length > 0:
101
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
102
+
103
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
104
+
105
+
106
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
107
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
108
+ """
109
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
110
+ """
111
+ src_len = shape_list(mask)[1]
112
+ tgt_len = tgt_len if tgt_len is not None else src_len
113
+ one_cst = tf.constant(1.0)
114
+ mask = tf.cast(mask, dtype=one_cst.dtype)
115
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
116
+
117
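+ # invert the mask: positions to keep become 0, padded positions become LARGE_NEGATIVE, so it can be added to the attention scores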
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
118
+
119
+
120
+ # Copied from transformers.models.blenderbot.modeling_tf_blenderbot.TFBlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
121
+ class TFBlenderbotSmallLearnedPositionalEmbedding(keras.layers.Embedding):
122
+ """
123
+ This module learns positional embeddings up to a fixed maximum size.
124
+ """
125
+
126
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
127
+ super().__init__(num_embeddings, embedding_dim, **kwargs)
128
+
129
+ def call(
130
+ self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None
131
+ ):
132
+ """Input is expected to be of size [bsz x seqlen]."""
133
+ if position_ids is None:
134
+ seq_len = input_shape[1]
135
+ position_ids = tf.range(seq_len, delta=1, name="range")
136
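+ # offset by the number of cached positions so incremental decoding uses the correct absolute positions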
+ position_ids += past_key_values_length
137
+
138
+ return super().call(tf.cast(position_ids, dtype=tf.int32))
139
+
140
+
141
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->BlenderbotSmall
142
+ class TFBlenderbotSmallAttention(keras.layers.Layer):
143
+ """Multi-headed attention from "Attention Is All You Need"""
144
+
145
+ def __init__(
146
+ self,
147
+ embed_dim: int,
148
+ num_heads: int,
149
+ dropout: float = 0.0,
150
+ is_decoder: bool = False,
151
+ bias: bool = True,
152
+ **kwargs,
153
+ ):
154
+ super().__init__(**kwargs)
155
+ self.embed_dim = embed_dim
156
+
157
+ self.num_heads = num_heads
158
+ self.dropout = keras.layers.Dropout(dropout)
159
+ self.head_dim = embed_dim // num_heads
160
+ if (self.head_dim * num_heads) != self.embed_dim:
161
+ raise ValueError(
162
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
163
+ f" and `num_heads`: {num_heads})."
164
+ )
165
+ self.scaling = self.head_dim**-0.5
166
+ self.is_decoder = is_decoder
167
+
168
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
169
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
170
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
171
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
172
+
173
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
174
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
175
+
176
+ def call(
177
+ self,
178
+ hidden_states: tf.Tensor,
179
+ key_value_states: tf.Tensor | None = None,
180
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
181
+ attention_mask: tf.Tensor | None = None,
182
+ layer_head_mask: tf.Tensor | None = None,
183
+ training: Optional[bool] = False,
184
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
185
+ """Input shape: Batch x Time x Channel"""
186
+
187
+ # if key_value_states are provided this layer is used as a cross-attention layer
188
+ # for the decoder
189
+ is_cross_attention = key_value_states is not None
190
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
191
+
192
+ # get query proj
193
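+ # queries are pre-scaled by 1/sqrt(head_dim) so the raw query-key dot product is already normalized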
+ query_states = self.q_proj(hidden_states) * self.scaling
194
+ # get key, value proj
195
+ if is_cross_attention and past_key_value is not None:
196
+ # reuse k,v, cross_attentions
197
+ key_states = past_key_value[0]
198
+ value_states = past_key_value[1]
199
+ elif is_cross_attention:
200
+ # cross_attentions
201
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
202
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
203
+ elif past_key_value is not None:
204
+ # reuse k, v, self_attention
205
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
206
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
207
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
208
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
209
+ else:
210
+ # self_attention
211
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
212
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
213
+
214
+ if self.is_decoder:
215
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
216
+ # Further calls to cross_attention layer can then reuse all cross-attention
217
+ # key/value_states (first "if" case)
218
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
219
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
220
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
221
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
222
+ past_key_value = (key_states, value_states)
223
+
224
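+ # fold the head dimension into the batch dimension so attention can be computed with a single batched matmul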
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
225
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
226
+ key_states = tf.reshape(key_states, proj_shape)
227
+ value_states = tf.reshape(value_states, proj_shape)
228
+
229
+ src_len = shape_list(key_states)[1]
230
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
231
+
232
+ tf.debugging.assert_equal(
233
+ shape_list(attn_weights),
234
+ [bsz * self.num_heads, tgt_len, src_len],
235
+ message=(
236
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
237
+ f" {shape_list(attn_weights)}"
238
+ ),
239
+ )
240
+
241
+ if attention_mask is not None:
242
+ tf.debugging.assert_equal(
243
+ shape_list(attention_mask),
244
+ [bsz, 1, tgt_len, src_len],
245
+ message=(
246
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
247
+ f" {shape_list(attention_mask)}"
248
+ ),
249
+ )
250
+
251
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
252
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
253
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
254
+
255
+ attn_weights = stable_softmax(attn_weights, axis=-1)
256
+
257
+ if layer_head_mask is not None:
258
+ tf.debugging.assert_equal(
259
+ shape_list(layer_head_mask),
260
+ [self.num_heads],
261
+ message=(
262
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
263
+ f" {shape_list(layer_head_mask)}"
264
+ ),
265
+ )
266
+
267
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
268
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
269
+ )
270
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
271
+
272
+ attn_probs = self.dropout(attn_weights, training=training)
273
+ attn_output = tf.matmul(attn_probs, value_states)
274
+
275
+ tf.debugging.assert_equal(
276
+ shape_list(attn_output),
277
+ [bsz * self.num_heads, tgt_len, self.head_dim],
278
+ message=(
279
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
280
+ f" {shape_list(attn_output)}"
281
+ ),
282
+ )
283
+
284
+ attn_output = tf.transpose(
285
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
286
+ )
287
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
288
+
289
+ attn_output = self.out_proj(attn_output)
290
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
291
+
292
+ return attn_output, attn_weights, past_key_value
293
+
294
+ def build(self, input_shape=None):
295
+ if self.built:
296
+ return
297
+ self.built = True
298
+ if getattr(self, "k_proj", None) is not None:
299
+ with tf.name_scope(self.k_proj.name):
300
+ self.k_proj.build([None, None, self.embed_dim])
301
+ if getattr(self, "q_proj", None) is not None:
302
+ with tf.name_scope(self.q_proj.name):
303
+ self.q_proj.build([None, None, self.embed_dim])
304
+ if getattr(self, "v_proj", None) is not None:
305
+ with tf.name_scope(self.v_proj.name):
306
+ self.v_proj.build([None, None, self.embed_dim])
307
+ if getattr(self, "out_proj", None) is not None:
308
+ with tf.name_scope(self.out_proj.name):
309
+ self.out_proj.build([None, None, self.embed_dim])
310
+
311
+
312
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartEncoderLayer with Bart->BlenderbotSmall
313
+ class TFBlenderbotSmallEncoderLayer(keras.layers.Layer):
314
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
315
+ super().__init__(**kwargs)
316
+ self.embed_dim = config.d_model
317
+ self.self_attn = TFBlenderbotSmallAttention(
318
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
319
+ )
320
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
321
+ self.dropout = keras.layers.Dropout(config.dropout)
322
+ self.activation_fn = get_tf_activation(config.activation_function)
323
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
324
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
325
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
326
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
327
+ self.config = config
328
+
329
+ def call(
330
+ self,
331
+ hidden_states: tf.Tensor,
332
+ attention_mask: np.ndarray | tf.Tensor | None,
333
+ layer_head_mask: tf.Tensor | None,
334
+ training: Optional[bool] = False,
335
+ ) -> tf.Tensor:
336
+ """
337
+ Args:
338
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
339
+ attention_mask (`tf.Tensor`): attention mask of size
340
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
341
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
342
+ `(encoder_attention_heads,)`
343
+ """
344
+ residual = hidden_states
345
+ hidden_states, self_attn_weights, _ = self.self_attn(
346
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
347
+ )
348
+
349
+ tf.debugging.assert_equal(
350
+ shape_list(hidden_states),
351
+ shape_list(residual),
352
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
353
+ )
354
+
355
+ hidden_states = self.dropout(hidden_states, training=training)
356
+ hidden_states = residual + hidden_states
357
+ hidden_states = self.self_attn_layer_norm(hidden_states)
358
+
359
+ residual = hidden_states
360
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
361
+ hidden_states = self.activation_dropout(hidden_states, training=training)
362
+ hidden_states = self.fc2(hidden_states)
363
+ hidden_states = self.dropout(hidden_states, training=training)
364
+ hidden_states = residual + hidden_states
365
+ hidden_states = self.final_layer_norm(hidden_states)
366
+
367
+ return hidden_states, self_attn_weights
368
+
369
+ def build(self, input_shape=None):
370
+ if self.built:
371
+ return
372
+ self.built = True
373
+ if getattr(self, "self_attn", None) is not None:
374
+ with tf.name_scope(self.self_attn.name):
375
+ self.self_attn.build(None)
376
+ if getattr(self, "self_attn_layer_norm", None) is not None:
377
+ with tf.name_scope(self.self_attn_layer_norm.name):
378
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
379
+ if getattr(self, "fc1", None) is not None:
380
+ with tf.name_scope(self.fc1.name):
381
+ self.fc1.build([None, None, self.embed_dim])
382
+ if getattr(self, "fc2", None) is not None:
383
+ with tf.name_scope(self.fc2.name):
384
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
385
+ if getattr(self, "final_layer_norm", None) is not None:
386
+ with tf.name_scope(self.final_layer_norm.name):
387
+ self.final_layer_norm.build([None, None, self.embed_dim])
388
+
389
+
390
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartDecoderLayer with Bart->BlenderbotSmall
391
+ class TFBlenderbotSmallDecoderLayer(keras.layers.Layer):
392
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
393
+ super().__init__(**kwargs)
394
+ self.embed_dim = config.d_model
395
+ self.self_attn = TFBlenderbotSmallAttention(
396
+ embed_dim=self.embed_dim,
397
+ num_heads=config.decoder_attention_heads,
398
+ dropout=config.attention_dropout,
399
+ name="self_attn",
400
+ is_decoder=True,
401
+ )
402
+ self.dropout = keras.layers.Dropout(config.dropout)
403
+ self.activation_fn = get_tf_activation(config.activation_function)
404
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
405
+
406
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
407
+ self.encoder_attn = TFBlenderbotSmallAttention(
408
+ self.embed_dim,
409
+ config.decoder_attention_heads,
410
+ dropout=config.attention_dropout,
411
+ name="encoder_attn",
412
+ is_decoder=True,
413
+ )
414
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
415
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
416
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
417
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
418
+ self.config = config
419
+
420
+ def call(
421
+ self,
422
+ hidden_states: tf.Tensor,
423
+ attention_mask: np.ndarray | tf.Tensor | None = None,
424
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
425
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
426
+ layer_head_mask: tf.Tensor | None = None,
427
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
428
+ past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
429
+ training: Optional[bool] = False,
430
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
431
+ """
432
+ Args:
433
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
434
+ attention_mask (`tf.Tensor`): attention mask of size
435
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
436
+ encoder_hidden_states (`tf.Tensor`):
437
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
438
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
439
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
440
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
441
+ `(decoder_attention_heads,)`
442
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
443
+ `(decoder_attention_heads,)`
444
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
445
+ """
446
+ residual = hidden_states
447
+
448
+ # Self Attention
449
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
450
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
451
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
452
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
453
+ hidden_states=hidden_states,
454
+ past_key_value=self_attn_past_key_value,
455
+ attention_mask=attention_mask,
456
+ layer_head_mask=layer_head_mask,
457
+ )
458
+ hidden_states = self.dropout(hidden_states, training=training)
459
+ hidden_states = residual + hidden_states
460
+ hidden_states = self.self_attn_layer_norm(hidden_states)
461
+
462
+ # Cross-Attention Block
463
+ cross_attn_present_key_value = None
464
+ cross_attn_weights = None
465
+ if encoder_hidden_states is not None:
466
+ residual = hidden_states
467
+
468
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
469
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
470
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
471
+ hidden_states=hidden_states,
472
+ key_value_states=encoder_hidden_states,
473
+ attention_mask=encoder_attention_mask,
474
+ layer_head_mask=cross_attn_layer_head_mask,
475
+ past_key_value=cross_attn_past_key_value,
476
+ )
477
+ hidden_states = self.dropout(hidden_states, training=training)
478
+ hidden_states = residual + hidden_states
479
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
480
+
481
+ # add cross-attn to positions 3,4 of present_key_value tuple
482
+ present_key_value = present_key_value + cross_attn_present_key_value
483
+
484
+ # Fully Connected
485
+ residual = hidden_states
486
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
487
+ hidden_states = self.activation_dropout(hidden_states, training=training)
488
+ hidden_states = self.fc2(hidden_states)
489
+ hidden_states = self.dropout(hidden_states, training=training)
490
+ hidden_states = residual + hidden_states
491
+ hidden_states = self.final_layer_norm(hidden_states)
492
+
493
+ return (
494
+ hidden_states,
495
+ self_attn_weights,
496
+ cross_attn_weights,
497
+ present_key_value,
498
+ )
499
+
500
+ def build(self, input_shape=None):
501
+ if self.built:
502
+ return
503
+ self.built = True
504
+ if getattr(self, "self_attn", None) is not None:
505
+ with tf.name_scope(self.self_attn.name):
506
+ self.self_attn.build(None)
507
+ if getattr(self, "self_attn_layer_norm", None) is not None:
508
+ with tf.name_scope(self.self_attn_layer_norm.name):
509
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
510
+ if getattr(self, "encoder_attn", None) is not None:
511
+ with tf.name_scope(self.encoder_attn.name):
512
+ self.encoder_attn.build(None)
513
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
514
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
515
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
516
+ if getattr(self, "fc1", None) is not None:
517
+ with tf.name_scope(self.fc1.name):
518
+ self.fc1.build([None, None, self.embed_dim])
519
+ if getattr(self, "fc2", None) is not None:
520
+ with tf.name_scope(self.fc2.name):
521
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
522
+ if getattr(self, "final_layer_norm", None) is not None:
523
+ with tf.name_scope(self.final_layer_norm.name):
524
+ self.final_layer_norm.build([None, None, self.embed_dim])
525
+
526
+
527
+ class TFBlenderbotSmallPreTrainedModel(TFPreTrainedModel):
528
+ config_class = BlenderbotSmallConfig
529
+ base_model_prefix = "model"
530
+
531
+
532
+ BLENDERBOT_SMALL_START_DOCSTRING = r"""
533
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
534
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
535
+ etc.)
536
+
537
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
538
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
539
+ behavior.
540
+
541
+ <Tip>
542
+
543
+ TensorFlow models and layers in `transformers` accept two formats as input:
544
+
545
+ - having all inputs as keyword arguments (like PyTorch models), or
546
+ - having all inputs as a list, tuple or dict in the first positional argument.
547
+
548
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
549
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
550
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
551
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
552
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
553
+ positional argument:
554
+
555
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
556
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
557
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
558
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
559
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
560
+
561
+ Note that when creating models and layers with
562
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
563
+ about any of this, as you can just pass inputs like you would to any other Python function!
564
+
565
+ </Tip>
566
+
567
+ Args:
568
+ config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.
569
+ Initializing with a config file does not load the weights associated with the model, only the
570
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
571
+ """
572
+
573
+ BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
574
+ Conversation example::
575
+
576
+ ```py
577
+ >>> from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration
578
+
579
+ >>> mname = "facebook/blenderbot_small-90M"
580
+ >>> model = TFBlenderbotSmallForConditionalGeneration.from_pretrained(mname)
581
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
582
+
583
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
584
+ >>> print("Human: ", UTTERANCE)
585
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")
586
+
587
+ >>> reply_ids = model.generate(**inputs)
588
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
589
+ what kind of carbs do they eat? i don't know much about carbs.
590
+
591
+ >>> REPLY = "I'm not sure"
592
+ >>> print("Human: ", REPLY)
593
+ >>> NEXT_UTTERANCE = (
594
+ ... "My friends are cool but they eat too many carbs.</s> "
595
+ ... "<s>what kind of carbs do they eat? i don't know much about carbs.</s> "
596
+ ... "<s>I'm not sure."
597
+ ... )
598
+
599
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")
600
+ >>> inputs.pop("token_type_ids")
601
+ >>> next_reply_ids = model.generate(**inputs)
602
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
603
+ ```
604
+ """
605
+
606
+ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
607
+ Args:
608
+ input_ids (`tf.Tensor` of shape `({0})`):
609
+ Indices of input sequence tokens in the vocabulary.
610
+
611
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
612
+ [`PreTrainedTokenizer.__call__`] for details.
613
+
614
+ [What are input IDs?](../glossary#input-ids)
615
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
616
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
617
+
618
+ - 1 for tokens that are **not masked**,
619
+ - 0 for tokens that are **masked**.
620
+
621
+ [What are attention masks?](../glossary#attention-mask)
622
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
623
+ Indices of decoder input sequence tokens in the vocabulary.
624
+
625
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
626
+ [`PreTrainedTokenizer.__call__`] for details.
627
+
628
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
629
+
630
+ BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
631
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
632
+ `past_key_values`).
633
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
634
+ A default mask that ignores pad tokens will be created if this is not provided. It is not recommended to set this for most use cases.
635
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
636
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
637
+ range `[0, config.max_position_embeddings - 1]`.
638
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
639
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
640
+
641
+ - 1 indicates the head is **not masked**,
642
+ - 0 indicates the head is **masked**.
643
+
644
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
645
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
646
+
647
+ - 1 indicates the head is **not masked**,
648
+ - 0 indicates the head is **masked**.
649
+
650
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
651
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
652
+
653
+ - 1 indicates the head is **not masked**,
654
+ - 0 indicates the head is **masked**.
655
+
656
+ encoder_outputs (`tf.FloatTensor`, *optional*):
657
+ Sequence of hidden-states of shape `(batch_size, sequence_length, hidden_size)` at the output of the last
658
+ layer of the encoder. Used in the cross-attention of the decoder.
659
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`)
660
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
661
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
662
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
663
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
664
+ use_cache (`bool`, *optional*, defaults to `True`):
665
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
666
+ `past_key_values`). Set to `False` during training, `True` during generation
667
+ output_attentions (`bool`, *optional*):
668
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
669
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
670
+ config will be used instead.
671
+ output_hidden_states (`bool`, *optional*):
672
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
673
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
674
+ used instead.
675
+ return_dict (`bool`, *optional*):
676
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
677
+ eager mode, in graph mode the value will always be set to True.
678
+ training (`bool`, *optional*, defaults to `False`):
679
+ Whether or not to use the model in training mode (some modules like dropout modules have different
680
+ behaviors between training and evaluation).
681
+ """
682
+
683
+
684
+ @keras_serializable
685
+ class TFBlenderbotSmallEncoder(keras.layers.Layer):
686
+ config_class = BlenderbotSmallConfig
687
+ """
688
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
689
+ [`TFBlenderbotSmallEncoderLayer`].
690
+
691
+ Args:
692
+ config: BlenderbotSmallConfig
693
+ """
694
+
695
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
696
+ super().__init__(**kwargs)
697
+ self.config = config
698
+ self.dropout = keras.layers.Dropout(config.dropout)
699
+ self.layerdrop = config.encoder_layerdrop
700
+ self.padding_idx = config.pad_token_id
701
+ self.max_source_positions = config.max_position_embeddings
702
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
703
+
704
+ self.embed_tokens = embed_tokens
705
+ self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
706
+ config.max_position_embeddings,
707
+ config.d_model,
708
+ name="embed_positions",
709
+ )
710
+ self.layers = [TFBlenderbotSmallEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
711
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
712
+ self.embed_dim = config.d_model
713
+
714
+ def get_embed_tokens(self):
715
+ return self.embed_tokens
716
+
717
+ def set_embed_tokens(self, embed_tokens):
718
+ self.embed_tokens = embed_tokens
719
+
720
+ @unpack_inputs
721
+ def call(
722
+ self,
723
+ input_ids=None,
724
+ inputs_embeds=None,
725
+ attention_mask=None,
726
+ head_mask=None,
727
+ output_attentions=None,
728
+ output_hidden_states=None,
729
+ return_dict=None,
730
+ training=False,
731
+ ):
732
+ """
733
+ Args:
734
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
735
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
736
+ provide it.
737
+
738
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
739
+ [`PreTrainedTokenizer.__call__`] for details.
740
+
741
+ [What are input IDs?](../glossary#input-ids)
742
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
743
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
744
+
745
+ - 1 for tokens that are **not masked**,
746
+ - 0 for tokens that are **masked**.
747
+
748
+ [What are attention masks?](../glossary#attention-mask)
749
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
750
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
751
+
752
+ - 1 indicates the head is **not masked**,
753
+ - 0 indicates the head is **masked**.
754
+
755
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
756
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
757
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
758
+ than the model's internal embedding lookup matrix.
759
+ output_attentions (`bool`, *optional*):
760
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
761
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
762
+ in the config will be used instead.
763
+ output_hidden_states (`bool`, *optional*):
764
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
765
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
766
+ will be used instead.
767
+ return_dict (`bool`, *optional*):
768
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
769
+ in eager mode, in graph mode the value will always be set to True.
770
+ training (`bool`, *optional*, defaults to `False`):
771
+ Whether or not to use the model in training mode (some modules like dropout modules have different
772
+ behaviors between training and evaluation).
773
+ """
774
+ if input_ids is not None and inputs_embeds is not None:
775
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
776
+ elif input_ids is not None:
777
+ input_shape = shape_list(input_ids)
778
+ elif inputs_embeds is not None:
779
+ input_shape = shape_list(inputs_embeds)[:-1]
780
+ else:
781
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
782
+
783
+ if inputs_embeds is None:
784
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
785
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
786
+
787
+ embed_pos = self.embed_positions(input_shape)
788
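+ # in the encoder, positional embeddings are added to the token embeddings before layernorm_embedding is applied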
+ hidden_states = inputs_embeds + embed_pos
789
+ hidden_states = self.layernorm_embedding(hidden_states)
790
+ hidden_states = self.dropout(hidden_states, training=training)
791
+
792
+ # check attention mask and invert
793
+ if attention_mask is not None:
794
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
795
+ attention_mask = _expand_mask(attention_mask)
796
+ else:
797
+ attention_mask = None
798
+
799
+ encoder_states = () if output_hidden_states else None
800
+ all_attentions = () if output_attentions else None
801
+
802
+ # check if head_mask has a correct number of layers specified if desired
803
+ if head_mask is not None:
804
+ tf.debugging.assert_equal(
805
+ shape_list(head_mask)[0],
806
+ len(self.layers),
807
+ message=(
808
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
809
+ f" {shape_list(head_mask)[0]}."
810
+ ),
811
+ )
812
+
813
+ # encoder layers
814
+ for idx, encoder_layer in enumerate(self.layers):
815
+ if output_hidden_states:
816
+ encoder_states = encoder_states + (hidden_states,)
817
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
818
+ dropout_probability = random.uniform(0, 1)
819
+ if training and (dropout_probability < self.layerdrop): # skip the layer
820
+ continue
821
+
822
+ hidden_states, attn = encoder_layer(
823
+ hidden_states,
824
+ attention_mask,
825
+ head_mask[idx] if head_mask is not None else None,
826
+ )
827
+
828
+ if output_attentions:
829
+ all_attentions += (attn,)
830
+
831
+ if output_hidden_states:
832
+ encoder_states = encoder_states + (hidden_states,)
833
+
834
+ if not return_dict:
835
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
836
+ return TFBaseModelOutput(
837
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
838
+ )
839
+
840
+ def build(self, input_shape=None):
841
+ if self.built:
842
+ return
843
+ self.built = True
844
+ if getattr(self, "embed_positions", None) is not None:
845
+ with tf.name_scope(self.embed_positions.name):
846
+ self.embed_positions.build(None)
847
+ if getattr(self, "layernorm_embedding", None) is not None:
848
+ with tf.name_scope(self.layernorm_embedding.name):
849
+ self.layernorm_embedding.build([None, None, self.embed_dim])
850
+ if getattr(self, "layers", None) is not None:
851
+ for layer in self.layers:
852
+ with tf.name_scope(layer.name):
853
+ layer.build(None)
854
+
855
+
856
+ @keras_serializable
857
+ class TFBlenderbotSmallDecoder(keras.layers.Layer):
858
+ config_class = BlenderbotSmallConfig
859
+ """
860
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBlenderbotSmallDecoderLayer`]
861
+
862
+ Args:
863
+ config: BlenderbotSmallConfig
864
+ embed_tokens: output embedding
865
+ """
866
+
867
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
868
+ super().__init__(**kwargs)
869
+ self.config = config
870
+ self.padding_idx = config.pad_token_id
871
+ self.embed_tokens = embed_tokens
872
+ self.layerdrop = config.decoder_layerdrop
873
+ self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
874
+ config.max_position_embeddings,
875
+ config.d_model,
876
+ name="embed_positions",
877
+ )
878
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
879
+ self.layers = [TFBlenderbotSmallDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
880
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
881
+
882
+ self.dropout = keras.layers.Dropout(config.dropout)
883
+
884
+ def get_embed_tokens(self):
885
+ return self.embed_tokens
886
+
887
+ def set_embed_tokens(self, embed_tokens):
888
+ self.embed_tokens = embed_tokens
889
+
890
+ @unpack_inputs
891
+ def call(
892
+ self,
893
+ input_ids=None,
894
+ inputs_embeds=None,
895
+ attention_mask=None,
896
+ position_ids=None,
897
+ encoder_hidden_states=None,
898
+ encoder_attention_mask=None,
899
+ head_mask=None,
900
+ cross_attn_head_mask=None,
901
+ past_key_values=None,
902
+ use_cache=None,
903
+ output_attentions=None,
904
+ output_hidden_states=None,
905
+ return_dict=None,
906
+ training=False,
907
+ ):
908
+ r"""
909
+ Args:
910
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
911
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
912
+ provide it.
913
+
914
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
915
+ [`PreTrainedTokenizer.__call__`] for details.
916
+
917
+ [What are input IDs?](../glossary#input-ids)
918
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
919
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
920
+
921
+ - 1 for tokens that are **not masked**,
922
+ - 0 for tokens that are **masked**.
923
+
924
+ [What are attention masks?](../glossary#attention-mask)
925
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
926
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
927
+ range `[0, config.max_position_embeddings - 1]`.
928
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
929
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
930
+ of the decoder.
931
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
932
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
933
+ selected in `[0, 1]`:
934
+
935
+ - 1 for tokens that are **not masked**,
936
+ - 0 for tokens that are **masked**.
937
+
938
+ [What are attention masks?](../glossary#attention-mask)
939
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
940
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
941
+
942
+ - 1 indicates the head is **not masked**,
943
+ - 0 indicates the head is **masked**.
944
+
945
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
946
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
947
+
948
+ - 1 indicates the head is **not masked**,
949
+ - 0 indicates the head is **masked**.
950
+
951
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
952
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
953
+ decoding.
954
+
955
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
956
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
957
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
958
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
959
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
960
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
961
+ than the model's internal embedding lookup matrix.
962
+ output_attentions (`bool`, *optional*):
963
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
964
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
965
+ in the config will be used instead.
966
+ output_hidden_states (`bool`, *optional*):
967
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
968
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
969
+ will be used instead.
970
+ return_dict (`bool`, *optional*):
971
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
972
+ in eager mode, in graph mode the value will always be set to True.
973
+ training (`bool`, *optional*, defaults to `False`):
974
+ Whether or not to use the model in training mode (some modules like dropout modules have different
975
+ behaviors between training and evaluation).
976
+ """
977
+ if input_ids is not None and inputs_embeds is not None:
978
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
979
+ elif input_ids is not None:
980
+ input_shape = shape_list(input_ids)
981
+ elif inputs_embeds is not None:
982
+ input_shape = shape_list(inputs_embeds)[:-1]
983
+ else:
984
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
985
+
986
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
987
+
988
+ if inputs_embeds is None:
989
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
990
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
991
+
992
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
993
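+ # when decoding a single new token, every cached position may be attended to, so only the padding mask is needed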
+ if input_shape[-1] > 1:
994
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
995
+ else:
996
+ combined_attention_mask = _expand_mask(
997
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
998
+ )
999
+
1000
+ if attention_mask is not None:
1001
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
1002
+
1003
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1004
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1005
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
1006
+
1007
+ # embed positions
1008
+ if position_ids is None:
1009
+ positions = self.embed_positions(input_shape, past_key_values_length)
1010
+ else:
1011
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
1012
+
1013
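+ # unlike the encoder, the decoder applies layernorm_embedding to the token embeddings only and then adds the positional embeddings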
+ hidden_states = self.layernorm_embedding(inputs_embeds) + positions
1014
+ hidden_states = self.dropout(hidden_states, training=training)
1015
+
1016
+ # decoder layers
1017
+ all_hidden_states = () if output_hidden_states else None
1018
+ all_self_attns = () if output_attentions else None
1019
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
1020
+ present_key_values = () if use_cache else None
1021
+
1022
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
1023
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
1024
+ if attn_mask is not None:
1025
+ tf.debugging.assert_equal(
1026
+ shape_list(attn_mask)[0],
1027
+ len(self.layers),
1028
+ message=(
1029
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
1030
+ f" {shape_list(attn_mask)[0]}."
1031
+ ),
1032
+ )
1033
+
1034
+ for idx, decoder_layer in enumerate(self.layers):
1035
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1036
+ if output_hidden_states:
1037
+ all_hidden_states += (hidden_states,)
1038
+ dropout_probability = random.uniform(0, 1)
1039
+
1040
+ if training and (dropout_probability < self.layerdrop):
1041
+ continue
1042
+
1043
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1044
+
1045
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
1046
+ hidden_states,
1047
+ attention_mask=combined_attention_mask,
1048
+ encoder_hidden_states=encoder_hidden_states,
1049
+ encoder_attention_mask=encoder_attention_mask,
1050
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
1051
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1052
+ past_key_value=past_key_value,
1053
+ )
1054
+
1055
+ if use_cache:
1056
+ present_key_values += (present_key_value,)
1057
+
1058
+ if output_attentions:
1059
+ all_self_attns += (layer_self_attn,)
1060
+
1061
+ if encoder_hidden_states is not None:
1062
+ all_cross_attns += (layer_cross_attn,)
1063
+
1064
+ if output_hidden_states:
1065
+ all_hidden_states += (hidden_states,)
1066
+
1067
+ if not return_dict:
1068
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
1069
+ else:
1070
+ return TFBaseModelOutputWithPastAndCrossAttentions(
1071
+ last_hidden_state=hidden_states,
1072
+ past_key_values=present_key_values,
1073
+ hidden_states=all_hidden_states,
1074
+ attentions=all_self_attns,
1075
+ cross_attentions=all_cross_attns,
1076
+ )
1077
+
1078
+ def build(self, input_shape=None):
1079
+ if self.built:
1080
+ return
1081
+ self.built = True
1082
+ if getattr(self, "embed_positions", None) is not None:
1083
+ with tf.name_scope(self.embed_positions.name):
1084
+ self.embed_positions.build(None)
1085
+ if getattr(self, "layernorm_embedding", None) is not None:
1086
+ with tf.name_scope(self.layernorm_embedding.name):
1087
+ self.layernorm_embedding.build([None, None, self.config.d_model])
1088
+ if getattr(self, "layers", None) is not None:
1089
+ for layer in self.layers:
1090
+ with tf.name_scope(layer.name):
1091
+ layer.build(None)
1092
+
1093
+
1094
+ @keras_serializable
1095
+ class TFBlenderbotSmallMainLayer(keras.layers.Layer):
1096
+ config_class = BlenderbotSmallConfig
1097
+
1098
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
1099
+ super().__init__(**kwargs)
1100
+
1101
+ self.config = config
1102
+ self.shared = keras.layers.Embedding(
1103
+ input_dim=config.vocab_size,
1104
+ output_dim=config.d_model,
1105
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
1106
+ name="model.shared",
1107
+ )
1108
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
1109
+ self.shared.load_weight_prefix = "model.shared"
1110
+
1111
+ self.encoder = TFBlenderbotSmallEncoder(config, self.shared, name="encoder")
1112
+ self.decoder = TFBlenderbotSmallDecoder(config, self.shared, name="decoder")
1113
+
1114
+ def get_input_embeddings(self):
1115
+ return self.shared
1116
+
1117
+ def set_input_embeddings(self, new_embeddings):
1118
+ self.shared = new_embeddings
1119
+ self.encoder.embed_tokens = self.shared
1120
+ self.decoder.embed_tokens = self.shared
1121
+
1122
+ @unpack_inputs
1123
+ def call(
1124
+ self,
1125
+ input_ids=None,
1126
+ attention_mask=None,
1127
+ decoder_input_ids=None,
1128
+ decoder_attention_mask=None,
1129
+ decoder_position_ids=None,
1130
+ head_mask=None,
1131
+ decoder_head_mask=None,
1132
+ cross_attn_head_mask=None,
1133
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1134
+ past_key_values=None,
1135
+ inputs_embeds=None,
1136
+ decoder_inputs_embeds=None,
1137
+ use_cache=None,
1138
+ output_attentions=None,
1139
+ output_hidden_states=None,
1140
+ return_dict=None,
1141
+ training=False,
1142
+ **kwargs,
1143
+ ):
1144
+ output_hidden_states = (
1145
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1146
+ )
1147
+
1148
+ if encoder_outputs is None:
1149
+ encoder_outputs = self.encoder(
1150
+ input_ids=input_ids,
1151
+ attention_mask=attention_mask,
1152
+ head_mask=head_mask,
1153
+ inputs_embeds=inputs_embeds,
1154
+ output_attentions=output_attentions,
1155
+ output_hidden_states=output_hidden_states,
1156
+ return_dict=return_dict,
1157
+ training=training,
1158
+ )
1159
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
1160
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
1161
+ encoder_outputs = TFBaseModelOutput(
1162
+ last_hidden_state=encoder_outputs[0],
1163
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1164
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1165
+ )
1166
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
1167
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
1168
+ encoder_outputs = encoder_outputs.to_tuple()
1169
+
1170
+ decoder_outputs = self.decoder(
1171
+ decoder_input_ids,
1172
+ attention_mask=decoder_attention_mask,
1173
+ position_ids=decoder_position_ids,
1174
+ encoder_hidden_states=encoder_outputs[0],
1175
+ encoder_attention_mask=attention_mask,
1176
+ head_mask=decoder_head_mask,
1177
+ cross_attn_head_mask=cross_attn_head_mask,
1178
+ past_key_values=past_key_values,
1179
+ inputs_embeds=decoder_inputs_embeds,
1180
+ use_cache=use_cache,
1181
+ output_attentions=output_attentions,
1182
+ output_hidden_states=output_hidden_states,
1183
+ return_dict=return_dict,
1184
+ training=training,
1185
+ )
1186
+
1187
+ if not return_dict:
1188
+ return decoder_outputs + encoder_outputs
1189
+
1190
+ return TFSeq2SeqModelOutput(
1191
+ last_hidden_state=decoder_outputs.last_hidden_state,
1192
+ past_key_values=decoder_outputs.past_key_values,
1193
+ decoder_hidden_states=decoder_outputs.hidden_states,
1194
+ decoder_attentions=decoder_outputs.attentions,
1195
+ cross_attentions=decoder_outputs.cross_attentions,
1196
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1197
+ encoder_hidden_states=encoder_outputs.hidden_states,
1198
+ encoder_attentions=encoder_outputs.attentions,
1199
+ )
1200
+
1201
+ def build(self, input_shape=None):
1202
+ if self.built:
1203
+ return
1204
+ self.built = True
1205
+ # The shared/tied weights expect to be in the model base namespace
1206
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
1207
+ # the current one.
1208
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
1209
+ self.shared.build(None)
1210
+ if getattr(self, "encoder", None) is not None:
1211
+ with tf.name_scope(self.encoder.name):
1212
+ self.encoder.build(None)
1213
+ if getattr(self, "decoder", None) is not None:
1214
+ with tf.name_scope(self.decoder.name):
1215
+ self.decoder.build(None)
1216
+
1217
+
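A note on the `build` method above: the trailing `/` passed to `tf.name_scope` is what re-roots the shared embedding under `model.shared` instead of nesting it under the current layer scope. The sketch below is illustrative only and assumes the TF2/Keras 2 eager naming behaviour this module relies on; the layer and scope names are hypothetical.

```python
import tensorflow as tf
from tensorflow import keras

# Hedged sketch of the trailing-"/" name_scope trick used in build() above.
dense = keras.layers.Dense(2, name="shared")

with tf.name_scope("outer"):
    # A name ending in "/" is treated as an absolute scope, so the weights are
    # created under "model/shared/..." even though we are inside "outer".
    with tf.name_scope("model/shared/"):
        dense.build((None, 4))

print([w.name for w in dense.weights])
# e.g. ['model/shared/kernel:0', 'model/shared/bias:0'] rather than 'outer/...'
```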
1218
+ @add_start_docstrings(
1219
+ "The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top.",
1220
+ BLENDERBOT_SMALL_START_DOCSTRING,
1221
+ )
1222
+ class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel):
1223
+ def __init__(self, config: BlenderbotSmallConfig, *inputs, **kwargs):
1224
+ super().__init__(config, *inputs, **kwargs)
1225
+
1226
+ self.model = TFBlenderbotSmallMainLayer(config, name="model")
1227
+
1228
+ def get_encoder(self):
1229
+ return self.model.encoder
1230
+
1231
+ def get_decoder(self):
1232
+ return self.model.decoder
1233
+
1234
+ @unpack_inputs
1235
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1236
+ @add_code_sample_docstrings(
1237
+ checkpoint=_CHECKPOINT_FOR_DOC,
1238
+ output_type=TFSeq2SeqModelOutput,
1239
+ config_class=_CONFIG_FOR_DOC,
1240
+ )
1241
+ def call(
1242
+ self,
1243
+ input_ids: tf.Tensor | None = None,
1244
+ attention_mask: tf.Tensor | None = None,
1245
+ decoder_input_ids: tf.Tensor | None = None,
1246
+ decoder_attention_mask: tf.Tensor | None = None,
1247
+ decoder_position_ids: tf.Tensor | None = None,
1248
+ head_mask: tf.Tensor | None = None,
1249
+ decoder_head_mask: tf.Tensor | None = None,
1250
+ cross_attn_head_mask: tf.Tensor | None = None,
1251
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1252
+ past_key_values: List[tf.Tensor] | None = None,
1253
+ inputs_embeds: tf.Tensor | None = None,
1254
+ decoder_inputs_embeds: tf.Tensor | None = None,
1255
+ use_cache: Optional[bool] = None,
1256
+ output_attentions: Optional[bool] = None,
1257
+ output_hidden_states: Optional[bool] = None,
1258
+ return_dict: Optional[bool] = None,
1259
+ training: Optional[bool] = False,
1260
+ **kwargs,
1261
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
1262
+ outputs = self.model(
1263
+ input_ids=input_ids,
1264
+ attention_mask=attention_mask,
1265
+ decoder_input_ids=decoder_input_ids,
1266
+ decoder_attention_mask=decoder_attention_mask,
1267
+ decoder_position_ids=decoder_position_ids,
1268
+ head_mask=head_mask,
1269
+ decoder_head_mask=decoder_head_mask,
1270
+ cross_attn_head_mask=cross_attn_head_mask,
1271
+ encoder_outputs=encoder_outputs,
1272
+ past_key_values=past_key_values,
1273
+ inputs_embeds=inputs_embeds,
1274
+ decoder_inputs_embeds=decoder_inputs_embeds,
1275
+ use_cache=use_cache,
1276
+ output_attentions=output_attentions,
1277
+ output_hidden_states=output_hidden_states,
1278
+ return_dict=return_dict,
1279
+ training=training,
1280
+ )
1281
+
1282
+ return outputs
1283
+
1284
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
1285
+ def serving_output(self, output):
1286
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1287
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1288
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1289
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1290
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1291
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1292
+
1293
+ return TFSeq2SeqModelOutput(
1294
+ last_hidden_state=output.last_hidden_state,
1295
+ past_key_values=pkv,
1296
+ decoder_hidden_states=dec_hs,
1297
+ decoder_attentions=dec_attns,
1298
+ cross_attentions=cross_attns,
1299
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1300
+ encoder_hidden_states=enc_hs,
1301
+ encoder_attentions=enc_attns,
1302
+ )
1303
+
1304
+ def build(self, input_shape=None):
1305
+ if self.built:
1306
+ return
1307
+ self.built = True
1308
+ if getattr(self, "model", None) is not None:
1309
+ with tf.name_scope(self.model.name):
1310
+ self.model.build(None)
1311
+
1312
+
1313
+ # Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
1314
+ class BiasLayer(keras.layers.Layer):
1315
+ """
1316
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
1317
+ so all weights have to be registered in a layer.
1318
+ """
1319
+
1320
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
1321
+ super().__init__(name=name, **kwargs)
1322
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
1323
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
1324
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
1325
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
1326
+
1327
+ def call(self, x):
1328
+ return x + self.bias
1329
+
1330
+
1331
+ @add_start_docstrings(
1332
+ "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.",
1333
+ BLENDERBOT_SMALL_START_DOCSTRING,
1334
+ )
1335
+ class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel, TFCausalLanguageModelingLoss):
1336
+ _keys_to_ignore_on_load_unexpected = [
1337
+ r"model.encoder.embed_tokens.weight",
1338
+ r"model.decoder.embed_tokens.weight",
1339
+ ]
1340
+
1341
+ def __init__(self, config, *inputs, **kwargs):
1342
+ super().__init__(config, *inputs, **kwargs)
1343
+ self.model = TFBlenderbotSmallMainLayer(config, name="model")
1344
+ self.use_cache = config.use_cache
1345
+ # final_logits_bias is registered as a buffer in PyTorch, so it is kept non-trainable here for consistency.
1346
+ self.bias_layer = BiasLayer(
1347
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
1348
+ )
1349
+
1350
+ def get_decoder(self):
1351
+ return self.model.decoder
1352
+
1353
+ def get_encoder(self):
1354
+ return self.model.encoder
1355
+
1356
+ def get_output_embeddings(self):
1357
+ return self.get_input_embeddings()
1358
+
1359
+ def set_output_embeddings(self, value):
1360
+ self.set_input_embeddings(value)
1361
+
1362
+ def get_bias(self):
1363
+ return {"final_logits_bias": self.bias_layer.bias}
1364
+
1365
+ def set_bias(self, value):
1366
+ # Replaces the existing layers containing bias for correct (de)serialization.
1367
+ vocab_size = value["final_logits_bias"].shape[-1]
1368
+ self.bias_layer = BiasLayer(
1369
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
1370
+ )
1371
+ self.bias_layer.bias.assign(value["final_logits_bias"])
1372
+
1373
+ @unpack_inputs
1374
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
1375
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1376
+ @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
1377
+ def call(
1378
+ self,
1379
+ input_ids: tf.Tensor | None = None,
1380
+ attention_mask: tf.Tensor | None = None,
1381
+ decoder_input_ids: tf.Tensor | None = None,
1382
+ decoder_attention_mask: tf.Tensor | None = None,
1383
+ decoder_position_ids: tf.Tensor | None = None,
1384
+ head_mask: tf.Tensor | None = None,
1385
+ decoder_head_mask: tf.Tensor | None = None,
1386
+ cross_attn_head_mask: tf.Tensor | None = None,
1387
+ encoder_outputs: Optional[TFBaseModelOutput] = None,
1388
+ past_key_values: List[tf.Tensor] | None = None,
1389
+ inputs_embeds: tf.Tensor | None = None,
1390
+ decoder_inputs_embeds: tf.Tensor | None = None,
1391
+ use_cache: Optional[bool] = None,
1392
+ output_attentions: Optional[bool] = None,
1393
+ output_hidden_states: Optional[bool] = None,
1394
+ return_dict: Optional[bool] = None,
1395
+ labels: tf.Tensor | None = None,
1396
+ training: Optional[bool] = False,
1397
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
1398
+ r"""
1399
+ labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
1400
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1401
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1402
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1403
+
1404
+ Returns:
1405
+
1406
+ """
1407
+
1408
+ if labels is not None:
1409
+ labels = tf.where(
1410
+ labels == self.config.pad_token_id,
1411
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
1412
+ labels,
1413
+ )
1414
+ use_cache = False
1415
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1416
+ decoder_input_ids = shift_tokens_right(
1417
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1418
+ )
1419
+
1420
+ outputs = self.model(
1421
+ input_ids,
1422
+ attention_mask=attention_mask,
1423
+ decoder_input_ids=decoder_input_ids,
1424
+ decoder_attention_mask=decoder_attention_mask,
1425
+ decoder_position_ids=decoder_position_ids,
1426
+ head_mask=head_mask,
1427
+ decoder_head_mask=decoder_head_mask,
1428
+ cross_attn_head_mask=cross_attn_head_mask,
1429
+ encoder_outputs=encoder_outputs,
1430
+ past_key_values=past_key_values,
1431
+ inputs_embeds=inputs_embeds,
1432
+ decoder_inputs_embeds=decoder_inputs_embeds,
1433
+ use_cache=use_cache,
1434
+ output_attentions=output_attentions,
1435
+ output_hidden_states=output_hidden_states,
1436
+ return_dict=return_dict,
1437
+ training=training,
1438
+ )
1439
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
1440
+ lm_logits = self.bias_layer(lm_logits)
1441
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
1442
+
1443
+ if not return_dict:
1444
+ output = (lm_logits,) + outputs[1:]
1445
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1446
+ return TFSeq2SeqLMOutput(
1447
+ loss=masked_lm_loss,
1448
+ logits=lm_logits,
1449
+ past_key_values=outputs.past_key_values, # index 1 of decoder outputs
1450
+ decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of decoder outputs
1451
+ decoder_attentions=outputs.decoder_attentions, # index 3 of decoder outputs
1452
+ cross_attentions=outputs.cross_attentions, # index 4 of decoder outputs
1453
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
1454
+ encoder_hidden_states=outputs.encoder_hidden_states, # index 1 of encoder outputs
1455
+ encoder_attentions=outputs.encoder_attentions, # index 2 of encoder outputs
1456
+ )
1457
+
1458
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
1459
+ def serving_output(self, output):
1460
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1461
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1462
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1463
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1464
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1465
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1466
+
1467
+ return TFSeq2SeqLMOutput(
1468
+ logits=output.logits,
1469
+ past_key_values=pkv,
1470
+ decoder_hidden_states=dec_hs,
1471
+ decoder_attentions=dec_attns,
1472
+ cross_attentions=cross_attns,
1473
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1474
+ encoder_hidden_states=enc_hs,
1475
+ encoder_attentions=enc_attns,
1476
+ )
1477
+
1478
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
1479
+ def prepare_inputs_for_generation(
1480
+ self,
1481
+ decoder_input_ids,
1482
+ past_key_values=None,
1483
+ attention_mask=None,
1484
+ decoder_attention_mask=None,
1485
+ head_mask=None,
1486
+ decoder_head_mask=None,
1487
+ cross_attn_head_mask=None,
1488
+ use_cache=None,
1489
+ encoder_outputs=None,
1490
+ **kwargs,
1491
+ ):
1492
+ # cut decoder_input_ids if past_key_values is used
1493
+ if past_key_values is not None:
1494
+ decoder_input_ids = decoder_input_ids[:, -1:]
1495
+
1496
+ if decoder_attention_mask is not None: # xla
1497
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
1498
+ elif past_key_values is not None: # no xla + past_key_values
1499
+ decoder_position_ids = past_key_values[0][0].shape[2]
1500
+ else: # no xla + no past_key_values
1501
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
1502
+
1503
+ return {
1504
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1505
+ "encoder_outputs": encoder_outputs,
1506
+ "past_key_values": past_key_values,
1507
+ "decoder_input_ids": decoder_input_ids,
1508
+ "attention_mask": attention_mask,
1509
+ "decoder_attention_mask": decoder_attention_mask,
1510
+ "decoder_position_ids": decoder_position_ids,
1511
+ "head_mask": head_mask,
1512
+ "decoder_head_mask": decoder_head_mask,
1513
+ "cross_attn_head_mask": cross_attn_head_mask,
1514
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1515
+ }
1516
+
1517
+ def build(self, input_shape=None):
1518
+ if self.built:
1519
+ return
1520
+ self.built = True
1521
+ if getattr(self, "model", None) is not None:
1522
+ with tf.name_scope(self.model.name):
1523
+ self.model.build(None)
1524
+ if getattr(self, "bias_layer", None) is not None:
1525
+ with tf.name_scope(self.bias_layer.name):
1526
+ self.bias_layer.build(None)
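End-to-end, the classes above are used like any other TF seq2seq model in the library. The following is a hedged sketch, not part of this module; the checkpoint name is the 90M BlenderbotSmall model referenced by the tokenizer files below, and the generation settings are arbitrary.

```python
from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration

# Hedged sketch: load the 90M checkpoint and generate a single reply.
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = TFBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="tf")
reply_ids = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
```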
env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small.py ADDED
@@ -0,0 +1,258 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization class for BlenderbotSmall."""
16
+
17
+ import json
18
+ import os
19
+ from typing import Dict, List, Optional, Tuple
20
+
21
+ import regex as re
22
+
23
+ from ...tokenization_utils import PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {
31
+ "vocab_file": "vocab.json",
32
+ "merges_file": "merges.txt",
33
+ "tokenizer_config_file": "tokenizer_config.json",
34
+ }
35
+
36
+ PRETRAINED_VOCAB_FILES_MAP = {
37
+ "vocab_file": {
38
+ "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
39
+ },
40
+ "merges_file": {
41
+ "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
42
+ },
43
+ "tokenizer_config_file": {
44
+ "facebook/blenderbot_small-90M": (
45
+ "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
46
+ )
47
+ },
48
+ }
49
+
50
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
51
+
52
+
53
+ def get_pairs(word):
54
+ """
55
+ Return set of symbol pairs in a word.
56
+
57
+ Word is represented as tuple of symbols (symbols being variable-length strings).
58
+ """
59
+ pairs = set()
60
+ prev_char = word[0]
61
+ for char in word[1:]:
62
+ pairs.add((prev_char, char))
63
+ prev_char = char
64
+
65
+ pairs = set(pairs)
66
+ return pairs
67
+
68
+
69
+ class BlenderbotSmallTokenizer(PreTrainedTokenizer):
70
+ """
71
+ Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).
72
+
73
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
74
+ the superclass for more information regarding methods.
75
+
76
+ Args:
77
+ vocab_file (`str`):
78
+ File containing the vocabulary.
79
+ merges_file (`str`):
80
+ Path to the merges file.
81
+ bos_token (`str`, *optional*, defaults to `"__start__"`):
82
+ The beginning of sentence token.
83
+ eos_token (`str`, *optional*, defaults to `"__end__"`):
84
+ The end of sentence token.
85
+ unk_token (`str`, *optional*, defaults to `"__unk__"`):
86
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
87
+ token instead.
88
+ pad_token (`str`, *optional*, defaults to `"__null__"`):
89
+ The token used for padding, for example when batching sequences of different lengths.
90
+ kwargs (*optional*):
91
+ Additional keyword arguments passed along to [`PreTrainedTokenizer`]
92
+ """
93
+
94
+ vocab_files_names = VOCAB_FILES_NAMES
95
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
96
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
97
+ model_input_names = ["input_ids", "attention_mask"]
98
+
99
+ def __init__(
100
+ self,
101
+ vocab_file,
102
+ merges_file,
103
+ bos_token="__start__",
104
+ eos_token="__end__",
105
+ unk_token="__unk__",
106
+ pad_token="__null__",
107
+ **kwargs,
108
+ ):
109
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
110
+ self.encoder = json.load(vocab_handle)
111
+ self.decoder = {v: k for k, v in self.encoder.items()}
112
+ with open(merges_file, encoding="utf-8") as merges_handle:
113
+ merges = merges_handle.read().split("\n")[1:-1]
114
+ merges = [tuple(merge.split()) for merge in merges]
115
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
116
+ self.cache = {}
117
+ super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
118
+
119
+ @property
120
+ def vocab_size(self) -> int:
121
+ return len(self.encoder)
122
+
123
+ def get_vocab(self) -> Dict:
124
+ return dict(self.encoder, **self.added_tokens_encoder)
125
+
126
+ def bpe(self, token: str) -> str:
127
+ if token in self.cache:
128
+ return self.cache[token]
129
+ token = re.sub("([.,!?()])", r" \1", token)
130
+ token = re.sub("(')", r" \1 ", token)
131
+ token = re.sub(r"\s{2,}", " ", token)
132
+ if "\n" in token:
133
+ token = token.replace("\n", " __newln__")
134
+
135
+ tokens = token.split(" ")
136
+ words = []
137
+ for token in tokens:
138
+ if not len(token):
139
+ continue
140
+
141
+ token = token.lower()
142
+ word = tuple(token)
143
+ word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
144
+ pairs = get_pairs(word)
145
+
146
+ if not pairs:
147
+ words.append(token)
148
+ continue
149
+
150
+ while True:
151
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
152
+ if bigram not in self.bpe_ranks:
153
+ break
154
+ first, second = bigram
155
+ new_word = []
156
+ i = 0
157
+
158
+ while i < len(word):
159
+ try:
160
+ j = word.index(first, i)
161
+ new_word.extend(word[i:j])
162
+ i = j
163
+ except ValueError:
164
+ new_word.extend(word[i:])
165
+ break
166
+
167
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
168
+ new_word.append(first + second)
169
+ i += 2
170
+ else:
171
+ new_word.append(word[i])
172
+ i += 1
173
+ new_word = tuple(new_word)
174
+ word = new_word
175
+ if len(word) == 1:
176
+ break
177
+ else:
178
+ pairs = get_pairs(word)
179
+ word = "@@ ".join(word)
180
+ word = word[:-4]
181
+
182
+ self.cache[token] = word
183
+ words.append(word)
184
+ return " ".join(words)
185
+
186
+ def _tokenize(self, text: str) -> List[str]:
187
+ """Split a string into tokens using BPE."""
188
+ split_tokens = []
189
+
190
+ words = re.findall(r"\S+\n?", text)
191
+
192
+ for token in words:
193
+ split_tokens.extend(list(self.bpe(token).split(" ")))
194
+ return split_tokens
195
+
196
+ def _convert_token_to_id(self, token: str) -> int:
197
+ """Converts a token to an id using the vocab."""
198
+ token = token.lower()
199
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
200
+
201
+ def _convert_id_to_token(self, index: int) -> str:
202
+ """Converts an index (integer) in a token (str) using the vocab."""
203
+ return self.decoder.get(index, self.unk_token)
204
+
205
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
206
+ """Converts a sequence of tokens in a single string."""
207
+ out_string = " ".join(tokens).replace("@@ ", "").strip()
208
+ return out_string
209
+
210
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
211
+ if not os.path.isdir(save_directory):
212
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
213
+ return
214
+ vocab_file = os.path.join(
215
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
216
+ )
217
+ merge_file = os.path.join(
218
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
219
+ )
220
+
221
+ with open(vocab_file, "w", encoding="utf-8") as f:
222
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
223
+
224
+ index = 0
225
+ with open(merge_file, "w", encoding="utf-8") as writer:
226
+ writer.write("#version: 0.2\n")
227
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
228
+ if index != token_index:
229
+ logger.warning(
230
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
231
+ " Please check that the tokenizer is not corrupted!"
232
+ )
233
+ index = token_index
234
+ writer.write(" ".join(bpe_tokens) + "\n")
235
+ index += 1
236
+
237
+ return vocab_file, merge_file
238
+
239
+ @property
240
+ # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template
241
+ def default_chat_template(self):
242
+ """
243
+ A very simple chat template that just adds whitespace between messages.
244
+ """
245
+ logger.warning_once(
246
+ "\nNo chat template is defined for this tokenizer - using the default template "
247
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
248
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
249
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
250
+ )
251
+ return (
252
+ "{% for message in messages %}"
253
+ "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
254
+ "{{ message['content'] }}"
255
+ "{% if not loop.last %}{{ ' ' }}{% endif %}"
256
+ "{% endfor %}"
257
+ "{{ eos_token }}"
258
+ )
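To make the BPE logic above concrete: the slow tokenizer lowercases the input, splits punctuation off, maps newlines to `__newln__`, and marks subword continuations with `@@`. A hedged sketch follows; the commented token output is illustrative only, since the actual pieces depend on the pretrained merges file.

```python
from transformers import BlenderbotSmallTokenizer

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")

tokens = tokenizer.tokenize("Hello, world!\nHow are you?")
# Illustrative only -- the real pieces depend on merges.txt, e.g.
# ['hello', ',', 'world', '!', '__newln__', 'how', 'are', 'you', '?']
print(tokens)

# convert_tokens_to_string() strips the "@@ " continuation markers back out.
print(tokenizer.convert_tokens_to_string(tokens))
```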
env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py ADDED
@@ -0,0 +1,140 @@
1
+ # coding=utf-8
2
+ # Copyright 2021, The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Fast tokenization class for BlenderbotSmall."""
16
+ from typing import List, Optional
17
+
18
+ from tokenizers import ByteLevelBPETokenizer
19
+
20
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
21
+ from ...utils import logging
22
+ from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {
28
+ "vocab_file": "vocab.json",
29
+ "merges_file": "merges.txt",
30
+ "tokenizer_config_file": "tokenizer_config.json",
31
+ }
32
+
33
+ PRETRAINED_VOCAB_FILES_MAP = {
34
+ "vocab_file": {
35
+ "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
36
+ },
37
+ "merges_file": {
38
+ "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
39
+ },
40
+ "tokenizer_config_file": {
41
+ "facebook/blenderbot_small-90M": (
42
+ "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
43
+ )
44
+ },
45
+ }
46
+
47
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
48
+ "facebook/blenderbot_small-90M": 512,
49
+ }
50
+
51
+
52
+ class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
53
+ """
54
+ Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).
55
+
56
+ Args:
57
+ vocab_file (`str`):
58
+ Path to the vocabulary file.
59
+ """
60
+
61
+ vocab_files_names = VOCAB_FILES_NAMES
62
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
63
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
64
+ slow_tokenizer_class = BlenderbotSmallTokenizer
65
+
66
+ def __init__(
67
+ self,
68
+ vocab_file=None,
69
+ merges_file=None,
70
+ unk_token="<|endoftext|>",
71
+ bos_token="<|endoftext|>",
72
+ eos_token="<|endoftext|>",
73
+ add_prefix_space=False,
74
+ trim_offsets=True,
75
+ **kwargs,
76
+ ):
77
+ super().__init__(
78
+ ByteLevelBPETokenizer(
79
+ vocab=vocab_file,
80
+ merges=merges_file,
81
+ add_prefix_space=add_prefix_space,
82
+ trim_offsets=trim_offsets,
83
+ ),
84
+ bos_token=bos_token,
85
+ eos_token=eos_token,
86
+ unk_token=unk_token,
87
+ **kwargs,
88
+ )
89
+ self.add_prefix_space = add_prefix_space
90
+
91
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
92
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
93
+ if token_ids_1 is None:
94
+ return output
95
+
96
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
97
+
98
+ def create_token_type_ids_from_sequences(
99
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
100
+ ) -> List[int]:
101
+ """
102
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall
103
+ does not make use of token type ids, therefore a list of zeros is returned.
104
+
105
+ Args:
106
+ token_ids_0 (`List[int]`):
107
+ List of IDs.
108
+ token_ids_1 (`List[int]`, *optional*):
109
+ Optional second list of IDs for sequence pairs.
110
+
111
+ Returns:
112
+ `List[int]`: List of zeros.
113
+ """
114
+ sep = [self.sep_token_id]
115
+ cls = [self.cls_token_id]
116
+
117
+ if token_ids_1 is None:
118
+ return len(cls + token_ids_0 + sep) * [0]
119
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
120
+
121
+ @property
122
+ # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template
123
+ def default_chat_template(self):
124
+ """
125
+ A very simple chat template that just adds whitespace between messages.
126
+ """
127
+ logger.warning_once(
128
+ "\nNo chat template is defined for this tokenizer - using the default template "
129
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
130
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
131
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
132
+ )
133
+ return (
134
+ "{% for message in messages %}"
135
+ "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
136
+ "{{ message['content'] }}"
137
+ "{% if not loop.last %}{{ ' ' }}{% endif %}"
138
+ "{% endfor %}"
139
+ "{{ eos_token }}"
140
+ )
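The `default_chat_template` above simply joins messages with spaces, prefixing user turns with one space, and appends `eos_token`. A hedged sketch of applying it through the standard chat-templating API; the rendered string is illustrative and depends on the checkpoint's configured eos token.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

chat = [
    {"role": "user", "content": "hello there"},
    {"role": "assistant", "content": "hi! how are you?"},
    {"role": "user", "content": "doing well, thanks"},
]
# Renders the Jinja template above: messages separated by spaces, eos token appended.
text = tokenizer.apply_chat_template(chat, tokenize=False)
print(text)  # e.g. " hello there hi! how are you?  doing well, thanks__end__"
```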
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__init__.py ADDED
@@ -0,0 +1,76 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_clap": [
21
+ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
22
+ "ClapAudioConfig",
23
+ "ClapConfig",
24
+ "ClapTextConfig",
25
+ ],
26
+ "processing_clap": ["ClapProcessor"],
27
+ }
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_clap"] = [
36
+ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "ClapModel",
38
+ "ClapPreTrainedModel",
39
+ "ClapTextModel",
40
+ "ClapTextModelWithProjection",
41
+ "ClapAudioModel",
42
+ "ClapAudioModelWithProjection",
43
+ ]
44
+ _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_clap import (
48
+ CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
49
+ ClapAudioConfig,
50
+ ClapConfig,
51
+ ClapTextConfig,
52
+ )
53
+ from .processing_clap import ClapProcessor
54
+
55
+ try:
56
+ if not is_torch_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ from .feature_extraction_clap import ClapFeatureExtractor
62
+ from .modeling_clap import (
63
+ CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
64
+ ClapAudioModel,
65
+ ClapAudioModelWithProjection,
66
+ ClapModel,
67
+ ClapPreTrainedModel,
68
+ ClapTextModel,
69
+ ClapTextModelWithProjection,
70
+ )
71
+
72
+
73
+ else:
74
+ import sys
75
+
76
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
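The `_LazyModule` registration above means importing the `clap` package is cheap: submodules are imported only when one of the exported names is accessed, and the torch-backed classes are exported only when torch is installed. A hedged sketch of the effect; the same pattern is used for every model package in the library, so nothing here is CLAP-specific beyond the names.

```python
# Hedged sketch of the lazy-import behaviour set up above.
from transformers.models import clap
from transformers.utils import is_torch_available

# Attribute access triggers the import of configuration_clap on demand.
config = clap.ClapTextConfig()

# Torch-backed symbols are only registered when torch is available.
if is_torch_available():
    model = clap.ClapTextModel(config)
```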
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.19 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (3.34 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc ADDED
Binary file (66.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc ADDED
Binary file (5.26 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/configuration_clap.py ADDED
@@ -0,0 +1,432 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CLAP model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = {
27
+ "laion/clap-htsat-fused": "https://huggingface.co/laion/clap-htsat-fused/resolve/main/config.json",
28
+ "laion/clap-htsat-unfused": "https://huggingface.co/laion/clap-htsat-unfused/resolve/main/config.json",
29
+ }
30
+
31
+
32
+ class ClapTextConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`ClapTextModel`]. It is used to instantiate a CLAP
35
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
36
+ defaults will yield a similar configuration to that of the CLAP
37
+ [clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ vocab_size (`int`, *optional*, defaults to 50265):
45
+ Vocabulary size of the CLAP model. Defines the number of different tokens that can be represented by the
46
+ `inputs_ids` passed when calling [`ClapTextModel`].
47
+ hidden_size (`int`, *optional*, defaults to 768):
48
+ Dimensionality of the encoder layers and the pooler layer.
49
+ num_hidden_layers (`int`, *optional*, defaults to 12):
50
+ Number of hidden layers in the Transformer encoder.
51
+ num_attention_heads (`int`, *optional*, defaults to 12):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ intermediate_size (`int`, *optional*, defaults to 3072):
54
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
55
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"silu"` and `"relu_new"` are supported.
58
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout ratio for the attention probabilities.
62
+ max_position_embeddings (`int`, *optional*, defaults to 514):
63
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
64
+ just in case (e.g., 512 or 1024 or 2048).
65
+ type_vocab_size (`int`, *optional*, defaults to 1):
66
+ The vocabulary size of the `token_type_ids` passed when calling [`ClapTextModel`].
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
68
+ The epsilon used by the layer normalization layers.
69
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
70
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
71
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
72
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
73
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
74
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
75
+ is_decoder (`bool`, *optional*, defaults to `False`):
76
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
77
+ use_cache (`bool`, *optional*, defaults to `True`):
78
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
79
+ relevant if `config.is_decoder=True`.
80
+ projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
81
+ The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`,
82
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
83
+ projection_dim (`int`, *optional*, defaults to 512):
84
+ Dimension of the projection head of the `ClapTextModelWithProjection`.
85
+
86
+ Examples:
87
+
88
+ ```python
89
+ >>> from transformers import ClapTextConfig, ClapTextModel
90
+
91
+ >>> # Initializing a CLAP text configuration
92
+ >>> configuration = ClapTextConfig()
93
+
94
+ >>> # Initializing a model (with random weights) from the configuration
95
+ >>> model = ClapTextModel(configuration)
96
+
97
+ >>> # Accessing the model configuration
98
+ >>> configuration = model.config
99
+ ```"""
100
+
101
+ model_type = "clap_text_model"
102
+
103
+ def __init__(
104
+ self,
105
+ vocab_size=50265,
106
+ hidden_size=768,
107
+ num_hidden_layers=12,
108
+ num_attention_heads=12,
109
+ intermediate_size=3072,
110
+ hidden_act="gelu",
111
+ hidden_dropout_prob=0.1,
112
+ attention_probs_dropout_prob=0.1,
113
+ max_position_embeddings=514,
114
+ type_vocab_size=1,
115
+ initializer_factor=1.0,
116
+ layer_norm_eps=1e-12,
117
+ projection_dim=512,
118
+ pad_token_id=1,
119
+ bos_token_id=0,
120
+ eos_token_id=2,
121
+ position_embedding_type="absolute",
122
+ use_cache=True,
123
+ projection_hidden_act="relu",
124
+ **kwargs,
125
+ ):
126
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
127
+
128
+ self.vocab_size = vocab_size
129
+ self.hidden_size = hidden_size
130
+ self.num_hidden_layers = num_hidden_layers
131
+ self.num_attention_heads = num_attention_heads
132
+ self.hidden_act = hidden_act
133
+ self.intermediate_size = intermediate_size
134
+ self.hidden_dropout_prob = hidden_dropout_prob
135
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
136
+ self.max_position_embeddings = max_position_embeddings
137
+ self.type_vocab_size = type_vocab_size
138
+ self.initializer_factor = initializer_factor
139
+ self.layer_norm_eps = layer_norm_eps
140
+ self.position_embedding_type = position_embedding_type
141
+ self.use_cache = use_cache
142
+ self.projection_hidden_act = projection_hidden_act
143
+ self.projection_dim = projection_dim
144
+
145
+ @classmethod
146
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
147
+ cls._set_token_in_kwargs(kwargs)
148
+
149
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
150
+
151
+ # get the text config dict if we are loading from ClapConfig
152
+ if config_dict.get("model_type") == "clap":
153
+ config_dict = config_dict["text_config"]
154
+
155
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
156
+ logger.warning(
157
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
158
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
159
+ )
160
+
161
+ return cls.from_dict(config_dict, **kwargs)
162
+
163
+
164
+ class ClapAudioConfig(PretrainedConfig):
165
+ r"""
166
+ This is the configuration class to store the configuration of a [`ClapAudioModel`]. It is used to instantiate a
167
+ CLAP audio encoder according to the specified arguments, defining the model architecture. Instantiating a
168
+ configuration with the defaults will yield a similar configuration to that of the audio encoder of the CLAP
169
+ [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.
170
+
171
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
172
+ documentation from [`PretrainedConfig`] for more information.
173
+
174
+ Args:
175
+ window_size (`int`, *optional*, defaults to 8):
176
+ Image size of the spectrogram
177
+ num_mel_bins (`int`, *optional*, defaults to 64):
178
+ Number of mel features used per frame. Should correspond to the value used in the `ClapProcessor` class.
179
+ spec_size (`int`, *optional*, defaults to 256):
180
+ Desired input size of the spectrogram that the model supports. It can be different from the output of the
181
+ `ClapFeatureExtractor`, in which case the input features will be resized. Corresponds to the `image_size`
182
+ of the audio models.
183
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
184
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
185
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
186
+ patch_size (`int`, *optional*, defaults to 4):
187
+ Patch size for the audio spectrogram
188
+ patch_stride (`list`, *optional*, defaults to `[4, 4]`):
189
+ Patch stride for the audio spectrogram
190
+ num_classes (`int`, *optional*, defaults to 527):
191
+ Number of classes used for the head training
192
+ hidden_size (`int`, *optional*, defaults to 768):
193
+ Hidden size of the output of the audio encoder. Corresponds to the dimension of the penultimate layer's
194
+ output, which is sent to the projection MLP layer.
195
+ projection_dim (`int`, *optional*, defaults to 512):
196
+ Hidden size of the projection layer.
197
+ depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`):
198
+ Depths used for the Swin Layers of the audio model
199
+ num_attention_heads (`list`, *optional*, defaults to `[4, 8, 16, 32]`):
200
+ Number of attention heads used for the Swin Layers of the audio model
201
+ enable_fusion (`bool`, *optional*, defaults to `False`):
202
+ Whether or not to enable patch fusion. This is the main contribution of the authors, and should give the
203
+ best results.
204
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
205
+ The dropout probability for all fully connected layers in the encoder.
206
+ fusion_type (`str`, *optional*):
207
+ Fusion type used for the patch fusion.
208
+ patch_embed_input_channels (`int`, *optional*, defaults to 1):
209
+ Number of channels used for the input spectrogram
210
+ flatten_patch_embeds (`bool`, *optional*, defaults to `True`):
211
+ Whether or not to flatten the patch embeddings
212
+ patch_embeds_hidden_size (`int`, *optional*, defaults to 96):
213
+ Hidden size of the patch embeddings. It is used as the number of output channels.
214
+ enable_patch_layer_norm (`bool`, *optional*, defaults to `True`):
215
+ Whether or not to enable layer normalization for the patch embeddings
216
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
217
+ Drop path rate for the patch fusion
218
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
219
+ The dropout ratio for the attention probabilities.
220
+ qkv_bias (`bool`, *optional*, defaults to `True`):
221
+ Whether or not to add a bias to the query, key, value projections.
222
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
223
+ Ratio of the mlp hidden dim to embedding dim.
224
+ aff_block_r (`int`, *optional*, defaults to 4):
225
+ downsize_ratio used in the AudioFF block
226
+ num_hidden_layers (`int`, *optional*, defaults to 4):
227
+ Number of hidden layers in the Transformer encoder.
228
+ projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
229
+ The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`,
230
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
231
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
232
+ The epsilon used by the layer normalization layers.
233
+ initializer_factor (`float`, *optional*, defaults to 1.0):
234
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
235
+ testing).
236
+
237
+ Example:
238
+
239
+ ```python
240
+ >>> from transformers import ClapAudioConfig, ClapAudioModel
241
+
242
+ >>> # Initializing a ClapAudioConfig with laion/clap-htsat-fused style configuration
243
+ >>> configuration = ClapAudioConfig()
244
+
245
+ >>> # Initializing a ClapAudioModel (with random weights) from the laion/clap-htsat-fused style configuration
246
+ >>> model = ClapAudioModel(configuration)
247
+
248
+ >>> # Accessing the model configuration
249
+ >>> configuration = model.config
250
+ ```"""
251
+
252
+ model_type = "clap_audio_model"
253
+
254
+ def __init__(
255
+ self,
256
+ window_size=8,
257
+ num_mel_bins=64,
258
+ spec_size=256,
259
+ hidden_act="gelu",
260
+ patch_size=4,
261
+ patch_stride=[4, 4],
262
+ num_classes=527,
263
+ hidden_size=768,
264
+ projection_dim=512,
265
+ depths=[2, 2, 6, 2],
266
+ num_attention_heads=[4, 8, 16, 32],
267
+ enable_fusion=False,
268
+ hidden_dropout_prob=0.1,
269
+ fusion_type=None,
270
+ patch_embed_input_channels=1,
271
+ flatten_patch_embeds=True,
272
+ patch_embeds_hidden_size=96,
273
+ enable_patch_layer_norm=True,
274
+ drop_path_rate=0.0,
275
+ attention_probs_dropout_prob=0.0,
276
+ qkv_bias=True,
277
+ mlp_ratio=4.0,
278
+ aff_block_r=4,
279
+ num_hidden_layers=4,
280
+ projection_hidden_act="relu",
281
+ layer_norm_eps=1e-5,
282
+ initializer_factor=1.0,
283
+ **kwargs,
284
+ ):
285
+ super().__init__(**kwargs)
286
+ self.window_size = window_size
287
+ self.num_mel_bins = num_mel_bins
288
+ self.spec_size = spec_size
289
+ self.patch_size = patch_size
290
+ self.patch_stride = patch_stride
291
+ self.num_classes = num_classes
292
+ self.hidden_size = hidden_size
293
+ self.depths = depths
294
+ self.num_hidden_layers = num_hidden_layers
295
+ self.num_attention_heads = num_attention_heads
296
+ self.window_size = window_size
297
+ self.enable_fusion = enable_fusion
298
+ self.fusion_type = fusion_type
299
+ self.hidden_act = hidden_act
300
+ self.hidden_dropout_prob = hidden_dropout_prob
301
+ self.projection_dim = projection_dim
302
+ self.flatten_patch_embeds = flatten_patch_embeds
303
+ self.patch_embeds_hidden_size = patch_embeds_hidden_size
304
+ self.enable_patch_layer_norm = enable_patch_layer_norm
305
+ self.drop_path_rate = drop_path_rate
306
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
307
+ self.qkv_bias = qkv_bias
308
+ self.mlp_ratio = mlp_ratio
309
+ self.patch_embed_input_channels = patch_embed_input_channels
310
+ self.aff_block_r = aff_block_r
311
+ self.layer_norm_eps = layer_norm_eps
312
+ self.initializer_factor = initializer_factor
313
+ self.projection_hidden_act = projection_hidden_act
314
+
315
+ @classmethod
316
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
317
+ cls._set_token_in_kwargs(kwargs)
318
+
319
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
320
+
321
+ # get the audio config dict if we are loading from ClapConfig
322
+ if config_dict.get("model_type") == "clap":
323
+ config_dict = config_dict["audio_config"]
324
+
325
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
326
+ logger.warning(
327
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
328
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
329
+ )
330
+
331
+ return cls.from_dict(config_dict, **kwargs)
332
+
333
+
334
+ class ClapConfig(PretrainedConfig):
335
+ r"""
336
+ [`ClapConfig`] is the configuration class to store the configuration of a [`ClapModel`]. It is used to instantiate
337
+ a CLAP model according to the specified arguments, defining the text model and audio model configs. Instantiating a
338
+ configuration with the defaults will yield a similar configuration to that of the CLAP
339
+ [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.
340
+
341
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
342
+ documentation from [`PretrainedConfig`] for more information.
343
+
344
+ Args:
345
+ text_config (`dict`, *optional*):
346
+ Dictionary of configuration options used to initialize [`ClapTextConfig`].
347
+ audio_config (`dict`, *optional*):
348
+ Dictionary of configuration options used to initialize [`ClapAudioConfig`].
349
+ logit_scale_init_value (`float`, *optional*, defaults to 14.29):
350
+ The initial value of the *logit_scale* parameter. The default is used as per the original CLAP implementation.
351
+ projection_dim (`int`, *optional*, defaults to 512):
352
+ Dimensionality of the text and audio projection layers.
353
+ projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
354
+ Activation function for the projection layers.
355
+ initializer_factor (`float`, *optional*, defaults to 1.0):
356
+ Factor to scale the initialization of the model weights.
357
+ kwargs (*optional*):
358
+ Dictionary of keyword arguments.
359
+
360
+ Example:
361
+
362
+ ```python
363
+ >>> from transformers import ClapConfig, ClapModel
364
+
365
+ >>> # Initializing a ClapConfig with laion-ai/base style configuration
366
+ >>> configuration = ClapConfig()
367
+
368
+ >>> # Initializing a ClapModel (with random weights) from the laion-ai/base style configuration
369
+ >>> model = ClapModel(configuration)
370
+
371
+ >>> # Accessing the model configuration
372
+ >>> configuration = model.config
373
+
374
+ >>> # We can also initialize a ClapConfig from a ClapTextConfig and a ClapAudioConfig
375
+ >>> from transformers import ClapTextConfig, ClapAudioConfig
376
+
377
+ >>> # Initializing a ClapText and ClapAudioConfig configuration
378
+ >>> config_text = ClapTextConfig()
379
+ >>> config_audio = ClapAudioConfig()
380
+
381
+ >>> config = ClapConfig.from_text_audio_configs(config_text, config_audio)
382
+ ```"""
383
+
384
+ model_type = "clap"
385
+
386
+ def __init__(
387
+ self,
388
+ text_config=None,
389
+ audio_config=None,
390
+ logit_scale_init_value=(1 / 0.07),
391
+ projection_dim=512,
392
+ projection_hidden_act="relu",
393
+ initializer_factor=1.0,
394
+ **kwargs,
395
+ ):
396
+ super().__init__(**kwargs)
397
+
398
+ if text_config is None:
399
+ text_config = {}
400
+ logger.info("text_config is None. Initializing the ClapTextConfig with default values.")
401
+
402
+ if audio_config is None:
403
+ audio_config = {}
404
+ logger.info("audio_config is None. initializing the ClapAudioConfig with default values.")
405
+
406
+ self.text_config = ClapTextConfig(**text_config)
407
+ self.audio_config = ClapAudioConfig(**audio_config)
408
+ self.text_config.projection_dim = projection_dim
409
+ self.audio_config.projection_dim = projection_dim
410
+
411
+ self.text_config.projection_hidden_act = projection_hidden_act
412
+ self.audio_config.projection_hidden_act = projection_hidden_act
413
+
414
+ self.projection_dim = projection_dim
415
+ self.projection_hidden_act = projection_hidden_act
416
+ self.hidden_size = self.text_config.hidden_size
417
+
418
+ self.logit_scale_init_value = logit_scale_init_value
419
+ self.initializer_factor = initializer_factor
420
+ self.num_hidden_layers = self.text_config.num_hidden_layers + len(self.audio_config.depths)
421
+
422
+ @classmethod
423
+ def from_text_audio_configs(cls, text_config: ClapTextConfig, audio_config: ClapAudioConfig, **kwargs):
424
+ r"""
425
+ Instantiate a [`ClapConfig`] (or a derived class) from clap text model configuration and clap audio model
426
+ configuration.
427
+
428
+ Returns:
429
+ [`ClapConfig`]: An instance of a configuration object
430
+ """
431
+
432
+ return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs)
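Because the sub-configs override `from_pretrained` to pull the matching nested section out of a composite `clap` config, all of the following entry points end up consistent. A hedged usage sketch; the checkpoint name is the unfused CLAP model referenced elsewhere in this commit.

```python
from transformers import ClapConfig, ClapTextConfig, ClapAudioConfig

# The composite config exposes both nested sections.
config = ClapConfig.from_pretrained("laion/clap-htsat-unfused")
print(type(config.text_config).__name__, type(config.audio_config).__name__)

# The sub-config classes can be pointed at the same checkpoint; their
# from_pretrained overrides extract the relevant nested dict.
text_config = ClapTextConfig.from_pretrained("laion/clap-htsat-unfused")
audio_config = ClapAudioConfig.from_pretrained("laion/clap-htsat-unfused")

# And the composite config can be rebuilt from the two pieces.
rebuilt = ClapConfig.from_text_audio_configs(text_config, audio_config)
```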
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,133 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import re
18
+
19
+ from laion_clap import CLAP_Module
20
+
21
+ from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
22
+
23
+
24
+ KEYS_TO_MODIFY_MAPPING = {
25
+ "text_branch": "text_model",
26
+ "audio_branch": "audio_model.audio_encoder",
27
+ "attn": "attention.self",
28
+ "self.proj": "output.dense",
29
+ "attention.self_mask": "attn_mask",
30
+ "mlp.fc1": "intermediate.dense",
31
+ "mlp.fc2": "output.dense",
32
+ "norm1": "layernorm_before",
33
+ "norm2": "layernorm_after",
34
+ "bn0": "batch_norm",
35
+ }
36
+
37
+ processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
38
+
39
+
40
+ def init_clap(checkpoint_path, model_type, enable_fusion=False):
41
+ model = CLAP_Module(
42
+ amodel=model_type,
43
+ enable_fusion=enable_fusion,
44
+ )
45
+ model.load_ckpt(checkpoint_path)
46
+ return model
47
+
48
+
49
+ def get_config_from_original(clap_model):
50
+ audio_config = {
51
+ "patch_embeds_hidden_size": clap_model.model.audio_branch.embed_dim,
52
+ "depths": clap_model.model.audio_branch.depths,
53
+ "hidden_size": clap_model.model.audio_projection[0].in_features,
54
+ }
55
+
56
+ text_config = {"hidden_size": clap_model.model.text_branch.pooler.dense.in_features}
57
+
58
+ return ClapConfig(audio_config=audio_config, text_config=text_config)
59
+
60
+
61
+ def rename_state_dict(state_dict):
62
+ model_state_dict = {}
63
+
64
+ sequential_layers_pattern = r".*sequential.(\d+).*"
65
+ text_projection_pattern = r".*_projection.(\d+).*"
66
+
67
+ for key, value in state_dict.items():
68
+ # check if any key needs to be modified
69
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
70
+ if key_to_modify in key:
71
+ key = key.replace(key_to_modify, new_key)
72
+
73
+ if re.match(sequential_layers_pattern, key):
74
+ # replace sequential layers with list
75
+ sequential_layer = re.match(sequential_layers_pattern, key).group(1)
76
+
77
+ key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
78
+ elif re.match(text_projection_pattern, key):
79
+ projecton_layer = int(re.match(text_projection_pattern, key).group(1))
80
+
81
+ # Because in CLAP they use `nn.Sequential`...
82
+ transformers_projection_layer = 1 if projecton_layer == 0 else 2
83
+
84
+ key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
85
+
86
+ if "audio" and "qkv" in key:
87
+ # split qkv into query key and value
88
+ mixed_qkv = value
89
+ qkv_dim = mixed_qkv.size(0) // 3
90
+
91
+ query_layer = mixed_qkv[:qkv_dim]
92
+ key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
93
+ value_layer = mixed_qkv[qkv_dim * 2 :]
94
+
95
+ model_state_dict[key.replace("qkv", "query")] = query_layer
96
+ model_state_dict[key.replace("qkv", "key")] = key_layer
97
+ model_state_dict[key.replace("qkv", "value")] = value_layer
98
+ else:
99
+ model_state_dict[key] = value
100
+
101
+ return model_state_dict
102
+
103
+
104
+ def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, model_type, enable_fusion=False):
105
+ clap_model = init_clap(checkpoint_path, model_type, enable_fusion=enable_fusion)
106
+
107
+ clap_model.eval()
108
+ state_dict = clap_model.model.state_dict()
109
+ state_dict = rename_state_dict(state_dict)
110
+
111
+ transformers_config = get_config_from_original(clap_model)
112
+ transformers_config.audio_config.enable_fusion = enable_fusion
113
+ model = ClapModel(transformers_config)
114
+
115
+ # ignore the spectrogram embedding layer
116
+ model.load_state_dict(state_dict, strict=False)
117
+
118
+ model.save_pretrained(pytorch_dump_folder_path)
119
+ transformers_config.save_pretrained(pytorch_dump_folder_path)
120
+
121
+
122
+ if __name__ == "__main__":
123
+ parser = argparse.ArgumentParser()
124
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
125
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
126
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
127
+ parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
128
+ parser.add_argument("--model_type", default="HTSAT-tiny", type=str, help="Whether to enable fusion or not")
129
+ args = parser.parse_args()
130
+
131
+ convert_clap_checkpoint(
132
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.model_type, args.enable_fusion
133
+ )
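
To make the qkv handling in `rename_state_dict` above concrete, here is a standalone sketch of the splitting step on a dummy tensor; the dimension is arbitrary and no CLAP checkpoint is required.

```python
import torch

# A fused qkv weight of shape (3 * dim, dim) is split into query / key / value,
# mirroring the branch guarded by the qkv check in rename_state_dict above.
dim = 8
fused_qkv = torch.randn(3 * dim, dim)

qkv_dim = fused_qkv.size(0) // 3
query_layer = fused_qkv[:qkv_dim]
key_layer = fused_qkv[qkv_dim : qkv_dim * 2]
value_layer = fused_qkv[qkv_dim * 2 :]

print(query_layer.shape, key_layer.shape, value_layer.shape)  # three (8, 8) tensors
```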
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py ADDED
@@ -0,0 +1,363 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for CLAP."""
16
+
17
+
18
+ import copy
19
+ from typing import Any, Dict, List, Optional, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+
24
+ from ...audio_utils import mel_filter_bank, spectrogram, window_function
25
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
26
+ from ...feature_extraction_utils import BatchFeature
27
+ from ...utils import TensorType, logging
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ class ClapFeatureExtractor(SequenceFeatureExtractor):
34
+ r"""
35
+ Constructs a CLAP feature extractor.
36
+
37
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
38
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
39
+
40
+ This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the *Short Time
41
+ Fourier Transform* (STFT) which should match pytorch's `torch.stft` equivalent.
42
+
43
+ Args:
44
+ feature_size (`int`, *optional*, defaults to 64):
45
+ The feature dimension of the extracted Mel spectrograms. This corresponds to the number of mel filters
46
+ (`n_mels`).
47
+ sampling_rate (`int`, *optional*, defaults to 48000):
48
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz). This only serves
49
+ to warn users if the audio fed to the feature extractor does not have the same sampling rate.
50
+ hop_length (`int`,*optional*, defaults to 480):
51
+ Length of the overlapping windows for the STFT used to obtain the Mel Spectrogram. The audio will be split
52
+ into smaller `frames` with a step of `hop_length` between each frame.
53
+ max_length_s (`int`, *optional*, defaults to 10):
54
+ The maximum input length of the model in seconds. This is used to pad the audio.
55
+ fft_window_size (`int`, *optional*, defaults to 1024):
56
+ Size of the window (in samples) on which the Fourier transform is applied. This controls the frequency
57
+ resolution of the spectrogram. For example, 400 means that the Fourier transform is computed on windows of 400 samples.
58
+ padding_value (`float`, *optional*, defaults to 0.0):
59
+ Padding value used to pad the audio. Should correspond to silences.
60
+ return_attention_mask (`bool`, *optional*, defaults to `False`):
61
+ Whether or not the model should return the attention masks corresponding to the input.
62
+ frequency_min (`float`, *optional*, defaults to 0):
63
+ The lowest frequency of interest. The STFT will not be computed for values below this.
64
+ frequency_max (`float`, *optional*, defaults to 14000):
65
+ The highest frequency of interest. The STFT will not be computed for values above this.
66
+ top_db (`float`, *optional*):
67
+ The highest decibel value used to convert the mel spectrogram to the log scale. For more details see the
68
+ `audio_utils.power_to_db` function
69
+ truncation (`str`, *optional*, defaults to `"fusion"`):
70
+ Truncation pattern for long audio inputs. Two patterns are available:
71
+ - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a
72
+ downsampled version of the entire mel spectrogram.
73
+ If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a copy
74
+ of the original mel obtained from the padded audio.
75
+ - `rand_trunc` will select a random crop of the mel spectrogram.
76
+ padding (`str`, *optional*, defaults to `"repeatpad"`):
77
+ Padding pattern for shorter audio inputs. Three patterns were originally implemented:
78
+ - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
79
+ - `repeat`: the audio is repeated and then cut to fit the `max_length`
80
+ - `pad`: the audio is padded.
81
+ """
82
+
83
+ model_input_names = ["input_features", "is_longer"]
84
+
85
+ def __init__(
86
+ self,
87
+ feature_size=64,
88
+ sampling_rate=48_000,
89
+ hop_length=480,
90
+ max_length_s=10,
91
+ fft_window_size=1024,
92
+ padding_value=0.0,
93
+ return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask
94
+ frequency_min: float = 0,
95
+ frequency_max: float = 14_000,
96
+ top_db: int = None,
97
+ truncation: str = "fusion",
98
+ padding: str = "repeatpad",
99
+ **kwargs,
100
+ ):
101
+ super().__init__(
102
+ feature_size=feature_size,
103
+ sampling_rate=sampling_rate,
104
+ padding_value=padding_value,
105
+ return_attention_mask=return_attention_mask,
106
+ **kwargs,
107
+ )
108
+ self.top_db = top_db
109
+ self.truncation = truncation
110
+ self.padding = padding
111
+ self.fft_window_size = fft_window_size
112
+ self.nb_frequency_bins = (fft_window_size >> 1) + 1
113
+ self.hop_length = hop_length
114
+ self.max_length_s = max_length_s
115
+ self.nb_max_samples = max_length_s * sampling_rate
116
+ self.sampling_rate = sampling_rate
117
+ self.frequency_min = frequency_min
118
+ self.frequency_max = frequency_max
119
+ self.mel_filters = mel_filter_bank(
120
+ num_frequency_bins=self.nb_frequency_bins,
121
+ num_mel_filters=feature_size,
122
+ min_frequency=frequency_min,
123
+ max_frequency=frequency_max,
124
+ sampling_rate=sampling_rate,
125
+ norm=None,
126
+ mel_scale="htk",
127
+ )
128
+ self.mel_filters_slaney = mel_filter_bank(
129
+ num_frequency_bins=self.nb_frequency_bins,
130
+ num_mel_filters=feature_size,
131
+ min_frequency=frequency_min,
132
+ max_frequency=frequency_max,
133
+ sampling_rate=sampling_rate,
134
+ norm="slaney",
135
+ mel_scale="slaney",
136
+ )
137
+
138
+ def to_dict(self) -> Dict[str, Any]:
139
+ """
140
+ Serializes this instance to a Python dictionary.
141
+
142
+ Returns:
143
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the
144
+ mel filter banks, which do not need to be saved or printed as they are too long.
145
+ """
146
+ output = copy.deepcopy(self.__dict__)
147
+ output["feature_extractor_type"] = self.__class__.__name__
148
+ if "mel_filters" in output:
149
+ del output["mel_filters"]
150
+ if "mel_filters_slaney" in output:
151
+ del output["mel_filters_slaney"]
152
+ return output
153
+
154
+ def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
155
+ """
156
+ Compute the log-mel spectrogram of the provided `waveform` using the Hann window. In CLAP, two different filter
157
+ banks are used depending on the truncation pattern:
158
+ - `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from
159
+ calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation`
160
+ is set to `"fusion"`.
161
+ - `self.mel_filters_slaney`: they correspond to the default parameters of `librosa`, which uses
162
+ `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original
163
+ implementation when the truncation mode is not `"fusion"`.
164
+ """
165
+ log_mel_spectrogram = spectrogram(
166
+ waveform,
167
+ window_function(self.fft_window_size, "hann"),
168
+ frame_length=self.fft_window_size,
169
+ hop_length=self.hop_length,
170
+ power=2.0,
171
+ mel_filters=mel_filters,
172
+ log_mel="dB",
173
+ )
174
+ return log_mel_spectrogram.T
175
+
176
+ def _random_mel_fusion(self, mel, total_frames, chunk_frames):
177
+ ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
178
+ if len(ranges[1]) == 0:
179
+ # if the audio is too short, we just use the first chunk
180
+ ranges[1] = [0]
181
+ if len(ranges[2]) == 0:
182
+ # if the audio is too short, we just use the first chunk
183
+ ranges[2] = [0]
184
+ # randomly choose index for each part
185
+ idx_front = np.random.choice(ranges[0])
186
+ idx_middle = np.random.choice(ranges[1])
187
+ idx_back = np.random.choice(ranges[2])
188
+
189
+ mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
190
+ mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
191
+ mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
192
+
193
+ mel = torch.tensor(mel[None, None, :])
194
+ mel_shrink = torch.nn.functional.interpolate(
195
+ mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
196
+ )
197
+ mel_shrink = mel_shrink[0][0].numpy()
198
+ mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
199
+ return mel_fusion
200
+
201
+ def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
202
+ """
203
+ Extracts the mel spectrogram and prepares it for the mode based on the `truncation` and `padding` arguments.
204
+ Four different paths are possible:
205
+ - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram
206
+ will be computed on the entire audio. 3 random crops and a downsampled version of the full mel spectrogram
207
+ are then stacked together. They will later be used for `feature_fusion`.
208
+ - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is
209
+ padded based on `padding`.
210
+ - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded
211
+ based on `padding`, and is repeated `4` times.
212
+ - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel
213
+ spectrogram will be computed on a random crop of the waveform.
214
+
215
+ """
216
+ if waveform.shape[0] > max_length:
217
+ if truncation == "rand_trunc":
218
+ longer = True
219
+ # random crop to max_length (for compatibility) -> this should be handled by self.pad
220
+ overflow = len(waveform) - max_length
221
+ idx = np.random.randint(0, overflow + 1)
222
+ waveform = waveform[idx : idx + max_length]
223
+ input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
224
+ elif truncation == "fusion":
225
+ mel = self._np_extract_fbank_features(waveform, self.mel_filters)
226
+ chunk_frames = max_length // self.hop_length + 1 # the +1 is related to how the spectrogram is computed
227
+ total_frames = mel.shape[0]
228
+ if chunk_frames == total_frames:
229
+ # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
230
+ # In this case, we just use the whole audio.
231
+ input_mel = np.stack([mel, mel, mel, mel], axis=0)
232
+ longer = False
233
+ else:
234
+ input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
235
+ longer = True
236
+ else:
237
+ raise NotImplementedError(f"data_truncating {truncation} not implemented")
238
+
239
+ else:
240
+ longer = False
241
+ # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
242
+ if waveform.shape[0] < max_length:
243
+ if padding == "repeat":
244
+ n_repeat = int(max_length / len(waveform))
245
+ waveform = np.tile(waveform, n_repeat + 1)[:max_length]
246
+ if padding == "repeatpad":
247
+ n_repeat = int(max_length / len(waveform))
248
+ waveform = np.tile(waveform, n_repeat)
249
+ waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
250
+
251
+ if truncation == "fusion":
252
+ input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
253
+ input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
254
+ else:
255
+ input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
256
+
257
+ return input_mel, longer
258
+
259
+ def __call__(
260
+ self,
261
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
262
+ truncation: str = None,
263
+ padding: Optional[str] = None,
264
+ max_length: Optional[int] = None,
265
+ sampling_rate: Optional[int] = None,
266
+ return_tensors: Optional[Union[str, TensorType]] = None,
267
+ **kwargs,
268
+ ) -> BatchFeature:
269
+ """
270
+ Main method to featurize and prepare for the model one or several sequence(s).
271
+
272
+ Args:
273
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
274
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
275
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
276
+ stereo, i.e. single float per timestep.
277
+ truncation (`str`, *optional*):
278
+ Truncation pattern for long audio inputs. Two patterns are available:
279
+ - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and
280
+ a downsampled version of the entire mel spectrogram.
281
+ If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a
282
+ copy of the original mel obtained from the padded audio.
283
+ - `rand_trunc` will select a random crop of the mel spectrogram.
284
+ padding (`str`, *optional*):
285
+ Padding pattern for shorter audio inputs. Three patterns were originally implemented:
286
+ - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
287
+ - `repeat`: the audio is repeated and then cut to fit the `max_length`
288
+ - `pad`: the audio is padded.
289
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
290
+ If set, will return tensors instead of list of python integers. Acceptable values are:
291
+
292
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
293
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
294
+ - `'np'`: Return Numpy `np.ndarray` objects.
295
+ sampling_rate (`int`, *optional*):
296
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
297
+ `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
298
+ pipeline.
299
+ """
300
+ truncation = truncation if truncation is not None else self.truncation
301
+ padding = padding if padding else self.padding
302
+
303
+ if sampling_rate is not None:
304
+ if sampling_rate != self.sampling_rate:
305
+ raise ValueError(
306
+ f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
307
+ f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
308
+ f" was sampled with {self.sampling_rate} and not {sampling_rate}."
309
+ )
310
+ else:
311
+ logger.warning(
312
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
313
+ "Failing to do so can result in silent errors that might be hard to debug."
314
+ )
315
+
316
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
317
+ if is_batched_numpy and len(raw_speech.shape) > 2:
318
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
319
+ is_batched = is_batched_numpy or (
320
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
321
+ )
322
+
323
+ if is_batched:
324
+ raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
325
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
326
+ raw_speech = np.asarray(raw_speech, dtype=np.float64)
327
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
328
+ raw_speech = raw_speech.astype(np.float64)
329
+
330
+ # always return batch
331
+ if not is_batched:
332
+ raw_speech = [np.asarray(raw_speech)]
333
+
334
+ # convert to mel spectrogram, truncate and pad if needed.
335
+ padded_inputs = [
336
+ self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
337
+ for waveform in raw_speech
338
+ ]
339
+
340
+ input_mel = []
341
+ is_longer = []
342
+ for mel, longer in padded_inputs:
343
+ input_mel.append(mel)
344
+ is_longer.append(longer)
345
+
346
+ if truncation == "fusion" and sum(is_longer) == 0:
347
+ # if no audio is longer than 10s, then randomly select one audio to be longer
348
+ rand_idx = np.random.randint(0, len(input_mel))
349
+ is_longer[rand_idx] = True
350
+
351
+ if isinstance(input_mel[0], List):
352
+ input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
353
+
354
+ # is_longer is a list of bool
355
+ is_longer = [[longer] for longer in is_longer]
356
+
357
+ input_features = {"input_features": input_mel, "is_longer": is_longer}
358
+ input_features = BatchFeature(input_features)
359
+
360
+ if return_tensors is not None:
361
+ input_features = input_features.convert_to_tensors(return_tensors)
362
+
363
+ return input_features
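
A minimal usage sketch for the feature extractor above, assuming synthetic mono audio at the expected 48 kHz sampling rate (no pretrained files are needed when the defaults are used):

```python
import numpy as np
from transformers import ClapFeatureExtractor

feature_extractor = ClapFeatureExtractor()  # default "fusion" truncation and "repeatpad" padding
waveform = np.random.randn(3 * 48_000).astype(np.float64)  # 3 seconds of synthetic mono audio

inputs = feature_extractor(waveform, sampling_rate=48_000, return_tensors="np")

# With "fusion" truncation, 4 stacked mel crops are returned per example; since this clip is
# shorter than 10 s, the copy-based path is taken and one example is randomly flagged as longer.
print(inputs["input_features"].shape)  # approximately (1, 4, 1001, 64)
print(inputs["is_longer"])
```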
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py ADDED
@@ -0,0 +1,117 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Audio/Text processor class for CLAP
17
+ """
18
+
19
+ from ...processing_utils import ProcessorMixin
20
+ from ...tokenization_utils_base import BatchEncoding
21
+
22
+
23
+ class ClapProcessor(ProcessorMixin):
24
+ r"""
25
+ Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
26
+
27
+ [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the
28
+ [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.
29
+
30
+ Args:
31
+ feature_extractor ([`ClapFeatureExtractor`]):
32
+ The audio processor is a required input.
33
+ tokenizer ([`RobertaTokenizerFast`]):
34
+ The tokenizer is a required input.
35
+ """
36
+
37
+ feature_extractor_class = "ClapFeatureExtractor"
38
+ tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
39
+
40
+ def __init__(self, feature_extractor, tokenizer):
41
+ super().__init__(feature_extractor, tokenizer)
42
+
43
+ def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
44
+ """
45
+ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `text`
46
+ and `kwargs` arguments to RobertaTokenizerFast's [`~RobertaTokenizerFast.__call__`] if `text` is not `None` to
47
+ encode the text. To prepare the audio(s), this method forwards the `audios` and `kwrags` arguments to
48
+ ClapFeatureExtractor's [`~ClapFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the
49
+ doctsring of the above two methods for more information.
50
+
51
+ Args:
52
+ text (`str`, `List[str]`, `List[List[str]]`):
53
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
54
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
55
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
56
+ audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
57
+ The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case
58
+ of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels,
59
+ and T the sample length of the audio.
60
+
61
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
62
+ If set, will return tensors of a particular framework. Acceptable values are:
63
+
64
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
65
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
66
+ - `'np'`: Return NumPy `np.ndarray` objects.
67
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
68
+
69
+ Returns:
70
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
71
+
72
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
73
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
74
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
75
+ `None`).
76
+ - **audio_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`.
77
+ """
78
+ sampling_rate = kwargs.pop("sampling_rate", None)
79
+
80
+ if text is None and audios is None:
81
+ raise ValueError("You have to specify either text or audios. Both cannot be none.")
82
+
83
+ if text is not None:
84
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
85
+
86
+ if audios is not None:
87
+ audio_features = self.feature_extractor(
88
+ audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
89
+ )
90
+
91
+ if text is not None and audios is not None:
92
+ encoding["input_features"] = audio_features.input_features
93
+ return encoding
94
+ elif text is not None:
95
+ return encoding
96
+ else:
97
+ return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
98
+
99
+ def batch_decode(self, *args, **kwargs):
100
+ """
101
+ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
102
+ refer to the docstring of this method for more information.
103
+ """
104
+ return self.tokenizer.batch_decode(*args, **kwargs)
105
+
106
+ def decode(self, *args, **kwargs):
107
+ """
108
+ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer
109
+ to the docstring of this method for more information.
110
+ """
111
+ return self.tokenizer.decode(*args, **kwargs)
112
+
113
+ @property
114
+ def model_input_names(self):
115
+ tokenizer_input_names = self.tokenizer.model_input_names
116
+ feature_extractor_input_names = self.feature_extractor.model_input_names
117
+ return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
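
A minimal sketch of the processor above, assuming the `laion/clap-htsat-unfused` checkpoint referenced earlier in this commit can be downloaded:

```python
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

audio = np.random.randn(48_000).astype(np.float64)  # 1 second of synthetic mono audio at 48 kHz
inputs = processor(text=["the sound of a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")

# When both modalities are given, __call__ merges the audio features into the text encoding.
print(sorted(inputs.keys()))  # ['attention_mask', 'input_features', 'input_ids']
```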
env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.44 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_model_with_hifigan.cpython-310.pyc ADDED
Binary file (2.32 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/tokenization_fastspeech2_conformer.cpython-310.pyc ADDED
Binary file (6.73 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/__init__.py ADDED
@@ -0,0 +1,64 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_sentencepiece_available,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {}
26
+
27
+ try:
28
+ if not is_sentencepiece_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
34
+
35
+ try:
36
+ if not is_tokenizers_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
42
+
43
+
44
+ if TYPE_CHECKING:
45
+ try:
46
+ if not is_sentencepiece_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ from .tokenization_nllb import NllbTokenizer
52
+
53
+ try:
54
+ if not is_tokenizers_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ pass
58
+ else:
59
+ from .tokenization_nllb_fast import NllbTokenizerFast
60
+
61
+ else:
62
+ import sys
63
+
64
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (933 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb.cpython-310.pyc ADDED
Binary file (18.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb_fast.cpython-310.pyc ADDED
Binary file (14.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb.py ADDED
@@ -0,0 +1,446 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple
19
+
20
+ import sentencepiece as spm
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ SPIECE_UNDERLINE = "▁"
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
31
+
32
+ PRETRAINED_VOCAB_FILES_MAP = {
33
+ "vocab_file": {
34
+ "facebook/nllb-200-distilled-600M": (
35
+ "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
36
+ ),
37
+ }
38
+ }
39
+
40
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
41
+ "facebook/nllb-200-distilled-600M": 1024,
42
+ }
43
+
44
+ FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip
45
+
46
+
47
+ class NllbTokenizer(PreTrainedTokenizer):
48
+ """
49
+ Construct an NLLB tokenizer.
50
+
51
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
52
+ [SentencePiece](https://github.com/google/sentencepiece).
53
+
54
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
55
+ <tokens> <eos>` for target language documents.
56
+
57
+ Examples:
58
+
59
+ ```python
60
+ >>> from transformers import NllbTokenizer
61
+
62
+ >>> tokenizer = NllbTokenizer.from_pretrained(
63
+ ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
64
+ ... )
65
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
66
+ >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
67
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
68
+ ```
69
+
70
+ Args:
71
+ vocab_file (`str`):
72
+ Path to the vocabulary file.
73
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
74
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
75
+
76
+ <Tip>
77
+
78
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
79
+ sequence. The token used is the `cls_token`.
80
+
81
+ </Tip>
82
+
83
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
84
+ The end of sequence token.
85
+
86
+ <Tip>
87
+
88
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
89
+ The token used is the `sep_token`.
90
+
91
+ </Tip>
92
+
93
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
94
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
95
+ sequence classification or for a text and a question for question answering. It is also used as the last
96
+ token of a sequence built with special tokens.
97
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
98
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
99
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
100
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
101
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
102
+ token instead.
103
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
104
+ The token used for padding, for example when batching sequences of different lengths.
105
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
106
+ The token used for masking values. This is the token used when training this model with masked language
107
+ modeling. This is the token which the model will try to predict.
108
+ tokenizer_file (`str`, *optional*):
109
+ The path to a tokenizer file to use instead of the vocab file.
110
+ src_lang (`str`, *optional*):
111
+ The language to use as source language for translation.
112
+ tgt_lang (`str`, *optional*):
113
+ The language to use as target language for translation.
114
+ sp_model_kwargs (`Dict[str, str]`):
115
+ Additional keyword arguments to pass to the model initialization.
116
+ """
117
+
118
+ vocab_files_names = VOCAB_FILES_NAMES
119
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
120
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
121
+ model_input_names = ["input_ids", "attention_mask"]
122
+
123
+ prefix_tokens: List[int] = []
124
+ suffix_tokens: List[int] = []
125
+
126
+ def __init__(
127
+ self,
128
+ vocab_file,
129
+ bos_token="<s>",
130
+ eos_token="</s>",
131
+ sep_token="</s>",
132
+ cls_token="<s>",
133
+ unk_token="<unk>",
134
+ pad_token="<pad>",
135
+ mask_token="<mask>",
136
+ tokenizer_file=None,
137
+ src_lang=None,
138
+ tgt_lang=None,
139
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
140
+ additional_special_tokens=None,
141
+ legacy_behaviour=False,
142
+ **kwargs,
143
+ ):
144
+ if additional_special_tokens is None:
145
+ additional_special_tokens = FAIRSEQ_LANGUAGE_CODES
146
+ bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
147
+ pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
148
+ eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
149
+ unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
150
+ # Mask token behaves like a normal word, i.e. includes the space before it
151
+ mask_token = (
152
+ AddedToken(mask_token, normalized=True, lstrip=True, special=True)
153
+ if isinstance(mask_token, str)
154
+ else mask_token
155
+ )
156
+
157
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
158
+ self.legacy_behaviour = legacy_behaviour
159
+
160
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
161
+ self.sp_model.Load(str(vocab_file))
162
+ self.vocab_file = vocab_file
163
+ # Original fairseq vocab and spm vocab must be "aligned":
164
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
165
+ # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
166
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
167
+ # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
168
+
169
+ # unk token needs to be in the vocab with correct index
170
+ self._added_tokens_decoder = {0: bos_token, 1: pad_token, 2: eos_token, 3: unk_token}
171
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
172
+ self.fairseq_offset = 1
173
+ self.sp_model_size = len(self.sp_model)
174
+
175
+ # Everything that follows is kept for BC and will be removed in v4.38
176
+ self._fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
177
+ language_codes = FAIRSEQ_LANGUAGE_CODES if additional_special_tokens is None else additional_special_tokens
178
+ self._lang_code_to_id = {
179
+ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(language_codes)
180
+ }
181
+ self._id_to_lang_code = {v: k for k, v in self._lang_code_to_id.items()}
182
+ self._fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
183
+
184
+ self._fairseq_tokens_to_ids.update(self.lang_code_to_id)
185
+ self._fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
186
+
187
+ super().__init__(
188
+ bos_token=bos_token,
189
+ eos_token=eos_token,
190
+ unk_token=unk_token,
191
+ sep_token=sep_token,
192
+ cls_token=cls_token,
193
+ pad_token=pad_token,
194
+ mask_token=mask_token,
195
+ tokenizer_file=tokenizer_file,
196
+ src_lang=src_lang,
197
+ tgt_lang=tgt_lang,
198
+ additional_special_tokens=additional_special_tokens,
199
+ sp_model_kwargs=self.sp_model_kwargs,
200
+ legacy_behaviour=legacy_behaviour,
201
+ **kwargs,
202
+ )
203
+
204
+ self._src_lang = src_lang if src_lang is not None else "eng_Latn"
205
+ self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang)
206
+ self.tgt_lang = tgt_lang
207
+ self.set_src_lang_special_tokens(self._src_lang)
208
+
209
+ def __getstate__(self):
210
+ state = self.__dict__.copy()
211
+ state["sp_model"] = None
212
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
213
+ return state
214
+
215
+ def __setstate__(self, d):
216
+ self.__dict__ = d
217
+
218
+ # for backward compatibility
219
+ if not hasattr(self, "sp_model_kwargs"):
220
+ self.sp_model_kwargs = {}
221
+
222
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
223
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
224
+
225
+ @property
226
+ def vocab_size(self):
227
+ return len(self.sp_model) + self.fairseq_offset
228
+
229
+ @property
230
+ def src_lang(self) -> str:
231
+ return self._src_lang
232
+
233
+ @property
234
+ def lang_code_to_id(self):
235
+ logger.warning_once(
236
+ "the `lang_code_to_id` attribute is deprecated. The logic is natively handled in the `tokenizer.adder_tokens_decoder`"
237
+ " this attribute will be removed in `transformers` v4.38"
238
+ )
239
+ return self._lang_code_to_id
240
+
241
+ @property
242
+ def fairseq_tokens_to_ids(self):
243
+ logger.warning_once(
244
+ "the `fairseq_tokens_to_ids` attribute is deprecated. The logic is natively handled in the `tokenizer.adder_tokens_decoder`"
245
+ " this attribute will be removed in `transformers` v4.38"
246
+ )
247
+ return self._fairseq_tokens_to_ids
248
+
249
+ @property
250
+ def id_to_lang_code(self):
251
+ logger.warning_once(
252
+ "the `id_to_lang_code` attribute is deprecated. The logic is natively handled in the `tokenizer.adder_tokens_decoder`"
253
+ " this attribute will be removed in `transformers` v4.38"
254
+ )
255
+ return self._id_to_lang_code
256
+
257
+ @property
258
+ def fairseq_ids_to_tokens(self):
259
+ logger.warning_once(
260
+ "the `_fairseq_ids_to_tokens` attribute is deprecated. The logic is natively handled in the `tokenizer.adder_tokens_decoder`"
261
+ " this attribute will be removed in `transformers` v4.38"
262
+ )
263
+ return self._fairseq_ids_to_tokens
264
+
265
+ @src_lang.setter
266
+ def src_lang(self, new_src_lang: str) -> None:
267
+ self._src_lang = new_src_lang
268
+ self.set_src_lang_special_tokens(self._src_lang)
269
+
270
+ def get_special_tokens_mask(
271
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
272
+ ) -> List[int]:
273
+ """
274
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
275
+ special tokens using the tokenizer `prepare_for_model` method.
276
+
277
+ Args:
278
+ token_ids_0 (`List[int]`):
279
+ List of IDs.
280
+ token_ids_1 (`List[int]`, *optional*):
281
+ Optional second list of IDs for sequence pairs.
282
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
283
+ Whether or not the token list is already formatted with special tokens for the model.
284
+
285
+ Returns:
286
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
287
+ """
288
+
289
+ if already_has_special_tokens:
290
+ return super().get_special_tokens_mask(
291
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
292
+ )
293
+
294
+ prefix_ones = [1] * len(self.prefix_tokens)
295
+ suffix_ones = [1] * len(self.suffix_tokens)
296
+ if token_ids_1 is None:
297
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
298
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
299
+
300
+ def build_inputs_with_special_tokens(
301
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
302
+ ) -> List[int]:
303
+ """
304
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
305
+ adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:
306
+
307
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
308
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
309
+
310
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
311
+ separator.
312
+
313
+ Args:
314
+ token_ids_0 (`List[int]`):
315
+ List of IDs to which the special tokens will be added.
316
+ token_ids_1 (`List[int]`, *optional*):
317
+ Optional second list of IDs for sequence pairs.
318
+
319
+ Returns:
320
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
321
+ """
322
+ if token_ids_1 is None:
323
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
324
+ # We don't expect to process pairs, but leave the pair logic for API consistency
325
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
326
+
327
+ def create_token_type_ids_from_sequences(
328
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
329
+ ) -> List[int]:
330
+ """
331
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
332
+ make use of token type ids, therefore a list of zeros is returned.
333
+
334
+ Args:
335
+ token_ids_0 (`List[int]`):
336
+ List of IDs.
337
+ token_ids_1 (`List[int]`, *optional*):
338
+ Optional second list of IDs for sequence pairs.
339
+
340
+ Returns:
341
+ `List[int]`: List of zeros.
342
+
343
+ """
344
+
345
+ sep = [self.sep_token_id]
346
+ cls = [self.cls_token_id]
347
+
348
+ if token_ids_1 is None:
349
+ return len(cls + token_ids_0 + sep) * [0]
350
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
351
+
352
+ def _build_translation_inputs(
353
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
354
+ ):
355
+ """Used by translation pipeline, to prepare inputs for the generate function"""
356
+ if src_lang is None or tgt_lang is None:
357
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
358
+ self.src_lang = src_lang
359
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
360
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
361
+ inputs["forced_bos_token_id"] = tgt_lang_id
362
+ return inputs
363
+
364
+ def get_vocab(self):
365
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
366
+ vocab.update(self.added_tokens_encoder)
367
+ return vocab
368
+
369
+ def _tokenize(self, text: str) -> List[str]:
370
+ return self.sp_model.encode(text, out_type=str)
371
+
372
+ def _convert_token_to_id(self, token):
373
+ """Converts a token (str) in an id using the vocab."""
374
+ spm_id = self.sp_model.PieceToId(token)
375
+ # Need to return unknown token if the SP model returned 0
376
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
377
+
378
+ def _convert_id_to_token(self, index):
379
+ """Converts an index (integer) in a token (str) using the vocab."""
380
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
381
+
382
+ def convert_tokens_to_string(self, tokens):
383
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
384
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
385
+ return out_string
386
+
387
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
388
+ if not os.path.isdir(save_directory):
389
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
390
+ return
391
+ out_vocab_file = os.path.join(
392
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
393
+ )
394
+
395
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
396
+ copyfile(self.vocab_file, out_vocab_file)
397
+ elif not os.path.isfile(self.vocab_file):
398
+ with open(out_vocab_file, "wb") as fi:
399
+ content_spiece_model = self.sp_model.serialized_model_proto()
400
+ fi.write(content_spiece_model)
401
+
402
+ return (out_vocab_file,)
403
+
404
+ def prepare_seq2seq_batch(
405
+ self,
406
+ src_texts: List[str],
407
+ src_lang: str = "eng_Latn",
408
+ tgt_texts: Optional[List[str]] = None,
409
+ tgt_lang: str = "fra_Latn",
410
+ **kwargs,
411
+ ) -> BatchEncoding:
412
+ self.src_lang = src_lang
413
+ self.tgt_lang = tgt_lang
414
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
415
+
416
+ def _switch_to_input_mode(self):
417
+ return self.set_src_lang_special_tokens(self.src_lang)
418
+
419
+ def _switch_to_target_mode(self):
420
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
421
+
422
+ def set_src_lang_special_tokens(self, src_lang) -> None:
423
+ """Reset the special tokens to the source lang setting.
424
+ - In legacy mode: No prefix and suffix=[eos, src_lang_code].
425
+ - In default mode: Prefix=[src_lang_code], suffix = [eos]
426
+ """
427
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
428
+ if self.legacy_behaviour:
429
+ self.prefix_tokens = []
430
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
431
+ else:
432
+ self.prefix_tokens = [self.cur_lang_code]
433
+ self.suffix_tokens = [self.eos_token_id]
434
+
435
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
436
+ """Reset the special tokens to the target lang setting.
437
+ - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
438
+ - In default mode: Prefix=[tgt_lang_code], suffix = [eos]
439
+ """
440
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
441
+ if self.legacy_behaviour:
442
+ self.prefix_tokens = []
443
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
444
+ else:
445
+ self.prefix_tokens = [self.cur_lang_code]
446
+ self.suffix_tokens = [self.eos_token_id]
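
The sketch below illustrates the source-language handling implemented above, assuming the `facebook/nllb-200-distilled-600M` files can be downloaded:

```python
from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")

# In default (non-legacy) mode the source language code is prepended and </s> appended,
# matching set_src_lang_special_tokens above.
tokens = tokenizer.convert_ids_to_tokens(tokenizer("UN Chief says there is no military solution").input_ids)
print(tokens[0], tokens[-1])  # eng_Latn </s>

# Assigning src_lang goes through the setter, which resets prefix_tokens and suffix_tokens.
tokenizer.src_lang = "fra_Latn"
print(tokenizer.prefix_tokens == [tokenizer.convert_tokens_to_ids("fra_Latn")])  # True
```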
env-llmeval/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb_fast.py ADDED
@@ -0,0 +1,359 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import processors
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_nllb import NllbTokenizer
29
+ else:
30
+ NllbTokenizer = None
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
37
+
38
+ PRETRAINED_VOCAB_FILES_MAP = {
39
+ "vocab_file": {
40
+ "facebook/nllb-200-distilled-600M": (
41
+ "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
42
+ ),
43
+ },
44
+ "tokenizer_file": {
45
+ "facebook/nllb-200-distilled-600M": (
46
+ "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
47
+ ),
48
+ },
49
+ }
50
+
51
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
52
+ "facebook/nllb-large-en-ro": 1024,
53
+ "facebook/nllb-200-distilled-600M": 1024,
54
+ }
55
+
56
+ FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip
57
+
58
+
59
+ class NllbTokenizerFast(PreTrainedTokenizerFast):
60
+ """
61
+ Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library). Based on
62
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
63
+
64
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
65
+ refer to this superclass for more information regarding those methods.
66
+
67
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
68
+ <tokens> <eos>` for target language documents.
69
+
70
+ Examples:
71
+
72
+ ```python
73
+ >>> from transformers import NllbTokenizerFast
74
+
75
+ >>> tokenizer = NllbTokenizerFast.from_pretrained(
76
+ ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
77
+ ... )
78
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
79
+ >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
80
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
81
+ ```
82
+
83
+ Args:
84
+ vocab_file (`str`):
85
+ Path to the vocabulary file.
86
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
87
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
88
+
89
+ <Tip>
90
+
91
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
92
+ sequence. The token used is the `cls_token`.
93
+
94
+ </Tip>
95
+
96
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
97
+ The end of sequence token.
98
+
99
+ <Tip>
100
+
101
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
102
+ The token used is the `sep_token`.
103
+
104
+ </Tip>
105
+
106
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
107
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
108
+ sequence classification or for a text and a question for question answering. It is also used as the last
109
+ token of a sequence built with special tokens.
110
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
111
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
112
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
113
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
114
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
115
+ token instead.
116
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
117
+ The token used for padding, for example when batching sequences of different lengths.
118
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
119
+ The token used for masking values. This is the token used when training this model with masked language
120
+ modeling. This is the token which the model will try to predict.
121
+ tokenizer_file (`str`, *optional*):
122
+ The path to a tokenizer file to use instead of the vocab file.
123
+ src_lang (`str`, *optional*):
124
+ The language to use as source language for translation.
125
+ tgt_lang (`str`, *optional*):
126
+ The language to use as target language for translation.
127
+ """
128
+
129
+ vocab_files_names = VOCAB_FILES_NAMES
130
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
131
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
132
+ model_input_names = ["input_ids", "attention_mask"]
133
+ slow_tokenizer_class = NllbTokenizer
134
+
135
+ prefix_tokens: List[int] = []
136
+ suffix_tokens: List[int] = []
137
+
138
+ def __init__(
139
+ self,
140
+ vocab_file=None,
141
+ tokenizer_file=None,
142
+ bos_token="<s>",
143
+ eos_token="</s>",
144
+ sep_token="</s>",
145
+ cls_token="<s>",
146
+ unk_token="<unk>",
147
+ pad_token="<pad>",
148
+ mask_token="<mask>",
149
+ src_lang=None,
150
+ tgt_lang=None,
151
+ additional_special_tokens=None,
152
+ legacy_behaviour=False,
153
+ **kwargs,
154
+ ):
155
+ if additional_special_tokens is None:
156
+ additional_special_tokens = FAIRSEQ_LANGUAGE_CODES
157
+
158
+ self.vocab_file = vocab_file
159
+ # Mask token behaves like a normal word, i.e. includes the space before it
160
+ mask_token = (
161
+ AddedToken(mask_token, normalized=True, lstrip=True, special=True)
162
+ if isinstance(mask_token, str)
163
+ else mask_token
164
+ )
165
+ self.legacy_behaviour = legacy_behaviour
166
+ super().__init__(
167
+ vocab_file=vocab_file,
168
+ tokenizer_file=tokenizer_file,
169
+ bos_token=bos_token,
170
+ eos_token=eos_token,
171
+ sep_token=sep_token,
172
+ cls_token=cls_token,
173
+ unk_token=unk_token,
174
+ pad_token=pad_token,
175
+ src_lang=src_lang,
176
+ tgt_lang=tgt_lang,
177
+ mask_token=mask_token,
178
+ additional_special_tokens=additional_special_tokens,
179
+ legacy_behaviour=legacy_behaviour,
180
+ **kwargs,
181
+ )
182
+
183
+ self._lang_code_to_id = {
184
+ lang_code: self.convert_tokens_to_ids(str(lang_code)) for lang_code in additional_special_tokens
185
+ }
186
+
187
+ self._src_lang = src_lang if src_lang is not None else "eng_Latn"
188
+ self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
189
+ self.tgt_lang = tgt_lang
190
+ self.set_src_lang_special_tokens(self._src_lang)
191
+
192
+ @property
193
+ def lang_code_to_id(self):
194
+ logger.warning_once(
195
+ "the `lang_code_to_id` attribute is deprecated. The logic is natively handled in the `tokenizer.adder_tokens_decoder`"
196
+ " this attribute will be removed in `transformers` v4.38"
197
+ )
198
+ return self._lang_code_to_id
199
+
200
+ @property
201
+ def can_save_slow_tokenizer(self) -> bool:
202
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
203
+
204
+ @property
205
+ def src_lang(self) -> str:
206
+ return self._src_lang
207
+
208
+ @src_lang.setter
209
+ def src_lang(self, new_src_lang: str) -> None:
210
+ self._src_lang = new_src_lang
211
+ self.set_src_lang_special_tokens(self._src_lang)
212
+
213
+ def build_inputs_with_special_tokens(
214
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
215
+ ) -> List[int]:
216
+ """
217
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
218
+ adding special tokens. The special tokens depend on calling set_lang.
219
+
220
+ An NLLB sequence has the following format, where `X` represents the sequence:
221
+
222
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
223
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
224
+
225
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
226
+ separator.
227
+
228
+ Args:
229
+ token_ids_0 (`List[int]`):
230
+ List of IDs to which the special tokens will be added.
231
+ token_ids_1 (`List[int]`, *optional*):
232
+ Optional second list of IDs for sequence pairs.
233
+
234
+ Returns:
235
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
236
+ """
237
+ if token_ids_1 is None:
238
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
239
+ # We don't expect to process pairs, but leave the pair logic for API consistency
240
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
241
+
242
+ def create_token_type_ids_from_sequences(
243
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
244
+ ) -> List[int]:
245
+ """
246
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. NLLB does not
247
+ make use of token type ids, therefore a list of zeros is returned.
248
+
249
+ Args:
250
+ token_ids_0 (`List[int]`):
251
+ List of IDs.
252
+ token_ids_1 (`List[int]`, *optional*):
253
+ Optional second list of IDs for sequence pairs.
254
+
255
+ Returns:
256
+ `List[int]`: List of zeros.
257
+
258
+ """
259
+
260
+ sep = [self.sep_token_id]
261
+ cls = [self.cls_token_id]
262
+
263
+ if token_ids_1 is None:
264
+ return len(cls + token_ids_0 + sep) * [0]
265
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
266
+
267
+ def _build_translation_inputs(
268
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
269
+ ):
270
+ """Used by translation pipeline, to prepare inputs for the generate function"""
271
+ if src_lang is None or tgt_lang is None:
272
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
273
+ self.src_lang = src_lang
274
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
275
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
276
+ inputs["forced_bos_token_id"] = tgt_lang_id
277
+ return inputs
278
+
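For context, a minimal sketch of how the `forced_bos_token_id` prepared by `_build_translation_inputs` is typically consumed during generation; the checkpoint name and the use of `AutoModelForSeq2SeqLM` are illustrative assumptions rather than part of this file:

```python
# Hedged usage sketch: assumes the facebook/nllb-200-distilled-600M checkpoint is reachable.
from transformers import AutoModelForSeq2SeqLM, NllbTokenizerFast

tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Force the target language code as the first generated token, mirroring _build_translation_inputs.
generated_ids = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```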
279
+ def prepare_seq2seq_batch(
280
+ self,
281
+ src_texts: List[str],
282
+ src_lang: str = "eng_Latn",
283
+ tgt_texts: Optional[List[str]] = None,
284
+ tgt_lang: str = "fra_Latn",
285
+ **kwargs,
286
+ ) -> BatchEncoding:
287
+ self.src_lang = src_lang
288
+ self.tgt_lang = tgt_lang
289
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
290
+
291
+ def _switch_to_input_mode(self):
292
+ return self.set_src_lang_special_tokens(self.src_lang)
293
+
294
+ def _switch_to_target_mode(self):
295
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
296
+
297
+ def set_src_lang_special_tokens(self, src_lang) -> None:
298
+ """Reset the special tokens to the source lang setting.
299
+ - In legacy mode: No prefix and suffix=[eos, src_lang_code].
300
+ - In default mode: Prefix=[src_lang_code], suffix=[eos].
301
+ """
302
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
303
+
304
+ if self.legacy_behaviour:
305
+ self.prefix_tokens = []
306
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
307
+ else:
308
+ self.prefix_tokens = [self.cur_lang_code]
309
+ self.suffix_tokens = [self.eos_token_id]
310
+
311
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
312
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
313
+
314
+ self._tokenizer.post_processor = processors.TemplateProcessing(
315
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
316
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
317
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
318
+ )
319
+
320
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
321
+ """Reset the special tokens to the target lang setting.
322
+ - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
323
+ - In default mode: Prefix=[tgt_lang_code], suffix=[eos].
324
+ """
325
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
326
+ if self.legacy_behaviour:
327
+ self.prefix_tokens = []
328
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
329
+ else:
330
+ self.prefix_tokens = [self.cur_lang_code]
331
+ self.suffix_tokens = [self.eos_token_id]
332
+
333
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
334
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
335
+
336
+ self._tokenizer.post_processor = processors.TemplateProcessing(
337
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
338
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
339
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
340
+ )
341
+
342
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
343
+ if not self.can_save_slow_tokenizer:
344
+ raise ValueError(
345
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
346
+ "tokenizer."
347
+ )
348
+
349
+ if not os.path.isdir(save_directory):
350
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
351
+ return
352
+ out_vocab_file = os.path.join(
353
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
354
+ )
355
+
356
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
357
+ copyfile(self.vocab_file, out_vocab_file)
358
+
359
+ return (out_vocab_file,)
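To make the source/target special-token layouts described above easier to follow, here is a small self-contained sketch that mirrors the prefix/suffix logic of `set_src_lang_special_tokens` and `build_inputs_with_special_tokens`; the token ids are hypothetical placeholders, not real NLLB vocabulary ids:

```python
# Standalone illustration of the prefix/suffix layout; all ids below are made-up placeholders.
EOS_ID = 2
LANG_CODE_ID = 999  # stand-in for a language-code token such as eng_Latn

def layout(token_ids, legacy_behaviour=False):
    if legacy_behaviour:
        prefix, suffix = [], [EOS_ID, LANG_CODE_ID]   # tokens ... eos lang_code
    else:
        prefix, suffix = [LANG_CODE_ID], [EOS_ID]     # lang_code tokens ... eos
    return prefix + token_ids + suffix

print(layout([10, 11, 12]))                          # [999, 10, 11, 12, 2]
print(layout([10, 11, 12], legacy_behaviour=True))   # [10, 11, 12, 2, 999]
```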
env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/configuration_qdqbert.cpython-310.pyc ADDED
Binary file (5.16 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/modeling_qdqbert.cpython-310.pyc ADDED
Binary file (48.8 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/modeling_qdqbert.py ADDED
@@ -0,0 +1,1739 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 NVIDIA Corporation and The HuggingFace Team.
3
+ # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch QDQBERT model."""
17
+
18
+
19
+ import math
20
+ import os
21
+ import warnings
22
+ from typing import Dict, List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import (
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ BaseModelOutputWithPoolingAndCrossAttentions,
33
+ CausalLMOutputWithCrossAttentions,
34
+ MaskedLMOutput,
35
+ MultipleChoiceModelOutput,
36
+ NextSentencePredictorOutput,
37
+ QuestionAnsweringModelOutput,
38
+ SequenceClassifierOutput,
39
+ TokenClassifierOutput,
40
+ )
41
+ from ...modeling_utils import PreTrainedModel
42
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
43
+ from ...utils import (
44
+ add_code_sample_docstrings,
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ is_pytorch_quantization_available,
48
+ logging,
49
+ replace_return_docstrings,
50
+ requires_backends,
51
+ )
52
+ from .configuration_qdqbert import QDQBertConfig
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ # soft dependency
58
+ if is_pytorch_quantization_available():
59
+ try:
60
+ from pytorch_quantization import nn as quant_nn
61
+ from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
62
+ except OSError:
63
+ logger.error(
64
+ "QDQBERT model are not usable since `pytorch_quantization` can't be loaded. Please try to reinstall it"
65
+ " following the instructions here:"
66
+ " https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
67
+ )
68
+
69
+ _CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
70
+ _CONFIG_FOR_DOC = "QDQBertConfig"
71
+
72
+ QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
73
+ "google-bert/bert-base-uncased",
74
+ # See all BERT models at https://huggingface.co/models?filter=bert
75
+ ]
76
+
77
+
78
+ def load_tf_weights_in_qdqbert(model, tf_checkpoint_path):
79
+ """Load tf checkpoints in a pytorch model."""
80
+ try:
81
+ import re
82
+
83
+ import numpy as np
84
+ import tensorflow as tf
85
+ except ImportError:
86
+ logger.error(
87
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
88
+ "https://www.tensorflow.org/install/ for installation instructions."
89
+ )
90
+ raise
91
+ tf_path = os.path.abspath(tf_checkpoint_path)
92
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
93
+ # Load weights from TF model
94
+ init_vars = tf.train.list_variables(tf_path)
95
+ names = []
96
+ arrays = []
97
+ for name, shape in init_vars:
98
+ logger.info(f"Loading TF weight {name} with shape {shape}")
99
+ array = tf.train.load_variable(tf_path, name)
100
+ names.append(name)
101
+ arrays.append(array)
102
+
103
+ for name, array in zip(names, arrays):
104
+ name = name.split("/")
105
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
106
+ # which are not required for using pretrained model
107
+ if any(
108
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
109
+ for n in name
110
+ ):
111
+ logger.info(f"Skipping {'/'.join(name)}")
112
+ continue
113
+ pointer = model
114
+ for m_name in name:
115
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
116
+ scope_names = re.split(r"_(\d+)", m_name)
117
+ else:
118
+ scope_names = [m_name]
119
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
120
+ pointer = getattr(pointer, "weight")
121
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
122
+ pointer = getattr(pointer, "bias")
123
+ elif scope_names[0] == "output_weights":
124
+ pointer = getattr(pointer, "weight")
125
+ elif scope_names[0] == "squad":
126
+ pointer = getattr(pointer, "classifier")
127
+ else:
128
+ try:
129
+ pointer = getattr(pointer, scope_names[0])
130
+ except AttributeError:
131
+ logger.info(f"Skipping {'/'.join(name)}")
132
+ continue
133
+ if len(scope_names) >= 2:
134
+ num = int(scope_names[1])
135
+ pointer = pointer[num]
136
+ if m_name[-11:] == "_embeddings":
137
+ pointer = getattr(pointer, "weight")
138
+ elif m_name == "kernel":
139
+ array = np.transpose(array)
140
+ try:
141
+ if pointer.shape != array.shape:
142
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
143
+ except ValueError as e:
144
+ e.args += (pointer.shape, array.shape)
145
+ raise
146
+ logger.info(f"Initialize PyTorch weight {name}")
147
+ pointer.data = torch.from_numpy(array)
148
+ return model
149
+
150
+
151
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert -> QDQBert
152
+ class QDQBertEmbeddings(nn.Module):
153
+ """Construct the embeddings from word, position and token_type embeddings."""
154
+
155
+ def __init__(self, config):
156
+ super().__init__()
157
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
158
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
159
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
160
+
161
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
162
+ # any TensorFlow checkpoint file
163
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
164
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
165
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
166
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
167
+ self.register_buffer(
168
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
169
+ )
170
+ self.register_buffer(
171
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
172
+ )
173
+
174
+ def forward(
175
+ self,
176
+ input_ids: Optional[torch.LongTensor] = None,
177
+ token_type_ids: Optional[torch.LongTensor] = None,
178
+ position_ids: Optional[torch.LongTensor] = None,
179
+ inputs_embeds: Optional[torch.FloatTensor] = None,
180
+ past_key_values_length: int = 0,
181
+ ) -> torch.Tensor:
182
+ if input_ids is not None:
183
+ input_shape = input_ids.size()
184
+ else:
185
+ input_shape = inputs_embeds.size()[:-1]
186
+
187
+ seq_length = input_shape[1]
188
+
189
+ if position_ids is None:
190
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
191
+
192
+ # If token_type_ids is not provided, use the all-zeros buffer registered in the constructor. This usually occurs
193
+ # when it is auto-generated; the registered buffer helps users trace the model without passing token_type_ids and solves
194
+ # issue #5664
195
+ if token_type_ids is None:
196
+ if hasattr(self, "token_type_ids"):
197
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
198
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
199
+ token_type_ids = buffered_token_type_ids_expanded
200
+ else:
201
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
202
+
203
+ if inputs_embeds is None:
204
+ inputs_embeds = self.word_embeddings(input_ids)
205
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
206
+
207
+ embeddings = inputs_embeds + token_type_embeddings
208
+ if self.position_embedding_type == "absolute":
209
+ position_embeddings = self.position_embeddings(position_ids)
210
+ embeddings += position_embeddings
211
+ embeddings = self.LayerNorm(embeddings)
212
+ embeddings = self.dropout(embeddings)
213
+ return embeddings
214
+
215
+
216
+ class QDQBertSelfAttention(nn.Module):
217
+ def __init__(self, config):
218
+ super().__init__()
219
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
220
+ raise ValueError(
221
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
222
+ f"heads ({config.num_attention_heads})"
223
+ )
224
+
225
+ self.num_attention_heads = config.num_attention_heads
226
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
227
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
228
+
229
+ self.query = quant_nn.QuantLinear(config.hidden_size, self.all_head_size)
230
+ self.key = quant_nn.QuantLinear(config.hidden_size, self.all_head_size)
231
+ self.value = quant_nn.QuantLinear(config.hidden_size, self.all_head_size)
232
+
233
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
234
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
235
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
236
+ self.max_position_embeddings = config.max_position_embeddings
237
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
238
+
239
+ self.is_decoder = config.is_decoder
240
+
241
+ self.matmul_q_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
242
+ self.matmul_k_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
243
+ self.matmul_v_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
244
+ self.matmul_a_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
245
+
246
+ def transpose_for_scores(self, x):
247
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
248
+ x = x.view(*new_x_shape)
249
+ return x.permute(0, 2, 1, 3)
250
+
251
+ def forward(
252
+ self,
253
+ hidden_states,
254
+ attention_mask=None,
255
+ head_mask=None,
256
+ encoder_hidden_states=None,
257
+ encoder_attention_mask=None,
258
+ past_key_value=None,
259
+ output_attentions=False,
260
+ ):
261
+ mixed_query_layer = self.query(hidden_states)
262
+
263
+ # If this is instantiated as a cross-attention module, the keys
264
+ # and values come from an encoder; the attention mask needs to be
265
+ # such that the encoder's padding tokens are not attended to.
266
+ is_cross_attention = encoder_hidden_states is not None
267
+
268
+ if is_cross_attention and past_key_value is not None:
269
+ # reuse k,v, cross_attentions
270
+ key_layer = past_key_value[0]
271
+ value_layer = past_key_value[1]
272
+ attention_mask = encoder_attention_mask
273
+ elif is_cross_attention:
274
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
275
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
276
+ attention_mask = encoder_attention_mask
277
+ elif past_key_value is not None:
278
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
279
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
280
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
281
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
282
+ else:
283
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
284
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
285
+
286
+ query_layer = self.transpose_for_scores(mixed_query_layer)
287
+
288
+ if self.is_decoder:
289
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
290
+ # Further calls to cross_attention layer can then reuse all cross-attention
291
+ # key/value_states (first "if" case)
292
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
293
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
294
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
295
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
296
+ past_key_value = (key_layer, value_layer)
297
+
298
+ # Take the dot product between "query" and "key" to get the raw attention scores.
299
+ attention_scores = torch.matmul(
300
+ self.matmul_q_input_quantizer(query_layer), self.matmul_k_input_quantizer(key_layer.transpose(-1, -2))
301
+ )
302
+
303
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
304
+ seq_length = hidden_states.size()[1]
305
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
306
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
307
+ distance = position_ids_l - position_ids_r
308
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
309
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
310
+
311
+ if self.position_embedding_type == "relative_key":
312
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
313
+ attention_scores = attention_scores + relative_position_scores
314
+ elif self.position_embedding_type == "relative_key_query":
315
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
316
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
317
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
318
+
319
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
320
+ if attention_mask is not None:
321
+ # Apply the attention mask (precomputed for all layers in the QDQBertModel forward() function)
322
+ attention_scores = attention_scores + attention_mask
323
+
324
+ # Normalize the attention scores to probabilities.
325
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
326
+
327
+ # This is actually dropping out entire tokens to attend to, which might
328
+ # seem a bit unusual, but is taken from the original Transformer paper.
329
+ attention_probs = self.dropout(attention_probs)
330
+
331
+ # Mask heads if we want to
332
+ if head_mask is not None:
333
+ attention_probs = attention_probs * head_mask
334
+
335
+ context_layer = torch.matmul(
336
+ self.matmul_a_input_quantizer(attention_probs), self.matmul_v_input_quantizer(value_layer)
337
+ )
338
+
339
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
340
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
341
+ context_layer = context_layer.view(*new_context_layer_shape)
342
+
343
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
344
+
345
+ if self.is_decoder:
346
+ outputs = outputs + (past_key_value,)
347
+ return outputs
348
+
349
+
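As a rough illustration of why the matmul operands above are passed through `TensorQuantizer` modules, the sketch below fake-quantizes both inputs of the attention-score matmul with a simple symmetric quantize-dequantize step; it uses plain PyTorch instead of `pytorch_quantization` and is only an assumption-level approximation of what those quantizers do:

```python
import torch

def fake_quantize(x: torch.Tensor, num_bits: int = 8) -> torch.Tensor:
    # Symmetric per-tensor quantize-then-dequantize (the "QDQ" in QDQBERT).
    qmax = 2 ** (num_bits - 1) - 1
    scale = x.abs().max().clamp(min=1e-8) / qmax
    return torch.clamp((x / scale).round(), -qmax, qmax) * scale

query = torch.randn(1, 12, 16, 64)  # (batch, heads, seq_len, head_dim)
key = torch.randn(1, 12, 16, 64)
# Both matmul inputs are fake-quantized so the GEMM can later be executed as an INT8 kernel.
scores = torch.matmul(fake_quantize(query), fake_quantize(key.transpose(-1, -2)))
```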
350
+ class QDQBertSelfOutput(nn.Module):
351
+ def __init__(self, config):
352
+ super().__init__()
353
+ # Quantize Linear layer
354
+ self.dense = quant_nn.QuantLinear(config.hidden_size, config.hidden_size)
355
+
356
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
357
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
358
+
359
+ # Quantize the inputs to the residual add
360
+ self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
361
+ self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
362
+
363
+ def forward(self, hidden_states, input_tensor):
364
+ hidden_states = self.dense(hidden_states)
365
+ hidden_states = self.dropout(hidden_states)
366
+ # Quantize the inputs to the residual add
367
+ add_local = self.add_local_input_quantizer(hidden_states)
368
+ add_residual = self.add_residual_input_quantizer(input_tensor)
369
+ hidden_states = self.LayerNorm(add_local + add_residual)
370
+ return hidden_states
371
+
372
+
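The residual path follows the same idea: both inputs to the skip-connection add are quantized (`add_local_input_quantizer` / `add_residual_input_quantizer`) so the add itself can stay in the low-precision domain. A brief, hedged sketch using the same hypothetical fake-quantize helper as in the attention example:

```python
import torch

def fake_quantize(x, num_bits=8):  # same hypothetical QDQ helper as in the attention sketch
    qmax = 2 ** (num_bits - 1) - 1
    scale = x.abs().max().clamp(min=1e-8) / qmax
    return torch.clamp((x / scale).round(), -qmax, qmax) * scale

dense_out = torch.randn(1, 16, 768)   # output of the quantized dense projection (after dropout)
residual = torch.randn(1, 16, 768)    # skip-connection input
# Quantizing both operands lets the residual add be fused into an INT8 pipeline downstream.
out = fake_quantize(dense_out) + fake_quantize(residual)
```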
373
+ # Based on transformers.models.bert.modeling_bert.BertAttention with Bert -> QDQBert
374
+ class QDQBertAttention(nn.Module):
375
+ def __init__(self, config):
376
+ super().__init__()
377
+ self.self = QDQBertSelfAttention(config)
378
+ self.output = QDQBertSelfOutput(config)
379
+ self.pruned_heads = set()
380
+
381
+ def prune_heads(self, heads):
382
+ if len(heads) == 0:
383
+ return
384
+ heads, index = find_pruneable_heads_and_indices(
385
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
386
+ )
387
+
388
+ # Prune linear layers
389
+ self.self.query = prune_linear_layer(self.self.query, index)
390
+ self.self.key = prune_linear_layer(self.self.key, index)
391
+ self.self.value = prune_linear_layer(self.self.value, index)
392
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
393
+
394
+ # Update hyper params and store pruned heads
395
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
396
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
397
+ self.pruned_heads = self.pruned_heads.union(heads)
398
+
399
+ def forward(
400
+ self,
401
+ hidden_states,
402
+ attention_mask=None,
403
+ head_mask=None,
404
+ encoder_hidden_states=None,
405
+ encoder_attention_mask=None,
406
+ past_key_value=None,
407
+ output_attentions=False,
408
+ ):
409
+ self_outputs = self.self(
410
+ hidden_states,
411
+ attention_mask,
412
+ head_mask,
413
+ encoder_hidden_states,
414
+ encoder_attention_mask,
415
+ past_key_value,
416
+ output_attentions,
417
+ )
418
+ attention_output = self.output(self_outputs[0], hidden_states)
419
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
420
+ return outputs
421
+
422
+
423
+ class QDQBertIntermediate(nn.Module):
424
+ def __init__(self, config):
425
+ super().__init__()
426
+ # Quantize Linear layer
427
+ self.dense = quant_nn.QuantLinear(config.hidden_size, config.intermediate_size)
428
+ if isinstance(config.hidden_act, str):
429
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
430
+ else:
431
+ self.intermediate_act_fn = config.hidden_act
432
+
433
+ def forward(self, hidden_states):
434
+ hidden_states = self.dense(hidden_states)
435
+ hidden_states = self.intermediate_act_fn(hidden_states)
436
+ return hidden_states
437
+
438
+
439
+ class QDQBertOutput(nn.Module):
440
+ def __init__(self, config):
441
+ super().__init__()
442
+ # Quantize Linear layer
443
+ self.dense = quant_nn.QuantLinear(config.intermediate_size, config.hidden_size)
444
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
445
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
446
+
447
+ # Quantize the inputs to the residual add
448
+ self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
449
+ self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
450
+
451
+ def forward(self, hidden_states, input_tensor):
452
+ hidden_states = self.dense(hidden_states)
453
+ hidden_states = self.dropout(hidden_states)
454
+ # Quantize the inputs to the residual add
455
+ add_local = self.add_local_input_quantizer(hidden_states)
456
+ add_residual = self.add_residual_input_quantizer(input_tensor)
457
+ hidden_states = self.LayerNorm(add_local + add_residual)
458
+ return hidden_states
459
+
460
+
461
+ # Based on transformers.models.bert.modeling_bert.BertLayer with Bert -> QDQBert
462
+ class QDQBertLayer(nn.Module):
463
+ def __init__(self, config):
464
+ super().__init__()
465
+ self.seq_len_dim = 1
466
+ self.attention = QDQBertAttention(config)
467
+ self.is_decoder = config.is_decoder
468
+ self.add_cross_attention = config.add_cross_attention
469
+ if self.add_cross_attention:
470
+ if not self.is_decoder:
471
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
472
+ self.crossattention = QDQBertAttention(config)
473
+ self.intermediate = QDQBertIntermediate(config)
474
+ self.output = QDQBertOutput(config)
475
+
476
+ def forward(
477
+ self,
478
+ hidden_states,
479
+ attention_mask=None,
480
+ head_mask=None,
481
+ encoder_hidden_states=None,
482
+ encoder_attention_mask=None,
483
+ past_key_value=None,
484
+ output_attentions=False,
485
+ ):
486
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
487
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
488
+ self_attention_outputs = self.attention(
489
+ hidden_states,
490
+ attention_mask,
491
+ head_mask,
492
+ output_attentions=output_attentions,
493
+ past_key_value=self_attn_past_key_value,
494
+ )
495
+ attention_output = self_attention_outputs[0]
496
+
497
+ # if decoder, the last output is tuple of self-attn cache
498
+ if self.is_decoder:
499
+ outputs = self_attention_outputs[1:-1]
500
+ present_key_value = self_attention_outputs[-1]
501
+ else:
502
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
503
+
504
+ cross_attn_present_key_value = None
505
+ if self.is_decoder and encoder_hidden_states is not None:
506
+ if not hasattr(self, "crossattention"):
507
+ raise ValueError(
508
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
509
+ " by setting `config.add_cross_attention=True`"
510
+ )
511
+
512
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
513
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
514
+ cross_attention_outputs = self.crossattention(
515
+ attention_output,
516
+ attention_mask,
517
+ head_mask,
518
+ encoder_hidden_states,
519
+ encoder_attention_mask,
520
+ cross_attn_past_key_value,
521
+ output_attentions,
522
+ )
523
+ attention_output = cross_attention_outputs[0]
524
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
525
+
526
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
527
+ cross_attn_present_key_value = cross_attention_outputs[-1]
528
+ present_key_value = present_key_value + cross_attn_present_key_value
529
+
530
+ layer_output = self.feed_forward_chunk(attention_output)
531
+ outputs = (layer_output,) + outputs
532
+
533
+ # if decoder, return the attn key/values as the last output
534
+ if self.is_decoder:
535
+ outputs = outputs + (present_key_value,)
536
+
537
+ return outputs
538
+
539
+ def feed_forward_chunk(self, attention_output):
540
+ intermediate_output = self.intermediate(attention_output)
541
+ layer_output = self.output(intermediate_output, attention_output)
542
+ return layer_output
543
+
544
+
545
+ # Based on transformers.models.bert.modeling_bert.BertEncoder with Bert -> QDQBert
546
+ class QDQBertEncoder(nn.Module):
547
+ def __init__(self, config):
548
+ super().__init__()
549
+ self.config = config
550
+ self.layer = nn.ModuleList([QDQBertLayer(config) for _ in range(config.num_hidden_layers)])
551
+ self.gradient_checkpointing = False
552
+
553
+ def forward(
554
+ self,
555
+ hidden_states,
556
+ attention_mask=None,
557
+ head_mask=None,
558
+ encoder_hidden_states=None,
559
+ encoder_attention_mask=None,
560
+ past_key_values=None,
561
+ use_cache=None,
562
+ output_attentions=False,
563
+ output_hidden_states=False,
564
+ return_dict=True,
565
+ ):
566
+ all_hidden_states = () if output_hidden_states else None
567
+ all_self_attentions = () if output_attentions else None
568
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
569
+
570
+ next_decoder_cache = () if use_cache else None
571
+ for i, layer_module in enumerate(self.layer):
572
+ if output_hidden_states:
573
+ all_hidden_states = all_hidden_states + (hidden_states,)
574
+
575
+ layer_head_mask = head_mask[i] if head_mask is not None else None
576
+ past_key_value = past_key_values[i] if past_key_values is not None else None
577
+
578
+ if self.gradient_checkpointing and self.training:
579
+ if use_cache:
580
+ logger.warning_once(
581
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
582
+ )
583
+ use_cache = False
584
+ layer_outputs = self._gradient_checkpointing_func(
585
+ layer_module.__call__,
586
+ hidden_states,
587
+ attention_mask,
588
+ layer_head_mask,
589
+ encoder_hidden_states,
590
+ encoder_attention_mask,
591
+ past_key_value,
592
+ output_attentions,
593
+ )
594
+ else:
595
+ layer_outputs = layer_module(
596
+ hidden_states,
597
+ attention_mask,
598
+ layer_head_mask,
599
+ encoder_hidden_states,
600
+ encoder_attention_mask,
601
+ past_key_value,
602
+ output_attentions,
603
+ )
604
+
605
+ hidden_states = layer_outputs[0]
606
+ if use_cache:
607
+ next_decoder_cache += (layer_outputs[-1],)
608
+ if output_attentions:
609
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
610
+ if self.config.add_cross_attention:
611
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
612
+
613
+ if output_hidden_states:
614
+ all_hidden_states = all_hidden_states + (hidden_states,)
615
+
616
+ if not return_dict:
617
+ return tuple(
618
+ v
619
+ for v in [
620
+ hidden_states,
621
+ next_decoder_cache,
622
+ all_hidden_states,
623
+ all_self_attentions,
624
+ all_cross_attentions,
625
+ ]
626
+ if v is not None
627
+ )
628
+ return BaseModelOutputWithPastAndCrossAttentions(
629
+ last_hidden_state=hidden_states,
630
+ past_key_values=next_decoder_cache,
631
+ hidden_states=all_hidden_states,
632
+ attentions=all_self_attentions,
633
+ cross_attentions=all_cross_attentions,
634
+ )
635
+
636
+
637
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> QDQBert
638
+ class QDQBertPooler(nn.Module):
639
+ def __init__(self, config):
640
+ super().__init__()
641
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
642
+ self.activation = nn.Tanh()
643
+
644
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
645
+ # We "pool" the model by simply taking the hidden state corresponding
646
+ # to the first token.
647
+ first_token_tensor = hidden_states[:, 0]
648
+ pooled_output = self.dense(first_token_tensor)
649
+ pooled_output = self.activation(pooled_output)
650
+ return pooled_output
651
+
652
+
653
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert -> QDQBert
654
+ class QDQBertPredictionHeadTransform(nn.Module):
655
+ def __init__(self, config):
656
+ super().__init__()
657
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
658
+ if isinstance(config.hidden_act, str):
659
+ self.transform_act_fn = ACT2FN[config.hidden_act]
660
+ else:
661
+ self.transform_act_fn = config.hidden_act
662
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
663
+
664
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
665
+ hidden_states = self.dense(hidden_states)
666
+ hidden_states = self.transform_act_fn(hidden_states)
667
+ hidden_states = self.LayerNorm(hidden_states)
668
+ return hidden_states
669
+
670
+
671
+ # Based on transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert -> QDQBert
672
+ class QDQBertLMPredictionHead(nn.Module):
673
+ def __init__(self, config):
674
+ super().__init__()
675
+ self.transform = QDQBertPredictionHeadTransform(config)
676
+
677
+ # The output weights are the same as the input embeddings, but there is
678
+ # an output-only bias for each token.
679
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
680
+
681
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
682
+
683
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
684
+ self.decoder.bias = self.bias
685
+
686
+ def forward(self, hidden_states):
687
+ hidden_states = self.transform(hidden_states)
688
+ hidden_states = self.decoder(hidden_states)
689
+ return hidden_states
690
+
691
+
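A minimal standalone sketch of the sharing arrangement described above: the output projection reuses the input embedding matrix, and `decoder.bias` points at the same parameter as `self.bias` so it is resized together with the vocabulary. In the library the actual tying to the input embeddings is handled elsewhere by `PreTrainedModel`; the sizes here are hypothetical:

```python
import torch
from torch import nn

vocab_size, hidden_size = 100, 16          # hypothetical sizes
word_embeddings = nn.Embedding(vocab_size, hidden_size)

decoder = nn.Linear(hidden_size, vocab_size, bias=False)
decoder.weight = word_embeddings.weight    # tie the output projection to the input embedding matrix
bias = nn.Parameter(torch.zeros(vocab_size))
decoder.bias = bias                        # shared reference: resizing one resizes the other

hidden_states = torch.randn(2, 5, hidden_size)
logits = decoder(hidden_states)            # (2, 5, vocab_size), includes the output-only bias
```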
692
+ # Based on transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert -> QDQBert
693
+ class QDQBertOnlyMLMHead(nn.Module):
694
+ def __init__(self, config):
695
+ super().__init__()
696
+ self.predictions = QDQBertLMPredictionHead(config)
697
+
698
+ def forward(self, sequence_output):
699
+ prediction_scores = self.predictions(sequence_output)
700
+ return prediction_scores
701
+
702
+
703
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert -> QDQBert
704
+ class QDQBertOnlyNSPHead(nn.Module):
705
+ def __init__(self, config):
706
+ super().__init__()
707
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
708
+
709
+ def forward(self, pooled_output):
710
+ seq_relationship_score = self.seq_relationship(pooled_output)
711
+ return seq_relationship_score
712
+
713
+
714
+ # Based on transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert -> QDQBert
715
+ class QDQBertPreTrainingHeads(nn.Module):
716
+ def __init__(self, config):
717
+ super().__init__()
718
+ self.predictions = QDQBertLMPredictionHead(config)
719
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
720
+
721
+ def forward(self, sequence_output, pooled_output):
722
+ prediction_scores = self.predictions(sequence_output)
723
+ seq_relationship_score = self.seq_relationship(pooled_output)
724
+ return prediction_scores, seq_relationship_score
725
+
726
+
727
+ # Based on transformers.models.bert.modeling_bert.BertPreTrainedModel with Bert -> QDQBert
728
+ class QDQBertPreTrainedModel(PreTrainedModel):
729
+ """
730
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
731
+ models.
732
+ """
733
+
734
+ config_class = QDQBertConfig
735
+ load_tf_weights = load_tf_weights_in_qdqbert
736
+ base_model_prefix = "bert"
737
+ supports_gradient_checkpointing = True
738
+
739
+ def _init_weights(self, module):
740
+ """Initialize the weights"""
741
+ if isinstance(module, nn.Linear):
742
+ # Slightly different from the TF version which uses truncated_normal for initialization
743
+ # cf https://github.com/pytorch/pytorch/pull/5617
744
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
745
+ if module.bias is not None:
746
+ module.bias.data.zero_()
747
+ elif isinstance(module, nn.Embedding):
748
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
749
+ if module.padding_idx is not None:
750
+ module.weight.data[module.padding_idx].zero_()
751
+ elif isinstance(module, nn.LayerNorm):
752
+ module.bias.data.zero_()
753
+ module.weight.data.fill_(1.0)
754
+
755
+
756
+ QDQBERT_START_DOCSTRING = r"""
757
+
758
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
759
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
760
+ etc.)
761
+
762
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
763
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
764
+ and behavior.
765
+
766
+ Parameters:
767
+ config ([`QDQBertConfig`]): Model configuration class with all the parameters of the model.
768
+ Initializing with a config file does not load the weights associated with the model, only the
769
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
770
+ """
771
+
772
+ QDQBERT_INPUTS_DOCSTRING = r"""
773
+ Args:
774
+ input_ids (`torch.LongTensor` of shape `({0})`):
775
+ Indices of input sequence tokens in the vocabulary.
776
+
777
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
778
+ [`PreTrainedTokenizer.__call__`] for details.
779
+
780
+ [What are input IDs?](../glossary#input-ids)
781
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
782
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
783
+
784
+ - 1 for tokens that are **not masked**,
785
+ - 0 for tokens that are **masked**.
786
+
787
+ [What are attention masks?](../glossary#attention-mask)
788
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
789
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
790
+ 1]`:
791
+
792
+ - 0 corresponds to a *sentence A* token,
793
+ - 1 corresponds to a *sentence B* token.
794
+
795
+ [What are token type IDs?](../glossary#token-type-ids)
796
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
797
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
798
+ config.max_position_embeddings - 1]`.
799
+
800
+ [What are position IDs?](../glossary#position-ids)
801
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
802
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
803
+
804
+ - 1 indicates the head is **not masked**,
805
+ - 0 indicates the head is **masked**.
806
+
807
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
808
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
809
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
810
+ model's internal embedding lookup matrix.
811
+ output_attentions (`bool`, *optional*):
812
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
813
+ tensors for more detail.
814
+ output_hidden_states (`bool`, *optional*):
815
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
816
+ more detail.
817
+ return_dict (`bool`, *optional*):
818
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
819
+ """
820
+
821
+
822
+ @add_start_docstrings(
823
+ "The bare QDQBERT Model transformer outputting raw hidden-states without any specific head on top.",
824
+ QDQBERT_START_DOCSTRING,
825
+ )
826
+ class QDQBertModel(QDQBertPreTrainedModel):
827
+ """
828
+
829
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
830
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
831
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
832
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
833
+
834
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
835
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
836
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
837
+ """
838
+
839
+ def __init__(self, config, add_pooling_layer: bool = True):
840
+ requires_backends(self, "pytorch_quantization")
841
+ super().__init__(config)
842
+ self.config = config
843
+
844
+ self.embeddings = QDQBertEmbeddings(config)
845
+ self.encoder = QDQBertEncoder(config)
846
+
847
+ self.pooler = QDQBertPooler(config) if add_pooling_layer else None
848
+
849
+ # Initialize weights and apply final processing
850
+ self.post_init()
851
+
852
+ def get_input_embeddings(self):
853
+ return self.embeddings.word_embeddings
854
+
855
+ def set_input_embeddings(self, value):
856
+ self.embeddings.word_embeddings = value
857
+
858
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]):
859
+ """
860
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
861
+ class PreTrainedModel
862
+ """
863
+ for layer, heads in heads_to_prune.items():
864
+ self.encoder.layer[layer].attention.prune_heads(heads)
865
+
866
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
867
+ @add_code_sample_docstrings(
868
+ checkpoint=_CHECKPOINT_FOR_DOC,
869
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
870
+ config_class=_CONFIG_FOR_DOC,
871
+ )
872
+ def forward(
873
+ self,
874
+ input_ids: Optional[torch.LongTensor] = None,
875
+ attention_mask: Optional[torch.FloatTensor] = None,
876
+ token_type_ids: Optional[torch.LongTensor] = None,
877
+ position_ids: Optional[torch.LongTensor] = None,
878
+ head_mask: Optional[torch.FloatTensor] = None,
879
+ inputs_embeds: Optional[torch.FloatTensor] = None,
880
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
881
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
882
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
883
+ use_cache: Optional[bool] = None,
884
+ output_attentions: Optional[bool] = None,
885
+ output_hidden_states: Optional[bool] = None,
886
+ return_dict: Optional[bool] = None,
887
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
888
+ r"""
889
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
890
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
891
+ the model is configured as a decoder.
892
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
893
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
894
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
895
+
896
+ - 1 for tokens that are **not masked**,
897
+ - 0 for tokens that are **masked**.
898
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
899
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
900
+
901
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
902
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
903
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
904
+ use_cache (`bool`, *optional*):
905
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
906
+ `past_key_values`).
907
+ """
908
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
909
+ output_hidden_states = (
910
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
911
+ )
912
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
913
+
914
+ if self.config.is_decoder:
915
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
916
+ else:
917
+ use_cache = False
918
+
919
+ if input_ids is not None and inputs_embeds is not None:
920
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
921
+ elif input_ids is not None:
922
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
923
+ input_shape = input_ids.size()
924
+ batch_size, seq_length = input_shape
925
+ elif inputs_embeds is not None:
926
+ input_shape = inputs_embeds.size()[:-1]
927
+ batch_size, seq_length = input_shape
928
+ else:
929
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
930
+
931
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
932
+
933
+ # past_key_values_length
934
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
935
+
936
+ if attention_mask is None:
937
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
938
+
939
+ if token_type_ids is None:
940
+ if hasattr(self.embeddings, "token_type_ids"):
941
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
942
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
943
+ token_type_ids = buffered_token_type_ids_expanded
944
+ else:
945
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
946
+
947
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
948
+ # ourselves in which case we just need to make it broadcastable to all heads.
949
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
950
+
951
+ # If a 2D or 3D attention mask is provided for the cross-attention
952
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
953
+ if self.config.is_decoder and encoder_hidden_states is not None:
954
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
955
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
956
+ if encoder_attention_mask is None:
957
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
958
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
959
+ else:
960
+ encoder_extended_attention_mask = None
961
+
962
+ # Prepare head mask if needed
963
+ # 1.0 in head_mask indicate we keep the head
964
+ # attention_probs has shape bsz x n_heads x N x N
965
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
966
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
967
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
968
+
969
+ embedding_output = self.embeddings(
970
+ input_ids=input_ids,
971
+ position_ids=position_ids,
972
+ token_type_ids=token_type_ids,
973
+ inputs_embeds=inputs_embeds,
974
+ past_key_values_length=past_key_values_length,
975
+ )
976
+ encoder_outputs = self.encoder(
977
+ embedding_output,
978
+ attention_mask=extended_attention_mask,
979
+ head_mask=head_mask,
980
+ encoder_hidden_states=encoder_hidden_states,
981
+ encoder_attention_mask=encoder_extended_attention_mask,
982
+ past_key_values=past_key_values,
983
+ use_cache=use_cache,
984
+ output_attentions=output_attentions,
985
+ output_hidden_states=output_hidden_states,
986
+ return_dict=return_dict,
987
+ )
988
+ sequence_output = encoder_outputs[0]
989
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
990
+
991
+ if not return_dict:
992
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
993
+
994
+ return BaseModelOutputWithPoolingAndCrossAttentions(
995
+ last_hidden_state=sequence_output,
996
+ pooler_output=pooled_output,
997
+ past_key_values=encoder_outputs.past_key_values,
998
+ hidden_states=encoder_outputs.hidden_states,
999
+ attentions=encoder_outputs.attentions,
1000
+ cross_attentions=encoder_outputs.cross_attentions,
1001
+ )
1002
+
1003
+
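The mask plumbing in this `forward` is easier to follow with a small standalone sketch. The snippet below is only an illustration of how a 2D padding mask is typically expanded into an additive 4D mask that broadcasts over heads and query positions; the shapes and the large negative fill value are assumptions for the example, not the library's `get_extended_attention_mask` itself.

```python
import torch

# Hypothetical illustration: turn a [batch, seq] padding mask (1 = keep, 0 = pad)
# into an additive [batch, 1, 1, seq] mask that broadcasts over heads and queries.
attention_mask = torch.tensor([[1, 1, 1, 0]])
extended = attention_mask[:, None, None, :].to(torch.float32)
extended = (1.0 - extended) * torch.finfo(torch.float32).min

print(extended.shape)      # torch.Size([1, 1, 1, 4])
print(extended[0, 0, 0])   # [0, 0, 0, ~-3.4e38]: the padded position is masked out
```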
1004
+ @add_start_docstrings(
1005
+ """QDQBERT Model with a `language modeling` head on top for CLM fine-tuning.""", QDQBERT_START_DOCSTRING
1006
+ )
1007
+ class QDQBertLMHeadModel(QDQBertPreTrainedModel):
1008
+ _tied_weights_keys = ["predictions.decoder.weight", "predictions.decoder.bias"]
1009
+
1010
+ def __init__(self, config):
1011
+ super().__init__(config)
1012
+
1013
+ if not config.is_decoder:
1014
+ logger.warning("If you want to use `QDQBertLMHeadModel` as a standalone, add `is_decoder=True.`")
1015
+
1016
+ self.bert = QDQBertModel(config, add_pooling_layer=False)
1017
+ self.cls = QDQBertOnlyMLMHead(config)
1018
+
1019
+ # Initialize weights and apply final processing
1020
+ self.post_init()
1021
+
1022
+ def get_output_embeddings(self):
1023
+ return self.cls.predictions.decoder
1024
+
1025
+ def set_output_embeddings(self, new_embeddings):
1026
+ self.cls.predictions.decoder = new_embeddings
1027
+
1028
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1029
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1030
+ def forward(
1031
+ self,
1032
+ input_ids: Optional[torch.LongTensor] = None,
1033
+ attention_mask: Optional[torch.Tensor] = None,
1034
+ token_type_ids: Optional[torch.LongTensor] = None,
1035
+ position_ids: Optional[torch.LongTensor] = None,
1036
+ head_mask: Optional[torch.Tensor] = None,
1037
+ inputs_embeds: Optional[torch.Tensor] = None,
1038
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1039
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1040
+ labels: Optional[torch.LongTensor] = None,
1041
+ past_key_values: Optional[Tuple[Tuple[torch.LongTensor]]] = None,
1042
+ use_cache: Optional[bool] = None,
1043
+ output_attentions: Optional[bool] = None,
1044
+ output_hidden_states: Optional[bool] = None,
1045
+ return_dict: Optional[bool] = None,
1046
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1047
+ r"""
1048
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1049
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1050
+ the model is configured as a decoder.
1051
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1052
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1053
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1054
+
1055
+ - 1 for tokens that are **not masked**,
1056
+ - 0 for tokens that are **masked**.
1057
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1058
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1059
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1060
 +            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1061
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1062
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1063
+
1064
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1065
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1066
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1067
+ use_cache (`bool`, *optional*):
1068
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1069
+ `past_key_values`).
1070
+
1071
+ Returns:
1072
+
1073
+ Example:
1074
+
1075
+ ```python
1076
+ >>> from transformers import AutoTokenizer, QDQBertLMHeadModel, QDQBertConfig
1077
+ >>> import torch
1078
+
1079
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
1080
+ >>> config = QDQBertConfig.from_pretrained("google-bert/bert-base-cased")
1081
+ >>> config.is_decoder = True
1082
+ >>> model = QDQBertLMHeadModel.from_pretrained("google-bert/bert-base-cased", config=config)
1083
+
1084
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1085
+ >>> outputs = model(**inputs)
1086
+
1087
+ >>> prediction_logits = outputs.logits
1088
+ ```"""
1089
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1090
+ if labels is not None:
1091
+ use_cache = False
1092
+
1093
+ outputs = self.bert(
1094
+ input_ids,
1095
+ attention_mask=attention_mask,
1096
+ token_type_ids=token_type_ids,
1097
+ position_ids=position_ids,
1098
+ head_mask=head_mask,
1099
+ inputs_embeds=inputs_embeds,
1100
+ encoder_hidden_states=encoder_hidden_states,
1101
+ encoder_attention_mask=encoder_attention_mask,
1102
+ past_key_values=past_key_values,
1103
+ use_cache=use_cache,
1104
+ output_attentions=output_attentions,
1105
+ output_hidden_states=output_hidden_states,
1106
+ return_dict=return_dict,
1107
+ )
1108
+
1109
+ sequence_output = outputs[0]
1110
+ prediction_scores = self.cls(sequence_output)
1111
+
1112
+ lm_loss = None
1113
+ if labels is not None:
1114
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1115
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1116
+ labels = labels[:, 1:].contiguous()
1117
+ loss_fct = CrossEntropyLoss()
1118
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1119
+
1120
+ if not return_dict:
1121
+ output = (prediction_scores,) + outputs[2:]
1122
+ return ((lm_loss,) + output) if lm_loss is not None else output
1123
+
1124
+ return CausalLMOutputWithCrossAttentions(
1125
+ loss=lm_loss,
1126
+ logits=prediction_scores,
1127
+ past_key_values=outputs.past_key_values,
1128
+ hidden_states=outputs.hidden_states,
1129
+ attentions=outputs.attentions,
1130
+ cross_attentions=outputs.cross_attentions,
1131
+ )
1132
+
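The loss branch above shifts the prediction scores and labels by one position so that the score at step `t` is compared with the token at step `t + 1`. A small self-contained rerun of that shift (with made-up shapes, independent of the model) looks like this:

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, vocab_size = 2, 5, 11
prediction_scores = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))

# Next-token prediction: drop the last score and the first label, then align them.
shifted_scores = prediction_scores[:, :-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()

loss = CrossEntropyLoss()(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(loss.item())
```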
1133
+ def prepare_inputs_for_generation(
1134
+ self,
1135
+ input_ids: Optional[torch.LongTensor],
1136
+ past_key_values=None,
1137
+ attention_mask: Optional[torch.Tensor] = None,
1138
+ **model_kwargs,
1139
+ ):
1140
+ input_shape = input_ids.shape
1141
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1142
+ if attention_mask is None:
1143
+ attention_mask = input_ids.new_ones(input_shape)
1144
+
1145
+ # cut decoder_input_ids if past_key_values is used
1146
+ if past_key_values is not None:
1147
+ past_length = past_key_values[0][0].shape[2]
1148
+
1149
+ # Some generation methods already pass only the last input ID
1150
+ if input_ids.shape[1] > past_length:
1151
+ remove_prefix_length = past_length
1152
+ else:
1153
+ # Default to old behavior: keep only final ID
1154
+ remove_prefix_length = input_ids.shape[1] - 1
1155
+
1156
+ input_ids = input_ids[:, remove_prefix_length:]
1157
+
1158
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1159
+
1160
+ def _reorder_cache(self, past_key_values, beam_idx):
1161
+ reordered_past = ()
1162
+ for layer_past in past_key_values:
1163
+ reordered_past += (
1164
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1165
+ )
1166
+ return reordered_past
1167
+
1168
+
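`prepare_inputs_for_generation` trims the prompt down to the tokens whose key/value states are not yet cached. A toy example of that trimming rule, using a hypothetical cached length:

```python
import torch

input_ids = torch.tensor([[5, 7, 9, 11]])
past_length = 3  # pretend three positions already have cached key/value states

if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length
else:
    # Default to the old behaviour: keep only the final token.
    remove_prefix_length = input_ids.shape[1] - 1

print(input_ids[:, remove_prefix_length:])  # tensor([[11]]) -> only the new token is fed
```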
1169
+ @add_start_docstrings("""QDQBERT Model with a `language modeling` head on top.""", QDQBERT_START_DOCSTRING)
1170
+ class QDQBertForMaskedLM(QDQBertPreTrainedModel):
1171
+ _tied_weights_keys = ["predictions.decoder.weight", "predictions.decoder.bias"]
1172
+
1173
+ def __init__(self, config):
1174
+ super().__init__(config)
1175
+
1176
+ if config.is_decoder:
1177
+ logger.warning(
1178
+ "If you want to use `QDQBertForMaskedLM` make sure `config.is_decoder=False` for "
1179
+ "bi-directional self-attention."
1180
+ )
1181
+
1182
+ self.bert = QDQBertModel(config, add_pooling_layer=False)
1183
+ self.cls = QDQBertOnlyMLMHead(config)
1184
+
1185
+ # Initialize weights and apply final processing
1186
+ self.post_init()
1187
+
1188
+ def get_output_embeddings(self):
1189
+ return self.cls.predictions.decoder
1190
+
1191
+ def set_output_embeddings(self, new_embeddings):
1192
+ self.cls.predictions.decoder = new_embeddings
1193
+
1194
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1195
+ @add_code_sample_docstrings(
1196
+ checkpoint=_CHECKPOINT_FOR_DOC,
1197
+ output_type=MaskedLMOutput,
1198
+ config_class=_CONFIG_FOR_DOC,
1199
+ )
1200
+ def forward(
1201
+ self,
1202
+ input_ids: Optional[torch.LongTensor] = None,
1203
+ attention_mask: Optional[torch.FloatTensor] = None,
1204
+ token_type_ids: Optional[torch.LongTensor] = None,
1205
+ position_ids: Optional[torch.LongTensor] = None,
1206
+ head_mask: Optional[torch.FloatTensor] = None,
1207
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1208
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1209
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1210
+ labels: Optional[torch.LongTensor] = None,
1211
+ output_attentions: Optional[bool] = None,
1212
+ output_hidden_states: Optional[bool] = None,
1213
+ return_dict: Optional[bool] = None,
1214
+ ) -> Union[Tuple, MaskedLMOutput]:
1215
+ r"""
1216
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1217
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1218
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1219
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1220
+ """
1221
+
1222
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1223
+
1224
+ outputs = self.bert(
1225
+ input_ids,
1226
+ attention_mask=attention_mask,
1227
+ token_type_ids=token_type_ids,
1228
+ position_ids=position_ids,
1229
+ head_mask=head_mask,
1230
+ inputs_embeds=inputs_embeds,
1231
+ encoder_hidden_states=encoder_hidden_states,
1232
+ encoder_attention_mask=encoder_attention_mask,
1233
+ output_attentions=output_attentions,
1234
+ output_hidden_states=output_hidden_states,
1235
+ return_dict=return_dict,
1236
+ )
1237
+
1238
+ sequence_output = outputs[0]
1239
+ prediction_scores = self.cls(sequence_output)
1240
+
1241
+ masked_lm_loss = None
1242
+ if labels is not None:
1243
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1244
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1245
+
1246
+ if not return_dict:
1247
+ output = (prediction_scores,) + outputs[2:]
1248
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1249
+
1250
+ return MaskedLMOutput(
1251
+ loss=masked_lm_loss,
1252
+ logits=prediction_scores,
1253
+ hidden_states=outputs.hidden_states,
1254
+ attentions=outputs.attentions,
1255
+ )
1256
+
1257
+ def prepare_inputs_for_generation(
1258
+ self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor] = None, **model_kwargs
1259
+ ):
1260
+ input_shape = input_ids.shape
1261
+ effective_batch_size = input_shape[0]
1262
+
1263
+ # add a dummy token
1264
+ if self.config.pad_token_id is None:
1265
+ raise ValueError("The PAD token should be defined for generation")
1266
+
1267
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1268
+ dummy_token = torch.full(
1269
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1270
+ )
1271
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1272
+
1273
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1274
+
1275
+
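The masked-LM loss above relies on `CrossEntropyLoss` ignoring the `-100` label value, so only the masked positions contribute. A minimal sketch with toy tensors:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 7
logits = torch.randn(1, 4, vocab_size)
# Only position 2 carries a real label; -100 positions are ignored by the loss.
labels = torch.tensor([[-100, -100, 3, -100]])

loss = CrossEntropyLoss()(logits.view(-1, vocab_size), labels.view(-1))
print(loss)  # averaged only over the single labelled position
```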
1276
+ @add_start_docstrings(
1277
+ """Bert Model with a `next sentence prediction (classification)` head on top.""",
1278
+ QDQBERT_START_DOCSTRING,
1279
+ )
1280
+ class QDQBertForNextSentencePrediction(QDQBertPreTrainedModel):
1281
+ def __init__(self, config):
1282
+ super().__init__(config)
1283
+
1284
+ self.bert = QDQBertModel(config)
1285
+ self.cls = QDQBertOnlyNSPHead(config)
1286
+
1287
+ # Initialize weights and apply final processing
1288
+ self.post_init()
1289
+
1290
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1291
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1292
+ def forward(
1293
+ self,
1294
+ input_ids: Optional[torch.LongTensor] = None,
1295
+ attention_mask: Optional[torch.FloatTensor] = None,
1296
+ token_type_ids: Optional[torch.LongTensor] = None,
1297
+ position_ids: Optional[torch.LongTensor] = None,
1298
+ head_mask: Optional[torch.FloatTensor] = None,
1299
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1300
+ labels: Optional[torch.LongTensor] = None,
1301
+ output_attentions: Optional[bool] = None,
1302
+ output_hidden_states: Optional[bool] = None,
1303
+ return_dict: Optional[bool] = None,
1304
+ **kwargs,
1305
+ ) -> Union[Tuple, NextSentencePredictorOutput]:
1306
+ r"""
1307
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1308
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1309
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1310
+
1311
+ - 0 indicates sequence B is a continuation of sequence A,
1312
+ - 1 indicates sequence B is a random sequence.
1313
+
1314
+ Returns:
1315
+
1316
+ Example:
1317
+
1318
+ ```python
1319
+ >>> from transformers import AutoTokenizer, QDQBertForNextSentencePrediction
1320
+ >>> import torch
1321
+
1322
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1323
+ >>> model = QDQBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
1324
+
1325
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1326
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1327
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1328
+
1329
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1330
+ >>> logits = outputs.logits
1331
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1332
+ ```"""
1333
+
1334
+ if "next_sentence_label" in kwargs:
1335
+ warnings.warn(
1336
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1337
+ " `labels` instead.",
1338
+ FutureWarning,
1339
+ )
1340
+ labels = kwargs.pop("next_sentence_label")
1341
+
1342
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1343
+
1344
+ outputs = self.bert(
1345
+ input_ids,
1346
+ attention_mask=attention_mask,
1347
+ token_type_ids=token_type_ids,
1348
+ position_ids=position_ids,
1349
+ head_mask=head_mask,
1350
+ inputs_embeds=inputs_embeds,
1351
+ output_attentions=output_attentions,
1352
+ output_hidden_states=output_hidden_states,
1353
+ return_dict=return_dict,
1354
+ )
1355
+
1356
+ pooled_output = outputs[1]
1357
+
1358
+ seq_relationship_scores = self.cls(pooled_output)
1359
+
1360
+ next_sentence_loss = None
1361
+ if labels is not None:
1362
+ loss_fct = CrossEntropyLoss()
1363
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
1364
+
1365
+ if not return_dict:
1366
+ output = (seq_relationship_scores,) + outputs[2:]
1367
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1368
+
1369
+ return NextSentencePredictorOutput(
1370
+ loss=next_sentence_loss,
1371
+ logits=seq_relationship_scores,
1372
+ hidden_states=outputs.hidden_states,
1373
+ attentions=outputs.attentions,
1374
+ )
1375
+
1376
+
1377
+ @add_start_docstrings(
1378
+ """
1379
+ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1380
+ output) e.g. for GLUE tasks.
1381
+ """,
1382
+ QDQBERT_START_DOCSTRING,
1383
+ )
1384
+ class QDQBertForSequenceClassification(QDQBertPreTrainedModel):
1385
+ def __init__(self, config):
1386
+ super().__init__(config)
1387
+ self.num_labels = config.num_labels
1388
+ self.config = config
1389
+
1390
+ self.bert = QDQBertModel(config)
1391
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1392
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1393
+ # Initialize weights and apply final processing
1394
+ self.post_init()
1395
+
1396
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1397
+ @add_code_sample_docstrings(
1398
+ checkpoint=_CHECKPOINT_FOR_DOC,
1399
+ output_type=SequenceClassifierOutput,
1400
+ config_class=_CONFIG_FOR_DOC,
1401
+ )
1402
+ def forward(
1403
+ self,
1404
+ input_ids: Optional[torch.LongTensor] = None,
1405
+ attention_mask: Optional[torch.FloatTensor] = None,
1406
+ token_type_ids: Optional[torch.LongTensor] = None,
1407
+ position_ids: Optional[torch.LongTensor] = None,
1408
+ head_mask: Optional[torch.FloatTensor] = None,
1409
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1410
+ labels: Optional[torch.LongTensor] = None,
1411
+ output_attentions: Optional[bool] = None,
1412
+ output_hidden_states: Optional[bool] = None,
1413
+ return_dict: Optional[bool] = None,
1414
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1415
+ r"""
1416
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1417
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1418
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1419
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1420
+ """
1421
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1422
+
1423
+ outputs = self.bert(
1424
+ input_ids,
1425
+ attention_mask=attention_mask,
1426
+ token_type_ids=token_type_ids,
1427
+ position_ids=position_ids,
1428
+ head_mask=head_mask,
1429
+ inputs_embeds=inputs_embeds,
1430
+ output_attentions=output_attentions,
1431
+ output_hidden_states=output_hidden_states,
1432
+ return_dict=return_dict,
1433
+ )
1434
+
1435
+ pooled_output = outputs[1]
1436
+
1437
+ pooled_output = self.dropout(pooled_output)
1438
+ logits = self.classifier(pooled_output)
1439
+
1440
+ loss = None
1441
+ if labels is not None:
1442
+ if self.config.problem_type is None:
1443
+ if self.num_labels == 1:
1444
+ self.config.problem_type = "regression"
1445
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1446
+ self.config.problem_type = "single_label_classification"
1447
+ else:
1448
+ self.config.problem_type = "multi_label_classification"
1449
+
1450
+ if self.config.problem_type == "regression":
1451
+ loss_fct = MSELoss()
1452
+ if self.num_labels == 1:
1453
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1454
+ else:
1455
+ loss = loss_fct(logits, labels)
1456
+ elif self.config.problem_type == "single_label_classification":
1457
+ loss_fct = CrossEntropyLoss()
1458
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1459
+ elif self.config.problem_type == "multi_label_classification":
1460
+ loss_fct = BCEWithLogitsLoss()
1461
+ loss = loss_fct(logits, labels)
1462
+ if not return_dict:
1463
+ output = (logits,) + outputs[2:]
1464
+ return ((loss,) + output) if loss is not None else output
1465
+
1466
+ return SequenceClassifierOutput(
1467
+ loss=loss,
1468
+ logits=logits,
1469
+ hidden_states=outputs.hidden_states,
1470
+ attentions=outputs.attentions,
1471
+ )
1472
+
1473
+
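The loss selection above infers `problem_type` from `num_labels` and the label dtype. A standalone sketch of the same dispatch (outside the model, with hypothetical inputs):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def pick_loss(num_labels, labels):
    # Mirrors the dispatch above: regression for a single label, cross-entropy for
    # integer class ids, BCE-with-logits for float multi-label targets.
    if num_labels == 1:
        return MSELoss()
    if labels.dtype in (torch.long, torch.int):
        return CrossEntropyLoss()
    return BCEWithLogitsLoss()

print(pick_loss(1, torch.tensor([0.5])))              # MSELoss
print(pick_loss(3, torch.tensor([2])))                # CrossEntropyLoss
print(pick_loss(3, torch.tensor([[1.0, 0.0, 1.0]])))  # BCEWithLogitsLoss
```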
1474
+ @add_start_docstrings(
1475
+ """
1476
+ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1477
+ softmax) e.g. for RocStories/SWAG tasks.
1478
+ """,
1479
+ QDQBERT_START_DOCSTRING,
1480
+ )
1481
+ class QDQBertForMultipleChoice(QDQBertPreTrainedModel):
1482
+ def __init__(self, config):
1483
+ super().__init__(config)
1484
+
1485
+ self.bert = QDQBertModel(config)
1486
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1487
+ self.classifier = nn.Linear(config.hidden_size, 1)
1488
+
1489
+ # Initialize weights and apply final processing
1490
+ self.post_init()
1491
+
1492
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1493
+ @add_code_sample_docstrings(
1494
+ checkpoint=_CHECKPOINT_FOR_DOC,
1495
+ output_type=MultipleChoiceModelOutput,
1496
+ config_class=_CONFIG_FOR_DOC,
1497
+ )
1498
+ def forward(
1499
+ self,
1500
+ input_ids: Optional[torch.LongTensor] = None,
1501
+ attention_mask: Optional[torch.FloatTensor] = None,
1502
+ token_type_ids: Optional[torch.LongTensor] = None,
1503
+ position_ids: Optional[torch.LongTensor] = None,
1504
+ head_mask: Optional[torch.FloatTensor] = None,
1505
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1506
+ labels: Optional[torch.LongTensor] = None,
1507
+ output_attentions: Optional[bool] = None,
1508
+ output_hidden_states: Optional[bool] = None,
1509
+ return_dict: Optional[bool] = None,
1510
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1511
+ r"""
1512
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1513
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1514
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1515
+ `input_ids` above)
1516
+ """
1517
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1518
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1519
+
1520
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1521
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1522
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1523
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1524
+ inputs_embeds = (
1525
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1526
+ if inputs_embeds is not None
1527
+ else None
1528
+ )
1529
+
1530
+ outputs = self.bert(
1531
+ input_ids,
1532
+ attention_mask=attention_mask,
1533
+ token_type_ids=token_type_ids,
1534
+ position_ids=position_ids,
1535
+ head_mask=head_mask,
1536
+ inputs_embeds=inputs_embeds,
1537
+ output_attentions=output_attentions,
1538
+ output_hidden_states=output_hidden_states,
1539
+ return_dict=return_dict,
1540
+ )
1541
+
1542
+ pooled_output = outputs[1]
1543
+
1544
+ pooled_output = self.dropout(pooled_output)
1545
+ logits = self.classifier(pooled_output)
1546
+ reshaped_logits = logits.view(-1, num_choices)
1547
+
1548
+ loss = None
1549
+ if labels is not None:
1550
+ loss_fct = CrossEntropyLoss()
1551
+ loss = loss_fct(reshaped_logits, labels)
1552
+
1553
+ if not return_dict:
1554
+ output = (reshaped_logits,) + outputs[2:]
1555
+ return ((loss,) + output) if loss is not None else output
1556
+
1557
+ return MultipleChoiceModelOutput(
1558
+ loss=loss,
1559
+ logits=reshaped_logits,
1560
+ hidden_states=outputs.hidden_states,
1561
+ attentions=outputs.attentions,
1562
+ )
1563
+
1564
+
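For multiple choice, the `(batch, num_choices, seq_len)` inputs are flattened into the batch dimension, each flattened row is scored with a single-logit head, and the scores are folded back into one row per example. A shape-only sketch with hypothetical sizes:

```python
import torch

batch_size, num_choices, seq_len = 2, 4, 6
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))

# Flatten choices into the batch dimension before encoding ...
flat_input_ids = input_ids.view(-1, input_ids.size(-1))   # (8, 6)
# ... pretend each flattened example received a single score from the classifier ...
logits = torch.randn(flat_input_ids.size(0), 1)
# ... and fold the scores back into one row of choice logits per example.
reshaped_logits = logits.view(-1, num_choices)             # (2, 4)
print(reshaped_logits.shape)
```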
1565
+ @add_start_docstrings(
1566
+ """
1567
+ QDQBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1568
+ Named-Entity-Recognition (NER) tasks.
1569
+ """,
1570
+ QDQBERT_START_DOCSTRING,
1571
+ )
1572
+ class QDQBertForTokenClassification(QDQBertPreTrainedModel):
1573
+ def __init__(self, config):
1574
+ super().__init__(config)
1575
+ self.num_labels = config.num_labels
1576
+
1577
+ self.bert = QDQBertModel(config, add_pooling_layer=False)
1578
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1579
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1580
+
1581
+ # Initialize weights and apply final processing
1582
+ self.post_init()
1583
+
1584
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1585
+ @add_code_sample_docstrings(
1586
+ checkpoint=_CHECKPOINT_FOR_DOC,
1587
+ output_type=TokenClassifierOutput,
1588
+ config_class=_CONFIG_FOR_DOC,
1589
+ )
1590
+ def forward(
1591
+ self,
1592
+ input_ids: Optional[torch.LongTensor] = None,
1593
+ attention_mask: Optional[torch.FloatTensor] = None,
1594
+ token_type_ids: Optional[torch.LongTensor] = None,
1595
+ position_ids: Optional[torch.LongTensor] = None,
1596
+ head_mask: Optional[torch.FloatTensor] = None,
1597
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1598
+ labels: Optional[torch.LongTensor] = None,
1599
+ output_attentions: Optional[bool] = None,
1600
+ output_hidden_states: Optional[bool] = None,
1601
+ return_dict: Optional[bool] = None,
1602
+ ) -> Union[Tuple, TokenClassifierOutput]:
1603
+ r"""
1604
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1605
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1606
+ """
1607
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1608
+
1609
+ outputs = self.bert(
1610
+ input_ids,
1611
+ attention_mask=attention_mask,
1612
+ token_type_ids=token_type_ids,
1613
+ position_ids=position_ids,
1614
+ head_mask=head_mask,
1615
+ inputs_embeds=inputs_embeds,
1616
+ output_attentions=output_attentions,
1617
+ output_hidden_states=output_hidden_states,
1618
+ return_dict=return_dict,
1619
+ )
1620
+
1621
+ sequence_output = outputs[0]
1622
+
1623
+ sequence_output = self.dropout(sequence_output)
1624
+ logits = self.classifier(sequence_output)
1625
+
1626
+ loss = None
1627
+ if labels is not None:
1628
+ loss_fct = CrossEntropyLoss()
1629
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1630
+
1631
+ if not return_dict:
1632
+ output = (logits,) + outputs[2:]
1633
+ return ((loss,) + output) if loss is not None else output
1634
+
1635
+ return TokenClassifierOutput(
1636
+ loss=loss,
1637
+ logits=logits,
1638
+ hidden_states=outputs.hidden_states,
1639
+ attentions=outputs.attentions,
1640
+ )
1641
+
1642
+
1643
+ @add_start_docstrings(
1644
+ """
1645
+ QDQBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1646
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1647
+ """,
1648
+ QDQBERT_START_DOCSTRING,
1649
+ )
1650
+ class QDQBertForQuestionAnswering(QDQBertPreTrainedModel):
1651
+ def __init__(self, config):
1652
+ super().__init__(config)
1653
+ self.num_labels = config.num_labels
1654
+
1655
+ self.bert = QDQBertModel(config, add_pooling_layer=False)
1656
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1657
+
1658
+ # Initialize weights and apply final processing
1659
+ self.post_init()
1660
+
1661
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1662
+ @add_code_sample_docstrings(
1663
+ checkpoint=_CHECKPOINT_FOR_DOC,
1664
+ output_type=QuestionAnsweringModelOutput,
1665
+ config_class=_CONFIG_FOR_DOC,
1666
+ )
1667
+ def forward(
1668
+ self,
1669
+ input_ids: Optional[torch.LongTensor] = None,
1670
+ attention_mask: Optional[torch.FloatTensor] = None,
1671
+ token_type_ids: Optional[torch.LongTensor] = None,
1672
+ position_ids: Optional[torch.LongTensor] = None,
1673
+ head_mask: Optional[torch.FloatTensor] = None,
1674
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1675
+ start_positions: Optional[torch.LongTensor] = None,
1676
+ end_positions: Optional[torch.LongTensor] = None,
1677
+ output_attentions: Optional[bool] = None,
1678
+ output_hidden_states: Optional[bool] = None,
1679
+ return_dict: Optional[bool] = None,
1680
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1681
+ r"""
1682
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1683
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1684
 +            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1685
+ are not taken into account for computing the loss.
1686
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1687
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1688
 +            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1689
+ are not taken into account for computing the loss.
1690
+ """
1691
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1692
+
1693
+ outputs = self.bert(
1694
+ input_ids,
1695
+ attention_mask=attention_mask,
1696
+ token_type_ids=token_type_ids,
1697
+ position_ids=position_ids,
1698
+ head_mask=head_mask,
1699
+ inputs_embeds=inputs_embeds,
1700
+ output_attentions=output_attentions,
1701
+ output_hidden_states=output_hidden_states,
1702
+ return_dict=return_dict,
1703
+ )
1704
+
1705
+ sequence_output = outputs[0]
1706
+
1707
+ logits = self.qa_outputs(sequence_output)
1708
+ start_logits, end_logits = logits.split(1, dim=-1)
1709
+ start_logits = start_logits.squeeze(-1).contiguous()
1710
+ end_logits = end_logits.squeeze(-1).contiguous()
1711
+
1712
+ total_loss = None
1713
+ if start_positions is not None and end_positions is not None:
1714
 +            # If we are on multi-GPU, splitting can add an extra dimension; squeeze it away
1715
+ if len(start_positions.size()) > 1:
1716
+ start_positions = start_positions.squeeze(-1)
1717
+ if len(end_positions.size()) > 1:
1718
+ end_positions = end_positions.squeeze(-1)
1719
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1720
+ ignored_index = start_logits.size(1)
1721
+ start_positions = start_positions.clamp(0, ignored_index)
1722
+ end_positions = end_positions.clamp(0, ignored_index)
1723
+
1724
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1725
+ start_loss = loss_fct(start_logits, start_positions)
1726
+ end_loss = loss_fct(end_logits, end_positions)
1727
+ total_loss = (start_loss + end_loss) / 2
1728
+
1729
+ if not return_dict:
1730
+ output = (start_logits, end_logits) + outputs[2:]
1731
+ return ((total_loss,) + output) if total_loss is not None else output
1732
+
1733
+ return QuestionAnsweringModelOutput(
1734
+ loss=total_loss,
1735
+ start_logits=start_logits,
1736
+ end_logits=end_logits,
1737
+ hidden_states=outputs.hidden_states,
1738
+ attentions=outputs.attentions,
1739
+ )
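The question-answering head splits a two-channel projection into start and end logits and clamps out-of-range gold positions onto an ignored index. A self-contained sketch of that loss computation with toy tensors (sizes are assumptions for the example):

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len = 2, 10
logits = torch.randn(batch_size, seq_len, 2)

# Split the two-channel head into start and end logits, as in the forward above.
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)

# Clamp out-of-range gold positions so they land on the ignored index.
ignored_index = start_logits.size(1)
start_positions = torch.tensor([3, 42]).clamp(0, ignored_index)
end_positions = torch.tensor([5, 42]).clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)
```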
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__init__.py ADDED
@@ -0,0 +1,80 @@
1
+ # Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tokenizers_available,
20
+ is_torch_available,
21
+ )
22
+
23
+
24
+ _import_structure = {
25
+ "configuration_qwen2": ["QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Qwen2Config"],
26
+ "tokenization_qwen2": ["Qwen2Tokenizer"],
27
+ }
28
+
29
+ try:
30
+ if not is_tokenizers_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_qwen2_fast"] = ["Qwen2TokenizerFast"]
36
+
37
+ try:
38
+ if not is_torch_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["modeling_qwen2"] = [
44
+ "Qwen2ForCausalLM",
45
+ "Qwen2Model",
46
+ "Qwen2PreTrainedModel",
47
+ "Qwen2ForSequenceClassification",
48
+ ]
49
+
50
+
51
+ if TYPE_CHECKING:
52
+ from .configuration_qwen2 import QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP, Qwen2Config
53
+ from .tokenization_qwen2 import Qwen2Tokenizer
54
+
55
+ try:
56
+ if not is_tokenizers_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ from .tokenization_qwen2_fast import Qwen2TokenizerFast
62
+
63
+ try:
64
+ if not is_torch_available():
65
+ raise OptionalDependencyNotAvailable()
66
+ except OptionalDependencyNotAvailable:
67
+ pass
68
+ else:
69
+ from .modeling_qwen2 import (
70
+ Qwen2ForCausalLM,
71
+ Qwen2ForSequenceClassification,
72
+ Qwen2Model,
73
+ Qwen2PreTrainedModel,
74
+ )
75
+
76
+
77
+ else:
78
+ import sys
79
+
80
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
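The `__init__.py` above registers a lazy module so that heavy submodules are imported only when their names are first accessed. The snippet below is a generic sketch of that deferred-import idea, not the `_LazyModule` implementation itself; the class and mapping are hypothetical.

```python
import importlib
import types


class LazyAttrModule(types.ModuleType):
    """Toy stand-in for the deferred-import pattern: attributes resolve to
    submodules only on first access, so importing the package stays cheap."""

    def __init__(self, name, attr_to_submodule):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(attr)
        module = importlib.import_module(submodule)
        return getattr(module, attr)


# Usage sketch: accessing `lazy.sqrt` triggers the import of `math` only then.
lazy = LazyAttrModule("demo", {"sqrt": "math"})
print(lazy.sqrt(9.0))  # 3.0
```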
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.21 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/configuration_qwen2.cpython-310.pyc ADDED
Binary file (5.83 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/modeling_qwen2.cpython-310.pyc ADDED
Binary file (39.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/tokenization_qwen2.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/tokenization_qwen2_fast.cpython-310.pyc ADDED
Binary file (4.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/configuration_qwen2.py ADDED
@@ -0,0 +1,144 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Qwen2 model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "Qwen/Qwen2-7B-beta": "https://huggingface.co/Qwen/Qwen2-7B-beta/resolve/main/config.json",
25
+ }
26
+
27
+
28
+ class Qwen2Config(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
31
+ Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of
33
+ Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 151936):
41
+ Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`Qwen2Model`]
43
+ hidden_size (`int`, *optional*, defaults to 4096):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 22016):
46
+ Dimension of the MLP representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 32):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 32):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ num_key_value_heads (`int`, *optional*, defaults to 32):
52
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
53
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
54
+ `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
55
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
56
+ by meanpooling all the original heads within that group. For more details checkout [this
57
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
58
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
59
+ The non-linear activation function (function or string) in the decoder.
60
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
61
+ The maximum sequence length that this model might ever be used with.
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
65
+ The epsilon used by the rms normalization layers.
66
+ use_cache (`bool`, *optional*, defaults to `True`):
67
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
68
+ relevant if `config.is_decoder=True`.
69
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
70
+ Whether the model's input and output word embeddings should be tied.
71
+ rope_theta (`float`, *optional*, defaults to 10000.0):
72
+ The base period of the RoPE embeddings.
73
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
74
+ Whether to use sliding window attention.
75
+ sliding_window (`int`, *optional*, defaults to 4096):
76
+ Sliding window attention (SWA) window size. If not specified, will default to `4096`.
77
+ max_window_layers (`int`, *optional*, defaults to 28):
78
+ The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
79
+ attention_dropout (`float`, *optional*, defaults to 0.0):
80
+ The dropout ratio for the attention probabilities.
81
+
82
+ ```python
83
+ >>> from transformers import Qwen2Model, Qwen2Config
84
+
85
+ >>> # Initializing a Qwen2 style configuration
86
+ >>> configuration = Qwen2Config()
87
+
88
+ >>> # Initializing a model from the Qwen2-7B style configuration
89
+ >>> model = Qwen2Model(configuration)
90
+
91
+ >>> # Accessing the model configuration
92
+ >>> configuration = model.config
93
+ ```"""
94
+
95
+ model_type = "qwen2"
96
+ keys_to_ignore_at_inference = ["past_key_values"]
97
+
98
+ def __init__(
99
+ self,
100
+ vocab_size=151936,
101
+ hidden_size=4096,
102
+ intermediate_size=22016,
103
+ num_hidden_layers=32,
104
+ num_attention_heads=32,
105
+ num_key_value_heads=32,
106
+ hidden_act="silu",
107
+ max_position_embeddings=32768,
108
+ initializer_range=0.02,
109
+ rms_norm_eps=1e-6,
110
+ use_cache=True,
111
+ tie_word_embeddings=False,
112
+ rope_theta=10000.0,
113
+ use_sliding_window=False,
114
+ sliding_window=4096,
115
+ max_window_layers=28,
116
+ attention_dropout=0.0,
117
+ **kwargs,
118
+ ):
119
+ self.vocab_size = vocab_size
120
+ self.max_position_embeddings = max_position_embeddings
121
+ self.hidden_size = hidden_size
122
+ self.intermediate_size = intermediate_size
123
+ self.num_hidden_layers = num_hidden_layers
124
+ self.num_attention_heads = num_attention_heads
125
+ self.use_sliding_window = use_sliding_window
126
+ self.sliding_window = sliding_window
127
+ self.max_window_layers = max_window_layers
128
+
129
+ # for backward compatibility
130
+ if num_key_value_heads is None:
131
+ num_key_value_heads = num_attention_heads
132
+
133
+ self.num_key_value_heads = num_key_value_heads
134
+ self.hidden_act = hidden_act
135
+ self.initializer_range = initializer_range
136
+ self.rms_norm_eps = rms_norm_eps
137
+ self.use_cache = use_cache
138
+ self.rope_theta = rope_theta
139
+ self.attention_dropout = attention_dropout
140
+
141
+ super().__init__(
142
+ tie_word_embeddings=tie_word_embeddings,
143
+ **kwargs,
144
+ )
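The relation between `num_attention_heads` and `num_key_value_heads` determines how many query heads share each key/value head under grouped-query attention. A quick arithmetic sketch with hypothetical head counts (the config default of 32/32 corresponds to plain multi-head attention):

```python
# Hypothetical head counts, just to show the grouped-query-attention arithmetic.
num_attention_heads = 32
num_key_value_heads = 4          # 4 KV heads shared across the 32 query heads
num_key_value_groups = num_attention_heads // num_key_value_heads

print(num_key_value_groups)      # 8 query heads attend through each KV head
```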
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/modeling_qwen2.py ADDED
@@ -0,0 +1,1401 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Qwen2 model."""
21
+ import inspect
22
+ import math
23
+ import warnings
24
+ from typing import List, Optional, Tuple, Union
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
31
+
32
+ from ...activations import ACT2FN
33
+ from ...cache_utils import Cache, DynamicCache
34
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
35
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import (
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ is_flash_attn_2_available,
41
+ is_flash_attn_greater_or_equal_2_10,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from .configuration_qwen2 import Qwen2Config
46
+
47
+
48
+ if is_flash_attn_2_available():
49
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
50
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
51
+
52
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+
58
+ _CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
59
+ _CONFIG_FOR_DOC = "Qwen2Config"
60
+
61
+ QWEN2_PRETRAINED_MODEL_ARCHIVE_LIST = [
62
+ "Qwen/Qwen2-7B-beta",
63
+ # See all Qwen2 models at https://huggingface.co/models?filter=qwen2
64
+ ]
65
+
66
+
67
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
68
+ def _get_unpad_data(attention_mask):
69
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
70
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
71
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
72
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
73
+ return (
74
+ indices,
75
+ cu_seqlens,
76
+ max_seqlen_in_batch,
77
+ )
78
+
79
+
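For variable-length batches, the helper above gathers the flat indices of real (non-padding) tokens plus cumulative sequence lengths, which is what the FlashAttention varlen kernels expect. A small worked example that mirrors the same computation (re-declared inline so it runs on its own):

```python
import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]], dtype=torch.int32)

seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)          # [3, 2]
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()                        # 3
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))

print(indices.tolist())      # [0, 1, 2, 4, 5] -> flat positions of real tokens
print(cu_seqlens.tolist())   # [0, 3, 5] -> cumulative lengths for the varlen kernel
```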
80
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
81
+ class Qwen2RMSNorm(nn.Module):
82
+ def __init__(self, hidden_size, eps=1e-6):
83
+ """
84
+ Qwen2RMSNorm is equivalent to T5LayerNorm
85
+ """
86
+ super().__init__()
87
+ self.weight = nn.Parameter(torch.ones(hidden_size))
88
+ self.variance_epsilon = eps
89
+
90
+ def forward(self, hidden_states):
91
+ input_dtype = hidden_states.dtype
92
+ hidden_states = hidden_states.to(torch.float32)
93
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
94
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
95
+ return self.weight * hidden_states.to(input_dtype)
96
+
97
+
98
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2
99
+ class Qwen2RotaryEmbedding(nn.Module):
100
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
101
+ super().__init__()
102
+
103
+ self.dim = dim
104
+ self.max_position_embeddings = max_position_embeddings
105
+ self.base = base
106
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
107
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
108
+
109
+ # Build here to make `torch.jit.trace` work.
110
+ self._set_cos_sin_cache(
111
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
112
+ )
113
+
114
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
115
+ self.max_seq_len_cached = seq_len
116
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
117
+
118
+ freqs = torch.outer(t, self.inv_freq)
119
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
120
+ emb = torch.cat((freqs, freqs), dim=-1)
121
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
122
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
123
+
124
+ def forward(self, x, seq_len=None):
125
+ # x: [bs, num_attention_heads, seq_len, head_size]
126
+ if seq_len > self.max_seq_len_cached:
127
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
128
+
129
+ return (
130
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
131
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
132
+ )
133
+
134
+
135
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
136
+ def rotate_half(x):
137
+ """Rotates half the hidden dims of the input."""
138
+ x1 = x[..., : x.shape[-1] // 2]
139
+ x2 = x[..., x.shape[-1] // 2 :]
140
+ return torch.cat((-x2, x1), dim=-1)
141
+
142
+
143
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
144
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
145
+ """Applies Rotary Position Embedding to the query and key tensors.
146
+
147
+ Args:
148
+ q (`torch.Tensor`): The query tensor.
149
+ k (`torch.Tensor`): The key tensor.
150
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
151
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
152
+ position_ids (`torch.Tensor`):
153
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
154
+ used to pass offsetted position ids when working with a KV-cache.
155
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
156
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
157
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
158
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
159
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
160
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
161
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
162
+ Returns:
163
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
164
+ """
165
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
166
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
167
+ q_embed = (q * cos) + (rotate_half(q) * sin)
168
+ k_embed = (k * cos) + (rotate_half(k) * sin)
169
+ return q_embed, k_embed
170
+
171
+
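A tiny, self-contained rerun of the rotation above can help with the broadcasting: the cos/sin caches are indexed by `position_ids`, unsqueezed over the head dimension, and then applied to query and key tensors of shape `[batch, heads, seq, head_dim]`. The helper is re-declared locally and the shapes and angles are made up for the demo.

```python
import torch

def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

batch, heads, seq, head_dim = 1, 2, 3, 4
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)

# cos/sin caches indexed by absolute position, one row per position.
position_ids = torch.arange(seq).unsqueeze(0)                 # (1, 3)
angles = torch.randn(seq, head_dim)
cos = angles.cos()[position_ids].unsqueeze(1)                 # (1, 1, 3, 4)
sin = angles.sin()[position_ids].unsqueeze(1)

q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
print(q_embed.shape, k_embed.shape)  # both torch.Size([1, 2, 3, 4])
```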
172
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
173
+ class Qwen2MLP(nn.Module):
174
+ def __init__(self, config):
175
+ super().__init__()
176
+ self.config = config
177
+ self.hidden_size = config.hidden_size
178
+ self.intermediate_size = config.intermediate_size
179
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
180
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
181
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
182
+ self.act_fn = ACT2FN[config.hidden_act]
183
+
184
+ def forward(self, x):
185
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
186
+
187
+
188
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
189
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
190
+ """
191
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
192
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
193
+ """
194
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
195
+ if n_rep == 1:
196
+ return hidden_states
197
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
198
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
199
+
200
+
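A shape-level sketch (editor's illustration) of what `repeat_kv` does for grouped-query attention: with 8 query heads and 2 key/value heads, each KV head is repeated 4 times along the head axis.

```python
# repeat_kv is equivalent to torch.repeat_interleave along the head dimension.
import torch

batch, num_kv_heads, seq_len, head_dim = 1, 2, 5, 4
n_rep = 4  # num_attention_heads // num_key_value_heads

kv = torch.randn(batch, num_kv_heads, seq_len, head_dim)
expanded = kv[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
repeated = expanded.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)

print(repeated.shape)  # torch.Size([1, 8, 5, 4])
assert torch.equal(repeated, torch.repeat_interleave(kv, repeats=n_rep, dim=1))
```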
201
+ class Qwen2Attention(nn.Module):
202
+ """
203
+ Multi-headed attention from the 'Attention Is All You Need' paper, modified to use sliding window attention as in Longformer
204
+ and "Generating Long Sequences with Sparse Transformers".
205
+ """
206
+
207
+ def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
208
+ super().__init__()
209
+ self.config = config
210
+ self.layer_idx = layer_idx
211
+ if layer_idx is None:
212
+ logger.warning_once(
213
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
214
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
215
+ "when creating this class."
216
+ )
217
+
218
+ self.hidden_size = config.hidden_size
219
+ self.num_heads = config.num_attention_heads
220
+ self.head_dim = self.hidden_size // self.num_heads
221
+ self.num_key_value_heads = config.num_key_value_heads
222
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
223
+ self.max_position_embeddings = config.max_position_embeddings
224
+ self.rope_theta = config.rope_theta
225
+ self.is_causal = True
226
+ self.attention_dropout = config.attention_dropout
227
+
228
+ if (self.head_dim * self.num_heads) != self.hidden_size:
229
+ raise ValueError(
230
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
231
+ f" and `num_heads`: {self.num_heads})."
232
+ )
233
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
234
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
235
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
236
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
237
+
238
+ self.rotary_emb = Qwen2RotaryEmbedding(
239
+ self.head_dim,
240
+ max_position_embeddings=self.max_position_embeddings,
241
+ base=self.rope_theta,
242
+ )
243
+
244
+ def forward(
245
+ self,
246
+ hidden_states: torch.Tensor,
247
+ attention_mask: Optional[torch.Tensor] = None,
248
+ position_ids: Optional[torch.LongTensor] = None,
249
+ past_key_value: Optional[Cache] = None,
250
+ output_attentions: bool = False,
251
+ use_cache: bool = False,
252
+ **kwargs,
253
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
254
+ if "padding_mask" in kwargs:
255
+ warnings.warn(
256
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
257
+ )
258
+ bsz, q_len, _ = hidden_states.size()
259
+
260
+ query_states = self.q_proj(hidden_states)
261
+ key_states = self.k_proj(hidden_states)
262
+ value_states = self.v_proj(hidden_states)
263
+
264
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
265
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
266
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
267
+
268
+ kv_seq_len = key_states.shape[-2]
269
+ if past_key_value is not None:
270
+ if self.layer_idx is None:
271
+ raise ValueError(
272
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
273
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
274
+ "with a layer index."
275
+ )
276
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
277
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
278
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
279
+
280
+ if past_key_value is not None:
281
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
282
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
283
+
284
+ # repeat k/v heads if n_kv_heads < n_heads
285
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
286
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
287
+
288
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
289
+
290
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
291
+ raise ValueError(
292
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
293
+ f" {attn_weights.size()}"
294
+ )
295
+
296
+ if attention_mask is not None:
297
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
298
+ raise ValueError(
299
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
300
+ )
301
+
302
+ attn_weights = attn_weights + attention_mask
303
+
304
+ # upcast attention to fp32
305
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
306
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
307
+ attn_output = torch.matmul(attn_weights, value_states)
308
+
309
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
310
+ raise ValueError(
311
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
312
+ f" {attn_output.size()}"
313
+ )
314
+
315
+ attn_output = attn_output.transpose(1, 2).contiguous()
316
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
317
+
318
+ attn_output = self.o_proj(attn_output)
319
+
320
+ if not output_attentions:
321
+ attn_weights = None
322
+
323
+ return attn_output, attn_weights, past_key_value
324
+
325
+
326
+ class Qwen2FlashAttention2(Qwen2Attention):
327
+ """
328
+ Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
329
+ as the weights of the module stay untouched. The only required change is in the forward pass,
330
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
331
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
332
+ config.max_window_layers layers.
333
+ """
334
+
335
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
336
+ def __init__(self, *args, **kwargs):
337
+ super().__init__(*args, **kwargs)
338
+
339
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
340
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
341
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
342
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
343
+
344
+ def forward(
345
+ self,
346
+ hidden_states: torch.Tensor,
347
+ attention_mask: Optional[torch.Tensor] = None,
348
+ position_ids: Optional[torch.LongTensor] = None,
349
+ past_key_value: Optional[Cache] = None,
350
+ output_attentions: bool = False,
351
+ use_cache: bool = False,
352
+ **kwargs,
353
+ ):
354
+ if "padding_mask" in kwargs:
355
+ warnings.warn(
356
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
357
+ )
358
+
359
+ # overwrite attention_mask with padding_mask
360
+ attention_mask = kwargs.pop("padding_mask")
361
+ bsz, q_len, _ = hidden_states.size()
362
+
363
+ query_states = self.q_proj(hidden_states)
364
+ key_states = self.k_proj(hidden_states)
365
+ value_states = self.v_proj(hidden_states)
366
+
367
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
368
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
369
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
370
+
371
+ kv_seq_len = key_states.shape[-2]
372
+ if past_key_value is not None:
373
+ if self.layer_idx is None:
374
+ raise ValueError(
375
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
376
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
377
+ "with a layer index."
378
+ )
379
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
380
+
381
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
382
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
383
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
384
+
385
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
386
+
387
+ use_sliding_windows = (
388
+ _flash_supports_window_size
389
+ and getattr(self.config, "sliding_window", None) is not None
390
+ and kv_seq_len > self.config.sliding_window
391
+ and self.config.use_sliding_window
392
+ )
393
+
394
+ if not _flash_supports_window_size:
395
+ logger.warning_once(
396
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
397
+ " make sure to upgrade flash-attn library."
398
+ )
399
+
400
+ if past_key_value is not None:
401
+ # Activate cache slicing only if the config defines a `sliding_window` attribute
402
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
403
+ if (
404
+ getattr(self.config, "sliding_window", None) is not None
405
+ and kv_seq_len > self.config.sliding_window
406
+ and cache_has_contents
407
+ ):
408
+ slicing_tokens = 1 - self.config.sliding_window
409
+
410
+ past_key = past_key_value[self.layer_idx][0]
411
+ past_value = past_key_value[self.layer_idx][1]
412
+
413
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
414
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
415
+
416
+ if past_key.shape[-2] != self.config.sliding_window - 1:
417
+ raise ValueError(
418
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
419
+ f" {past_key.shape}"
420
+ )
421
+
422
+ if attention_mask is not None:
423
+ attention_mask = attention_mask[:, slicing_tokens:]
424
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
425
+
426
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
427
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
428
+
429
+ # repeat k/v heads if n_kv_heads < n_heads
430
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
431
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
432
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
433
+
434
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
435
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
436
+ # cast them back to float16 just to be sure everything works as expected.
437
+ input_dtype = query_states.dtype
438
+ if input_dtype == torch.float32:
439
+ if torch.is_autocast_enabled():
440
+ target_dtype = torch.get_autocast_gpu_dtype()
441
+ # Handle the case where the model is quantized
442
+ elif hasattr(self.config, "_pre_quantization_dtype"):
443
+ target_dtype = self.config._pre_quantization_dtype
444
+ else:
445
+ target_dtype = self.q_proj.weight.dtype
446
+
447
+ logger.warning_once(
448
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
449
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
450
+ f" {target_dtype}."
451
+ )
452
+
453
+ query_states = query_states.to(target_dtype)
454
+ key_states = key_states.to(target_dtype)
455
+ value_states = value_states.to(target_dtype)
456
+
457
+ # Reshape to the expected shape for Flash Attention
458
+ query_states = query_states.transpose(1, 2)
459
+ key_states = key_states.transpose(1, 2)
460
+ value_states = value_states.transpose(1, 2)
461
+
462
+ attn_output = self._flash_attention_forward(
463
+ query_states,
464
+ key_states,
465
+ value_states,
466
+ attention_mask,
467
+ q_len,
468
+ dropout=dropout_rate,
469
+ use_sliding_windows=use_sliding_windows,
470
+ )
471
+
472
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
473
+ attn_output = self.o_proj(attn_output)
474
+
475
+ if not output_attentions:
476
+ attn_weights = None
477
+
478
+ return attn_output, attn_weights, past_key_value
479
+
480
+ def _flash_attention_forward(
481
+ self,
482
+ query_states,
483
+ key_states,
484
+ value_states,
485
+ attention_mask,
486
+ query_length,
487
+ dropout=0.0,
488
+ softmax_scale=None,
489
+ use_sliding_windows=False,
490
+ ):
491
+ """
492
+ Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,
493
+ it first unpads the input, then computes the attention scores and re-pads the final attention scores.
494
+
495
+ Args:
496
+ query_states (`torch.Tensor`):
497
+ Input query states to be passed to Flash Attention API
498
+ key_states (`torch.Tensor`):
499
+ Input key states to be passed to Flash Attention API
500
+ value_states (`torch.Tensor`):
501
+ Input value states to be passed to Flash Attention API
502
+ attention_mask (`torch.Tensor`):
503
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
504
+ position of padding tokens and 1 for the position of non-padding tokens.
505
+ dropout (`float`):
506
+ Attention dropout
507
+ softmax_scale (`float`, *optional*):
508
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
509
+ use_sliding_windows (`bool`, *optional*):
510
+ Whether to activate sliding window attention.
511
+ """
512
+ if not self._flash_attn_uses_top_left_mask:
513
+ causal = self.is_causal
514
+ else:
515
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
516
+ causal = self.is_causal and query_length != 1
517
+
518
+ # Decide whether to use SWA or not by layer index.
519
+ if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
520
+ use_sliding_windows = False
521
+
522
+ # Contains at least one padding token in the sequence
523
+ if attention_mask is not None:
524
+ batch_size = query_states.shape[0]
525
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
526
+ query_states, key_states, value_states, attention_mask, query_length
527
+ )
528
+
529
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
530
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
531
+
532
+ if not use_sliding_windows:
533
+ attn_output_unpad = flash_attn_varlen_func(
534
+ query_states,
535
+ key_states,
536
+ value_states,
537
+ cu_seqlens_q=cu_seqlens_q,
538
+ cu_seqlens_k=cu_seqlens_k,
539
+ max_seqlen_q=max_seqlen_in_batch_q,
540
+ max_seqlen_k=max_seqlen_in_batch_k,
541
+ dropout_p=dropout,
542
+ softmax_scale=softmax_scale,
543
+ causal=causal,
544
+ )
545
+ else:
546
+ attn_output_unpad = flash_attn_varlen_func(
547
+ query_states,
548
+ key_states,
549
+ value_states,
550
+ cu_seqlens_q=cu_seqlens_q,
551
+ cu_seqlens_k=cu_seqlens_k,
552
+ max_seqlen_q=max_seqlen_in_batch_q,
553
+ max_seqlen_k=max_seqlen_in_batch_k,
554
+ dropout_p=dropout,
555
+ softmax_scale=softmax_scale,
556
+ causal=causal,
557
+ window_size=(self.config.sliding_window, self.config.sliding_window),
558
+ )
559
+
560
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
561
+ else:
562
+ if not use_sliding_windows:
563
+ attn_output = flash_attn_func(
564
+ query_states,
565
+ key_states,
566
+ value_states,
567
+ dropout,
568
+ softmax_scale=softmax_scale,
569
+ causal=causal,
570
+ )
571
+ else:
572
+ attn_output = flash_attn_func(
573
+ query_states,
574
+ key_states,
575
+ value_states,
576
+ dropout,
577
+ softmax_scale=softmax_scale,
578
+ causal=causal,
579
+ window_size=(self.config.sliding_window, self.config.sliding_window),
580
+ )
581
+
582
+ return attn_output
583
+
584
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
585
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
586
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
587
+
588
+ # On the first iteration we need to properly re-create the padding mask
589
+ # by slicing it at the proper place
590
+ if kv_seq_len != attention_mask.shape[-1]:
591
+ attention_mask_num_tokens = attention_mask.shape[-1]
592
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
593
+
594
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
595
+
596
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
597
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
598
+
599
+ if query_length == kv_seq_len:
600
+ query_layer = index_first_axis(
601
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
602
+ )
603
+ cu_seqlens_q = cu_seqlens_k
604
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
605
+ indices_q = indices_k
606
+ elif query_length == 1:
607
+ max_seqlen_in_batch_q = 1
608
+ cu_seqlens_q = torch.arange(
609
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
610
+ ) # There is a memcpy here, that is very bad.
611
+ indices_q = cu_seqlens_q[:-1]
612
+ query_layer = query_layer.squeeze(1)
613
+ else:
614
+ # The -q_len: slice assumes left padding.
615
+ attention_mask = attention_mask[:, -query_length:]
616
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
617
+
618
+ return (
619
+ query_layer,
620
+ key_layer,
621
+ value_layer,
622
+ indices_q,
623
+ (cu_seqlens_q, cu_seqlens_k),
624
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
625
+ )
626
+
627
+
628
+ # Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Qwen2
629
+ class Qwen2SdpaAttention(Qwen2Attention):
630
+ """
631
+ Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
632
+ `Qwen2Attention` as the weights of the module stay untouched. The only changes are in the forward pass, to adapt to the
633
+ SDPA API.
634
+ """
635
+
636
+ # Adapted from Qwen2Attention.forward
637
+ def forward(
638
+ self,
639
+ hidden_states: torch.Tensor,
640
+ attention_mask: Optional[torch.Tensor] = None,
641
+ position_ids: Optional[torch.LongTensor] = None,
642
+ past_key_value: Optional[Cache] = None,
643
+ output_attentions: bool = False,
644
+ use_cache: bool = False,
645
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
646
+ if output_attentions:
647
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
648
+ logger.warning_once(
649
+ "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
650
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
651
+ )
652
+ return super().forward(
653
+ hidden_states=hidden_states,
654
+ attention_mask=attention_mask,
655
+ position_ids=position_ids,
656
+ past_key_value=past_key_value,
657
+ output_attentions=output_attentions,
658
+ use_cache=use_cache,
659
+ )
660
+
661
+ bsz, q_len, _ = hidden_states.size()
662
+
663
+ query_states = self.q_proj(hidden_states)
664
+ key_states = self.k_proj(hidden_states)
665
+ value_states = self.v_proj(hidden_states)
666
+
667
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
668
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
669
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
670
+
671
+ kv_seq_len = key_states.shape[-2]
672
+ if past_key_value is not None:
673
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
674
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
675
+
676
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
677
+
678
+ if past_key_value is not None:
679
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
680
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
681
+
682
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
683
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
684
+
685
+ if attention_mask is not None:
686
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
687
+ raise ValueError(
688
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
689
+ )
690
+
691
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
692
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
693
+ if query_states.device.type == "cuda" and attention_mask is not None:
694
+ query_states = query_states.contiguous()
695
+ key_states = key_states.contiguous()
696
+ value_states = value_states.contiguous()
697
+
698
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
699
+ query_states,
700
+ key_states,
701
+ value_states,
702
+ attn_mask=attention_mask,
703
+ dropout_p=self.attention_dropout if self.training else 0.0,
704
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
705
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
706
+ )
707
+
708
+ attn_output = attn_output.transpose(1, 2).contiguous()
709
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
710
+
711
+ attn_output = self.o_proj(attn_output)
712
+
713
+ return attn_output, None, past_key_value
714
+
715
+
716
+ QWEN2_ATTENTION_CLASSES = {
717
+ "eager": Qwen2Attention,
718
+ "flash_attention_2": Qwen2FlashAttention2,
719
+ "sdpa": Qwen2SdpaAttention,
720
+ }
721
+
722
+
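The dictionary above is what lets `Qwen2DecoderLayer` below pick an attention backend from `config._attn_implementation`. A hedged usage sketch of requesting a specific backend at load time; the checkpoint name is only a placeholder, any Qwen2 checkpoint would do:

```python
# Sketch: selecting the attention implementation when loading a Qwen2 checkpoint.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen1.5-0.5B",          # placeholder checkpoint name
    attn_implementation="sdpa",   # or "eager" / "flash_attention_2"
)
# Each decoder layer's self_attn is the class chosen from QWEN2_ATTENTION_CLASSES.
print(type(model.model.layers[0].self_attn).__name__)  # Qwen2SdpaAttention
```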
723
+ class Qwen2DecoderLayer(nn.Module):
724
+ def __init__(self, config: Qwen2Config, layer_idx: int):
725
+ super().__init__()
726
+ self.hidden_size = config.hidden_size
727
+
728
+ if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
729
+ logger.warning_once(
730
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
731
+ "unexpected results may be encountered."
732
+ )
733
+ self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
734
+
735
+ self.mlp = Qwen2MLP(config)
736
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
737
+ self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
738
+
739
+ def forward(
740
+ self,
741
+ hidden_states: torch.Tensor,
742
+ attention_mask: Optional[torch.Tensor] = None,
743
+ position_ids: Optional[torch.LongTensor] = None,
744
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
745
+ output_attentions: Optional[bool] = False,
746
+ use_cache: Optional[bool] = False,
747
+ **kwargs,
748
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
749
+ if "padding_mask" in kwargs:
750
+ warnings.warn(
751
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
752
+ "Please make sure use `attention_mask` instead.`"
753
+ )
754
+ """
755
+ Args:
756
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
757
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
758
+ `(batch, sequence_length)` where padding elements are indicated by 0.
759
+ output_attentions (`bool`, *optional*):
760
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
761
+ returned tensors for more detail.
762
+ use_cache (`bool`, *optional*):
763
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
764
+ (see `past_key_values`).
765
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
766
+ """
767
+
768
+ residual = hidden_states
769
+
770
+ hidden_states = self.input_layernorm(hidden_states)
771
+
772
+ # Self Attention
773
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
774
+ hidden_states=hidden_states,
775
+ attention_mask=attention_mask,
776
+ position_ids=position_ids,
777
+ past_key_value=past_key_value,
778
+ output_attentions=output_attentions,
779
+ use_cache=use_cache,
780
+ )
781
+ hidden_states = residual + hidden_states
782
+
783
+ # Fully Connected
784
+ residual = hidden_states
785
+ hidden_states = self.post_attention_layernorm(hidden_states)
786
+ hidden_states = self.mlp(hidden_states)
787
+ hidden_states = residual + hidden_states
788
+
789
+ outputs = (hidden_states,)
790
+
791
+ if output_attentions:
792
+ outputs += (self_attn_weights,)
793
+
794
+ if use_cache:
795
+ outputs += (present_key_value,)
796
+
797
+ return outputs
798
+
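The forward pass above follows the standard pre-norm residual pattern: normalize, apply the sublayer, add the residual back, once for self-attention and once for the MLP. A schematic sketch (editor's illustration with stand-in modules, not the real layers):

```python
# Schematic pre-norm residual structure of Qwen2DecoderLayer.forward.
import torch
import torch.nn as nn

hidden = 8
norm1, norm2 = nn.LayerNorm(hidden), nn.LayerNorm(hidden)  # stand-ins for Qwen2RMSNorm
attn = nn.Identity()                                       # stand-in for self-attention
mlp = nn.Linear(hidden, hidden)                            # stand-in for Qwen2MLP

x = torch.randn(2, 3, hidden)
x = x + attn(norm1(x))   # residual around attention
x = x + mlp(norm2(x))    # residual around the MLP
print(x.shape)           # torch.Size([2, 3, 8])
```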
799
+
800
+ QWEN2_START_DOCSTRING = r"""
801
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
802
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
803
+ etc.)
804
+
805
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
806
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
807
+ and behavior.
808
+
809
+ Parameters:
810
+ config ([`Qwen2Config`]):
811
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
812
+ load the weights associated with the model, only the configuration. Check out the
813
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
814
+ """
815
+
816
+
817
+ @add_start_docstrings(
818
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
819
+ QWEN2_START_DOCSTRING,
820
+ )
821
+ class Qwen2PreTrainedModel(PreTrainedModel):
822
+ config_class = Qwen2Config
823
+ base_model_prefix = "model"
824
+ supports_gradient_checkpointing = True
825
+ _no_split_modules = ["Qwen2DecoderLayer"]
826
+ _skip_keys_device_placement = "past_key_values"
827
+ _supports_flash_attn_2 = True
828
+ _supports_sdpa = True
829
+ _supports_cache_class = True
830
+
831
+ def _init_weights(self, module):
832
+ std = self.config.initializer_range
833
+ if isinstance(module, nn.Linear):
834
+ module.weight.data.normal_(mean=0.0, std=std)
835
+ if module.bias is not None:
836
+ module.bias.data.zero_()
837
+ elif isinstance(module, nn.Embedding):
838
+ module.weight.data.normal_(mean=0.0, std=std)
839
+ if module.padding_idx is not None:
840
+ module.weight.data[module.padding_idx].zero_()
841
+
842
+
843
+ QWEN2_INPUTS_DOCSTRING = r"""
844
+ Args:
845
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
846
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
847
+ it.
848
+
849
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
850
+ [`PreTrainedTokenizer.__call__`] for details.
851
+
852
+ [What are input IDs?](../glossary#input-ids)
853
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
854
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
855
+
856
+ - 1 for tokens that are **not masked**,
857
+ - 0 for tokens that are **masked**.
858
+
859
+ [What are attention masks?](../glossary#attention-mask)
860
+
861
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
862
+ [`PreTrainedTokenizer.__call__`] for details.
863
+
864
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
865
+ `past_key_values`).
866
+
867
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
868
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
869
+ information on the default strategy.
870
+
871
+ - 1 indicates the head is **not masked**,
872
+ - 0 indicates the head is **masked**.
873
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
874
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
875
+ config.n_positions - 1]`.
876
+
877
+ [What are position IDs?](../glossary#position-ids)
878
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
879
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
880
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
881
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
882
+
883
+ Two formats are allowed:
884
+ - a [`~cache_utils.Cache`] instance;
885
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
886
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
887
+ cache format.
888
+
889
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
890
+ legacy cache format will be returned.
891
+
892
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
893
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
894
+ of shape `(batch_size, sequence_length)`.
895
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
896
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
897
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
898
+ model's internal embedding lookup matrix.
899
+ use_cache (`bool`, *optional*):
900
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
901
+ `past_key_values`).
902
+ output_attentions (`bool`, *optional*):
903
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
904
+ tensors for more detail.
905
+ output_hidden_states (`bool`, *optional*):
906
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
907
+ more detail.
908
+ return_dict (`bool`, *optional*):
909
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
910
+ """
911
+
912
+
913
+ @add_start_docstrings(
914
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
915
+ QWEN2_START_DOCSTRING,
916
+ )
917
+ class Qwen2Model(Qwen2PreTrainedModel):
918
+ """
919
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
920
+
921
+ Args:
922
+ config: Qwen2Config
923
+ """
924
+
925
+ def __init__(self, config: Qwen2Config):
926
+ super().__init__(config)
927
+ self.padding_idx = config.pad_token_id
928
+ self.vocab_size = config.vocab_size
929
+
930
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
931
+ self.layers = nn.ModuleList(
932
+ [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
933
+ )
934
+ self._attn_implementation = config._attn_implementation
935
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
936
+
937
+ self.gradient_checkpointing = False
938
+ # Initialize weights and apply final processing
939
+ self.post_init()
940
+
941
+ def get_input_embeddings(self):
942
+ return self.embed_tokens
943
+
944
+ def set_input_embeddings(self, value):
945
+ self.embed_tokens = value
946
+
947
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
948
+ def forward(
949
+ self,
950
+ input_ids: torch.LongTensor = None,
951
+ attention_mask: Optional[torch.Tensor] = None,
952
+ position_ids: Optional[torch.LongTensor] = None,
953
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
954
+ inputs_embeds: Optional[torch.FloatTensor] = None,
955
+ use_cache: Optional[bool] = None,
956
+ output_attentions: Optional[bool] = None,
957
+ output_hidden_states: Optional[bool] = None,
958
+ return_dict: Optional[bool] = None,
959
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
960
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
961
+ output_hidden_states = (
962
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
963
+ )
964
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
965
+
966
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
967
+
968
+ # retrieve input_ids and inputs_embeds
969
+ if input_ids is not None and inputs_embeds is not None:
970
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
971
+ elif input_ids is not None:
972
+ batch_size, seq_length = input_ids.shape
973
+ elif inputs_embeds is not None:
974
+ batch_size, seq_length, _ = inputs_embeds.shape
975
+ else:
976
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
977
+
978
+ if self.gradient_checkpointing and self.training:
979
+ if use_cache:
980
+ logger.warning_once(
981
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
982
+ )
983
+ use_cache = False
984
+
985
+ past_key_values_length = 0
986
+
987
+ if use_cache:
988
+ use_legacy_cache = not isinstance(past_key_values, Cache)
989
+ if use_legacy_cache:
990
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
991
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
992
+
993
+ if position_ids is None:
994
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
995
+ position_ids = torch.arange(
996
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
997
+ )
998
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
999
+ else:
1000
+ position_ids = position_ids.view(-1, seq_length).long()
1001
+
1002
+ if inputs_embeds is None:
1003
+ inputs_embeds = self.embed_tokens(input_ids)
1004
+
1005
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
1006
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
1007
+ if is_padding_right:
1008
+ raise ValueError(
1009
+ "You are attempting to perform batched generation with padding_side='right'"
1010
+ " this may lead to unexpected behaviour for Flash Attention version of Qwen2. Make sure to "
1011
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1012
+ )
1013
+
1014
+ if self._attn_implementation == "flash_attention_2":
1015
+ # 2d mask is passed through the layers
1016
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1017
+ elif self._attn_implementation == "sdpa" and not output_attentions:
1018
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
1019
+ # the manual implementation that requires a 4D causal mask in all cases.
1020
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1021
+ attention_mask,
1022
+ (batch_size, seq_length),
1023
+ inputs_embeds,
1024
+ past_key_values_length,
1025
+ )
1026
+ else:
1027
+ # 4d mask is passed through the layers
1028
+ attention_mask = _prepare_4d_causal_attention_mask(
1029
+ attention_mask,
1030
+ (batch_size, seq_length),
1031
+ inputs_embeds,
1032
+ past_key_values_length,
1033
+ sliding_window=self.config.sliding_window,
1034
+ )
1035
+
1036
+ hidden_states = inputs_embeds
1037
+
1038
+ # decoder layers
1039
+ all_hidden_states = () if output_hidden_states else None
1040
+ all_self_attns = () if output_attentions else None
1041
+ next_decoder_cache = None
1042
+
1043
+ for decoder_layer in self.layers:
1044
+ if output_hidden_states:
1045
+ all_hidden_states += (hidden_states,)
1046
+
1047
+ if self.gradient_checkpointing and self.training:
1048
+ layer_outputs = self._gradient_checkpointing_func(
1049
+ decoder_layer.__call__,
1050
+ hidden_states,
1051
+ attention_mask,
1052
+ position_ids,
1053
+ past_key_values,
1054
+ output_attentions,
1055
+ use_cache,
1056
+ )
1057
+ else:
1058
+ layer_outputs = decoder_layer(
1059
+ hidden_states,
1060
+ attention_mask=attention_mask,
1061
+ position_ids=position_ids,
1062
+ past_key_value=past_key_values,
1063
+ output_attentions=output_attentions,
1064
+ use_cache=use_cache,
1065
+ )
1066
+
1067
+ hidden_states = layer_outputs[0]
1068
+
1069
+ if use_cache:
1070
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1071
+
1072
+ if output_attentions:
1073
+ all_self_attns += (layer_outputs[1],)
1074
+
1075
+ hidden_states = self.norm(hidden_states)
1076
+
1077
+ # add hidden states from the last decoder layer
1078
+ if output_hidden_states:
1079
+ all_hidden_states += (hidden_states,)
1080
+
1081
+ next_cache = None
1082
+ if use_cache:
1083
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1084
+
1085
+ if not return_dict:
1086
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1087
+ return BaseModelOutputWithPast(
1088
+ last_hidden_state=hidden_states,
1089
+ past_key_values=next_cache,
1090
+ hidden_states=all_hidden_states,
1091
+ attentions=all_self_attns,
1092
+ )
1093
+
1094
+
1095
+ class Qwen2ForCausalLM(Qwen2PreTrainedModel):
1096
+ _tied_weights_keys = ["lm_head.weight"]
1097
+
1098
+ def __init__(self, config):
1099
+ super().__init__(config)
1100
+ self.model = Qwen2Model(config)
1101
+ self.vocab_size = config.vocab_size
1102
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1103
+
1104
+ # Initialize weights and apply final processing
1105
+ self.post_init()
1106
+
1107
+ def get_input_embeddings(self):
1108
+ return self.model.embed_tokens
1109
+
1110
+ def set_input_embeddings(self, value):
1111
+ self.model.embed_tokens = value
1112
+
1113
+ def get_output_embeddings(self):
1114
+ return self.lm_head
1115
+
1116
+ def set_output_embeddings(self, new_embeddings):
1117
+ self.lm_head = new_embeddings
1118
+
1119
+ def set_decoder(self, decoder):
1120
+ self.model = decoder
1121
+
1122
+ def get_decoder(self):
1123
+ return self.model
1124
+
1125
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1126
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1127
+ def forward(
1128
+ self,
1129
+ input_ids: torch.LongTensor = None,
1130
+ attention_mask: Optional[torch.Tensor] = None,
1131
+ position_ids: Optional[torch.LongTensor] = None,
1132
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1133
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1134
+ labels: Optional[torch.LongTensor] = None,
1135
+ use_cache: Optional[bool] = None,
1136
+ output_attentions: Optional[bool] = None,
1137
+ output_hidden_states: Optional[bool] = None,
1138
+ return_dict: Optional[bool] = None,
1139
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1140
+ r"""
1141
+ Args:
1142
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1143
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1144
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1145
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1146
+
1147
+ Returns:
1148
+
1149
+ Example:
1150
+
1151
+ ```python
1152
+ >>> from transformers import AutoTokenizer, Qwen2ForCausalLM
1153
+
1154
+ >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1155
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1156
+
1157
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1158
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1159
+
1160
+ >>> # Generate
1161
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1162
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1163
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1164
+ ```"""
1165
+
1166
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1167
+ output_hidden_states = (
1168
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1169
+ )
1170
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1171
+
1172
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1173
+ outputs = self.model(
1174
+ input_ids=input_ids,
1175
+ attention_mask=attention_mask,
1176
+ position_ids=position_ids,
1177
+ past_key_values=past_key_values,
1178
+ inputs_embeds=inputs_embeds,
1179
+ use_cache=use_cache,
1180
+ output_attentions=output_attentions,
1181
+ output_hidden_states=output_hidden_states,
1182
+ return_dict=return_dict,
1183
+ )
1184
+
1185
+ hidden_states = outputs[0]
1186
+ logits = self.lm_head(hidden_states)
1187
+ logits = logits.float()
1188
+
1189
+ loss = None
1190
+ if labels is not None:
1191
+ # Shift so that tokens < n predict n
1192
+ shift_logits = logits[..., :-1, :].contiguous()
1193
+ shift_labels = labels[..., 1:].contiguous()
1194
+ # Flatten the tokens
1195
+ loss_fct = CrossEntropyLoss()
1196
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1197
+ shift_labels = shift_labels.view(-1)
1198
+ # Enable model parallelism
1199
+ shift_labels = shift_labels.to(shift_logits.device)
1200
+ loss = loss_fct(shift_logits, shift_labels)
1201
+
1202
+ if not return_dict:
1203
+ output = (logits,) + outputs[1:]
1204
+ return (loss,) + output if loss is not None else output
1205
+
1206
+ return CausalLMOutputWithPast(
1207
+ loss=loss,
1208
+ logits=logits,
1209
+ past_key_values=outputs.past_key_values,
1210
+ hidden_states=outputs.hidden_states,
1211
+ attentions=outputs.attentions,
1212
+ )
1213
+
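The loss block in the forward above shifts logits and labels so that the logits at position i are scored against the token at position i + 1. A tiny worked sketch (editor's illustration with made-up values):

```python
# Worked sketch of the causal LM shift used in Qwen2ForCausalLM.forward above.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 5
logits = torch.randn(1, 4, vocab_size)           # [batch, seq_len, vocab]
labels = torch.tensor([[2, 3, 1, 4]])            # [batch, seq_len]

shift_logits = logits[..., :-1, :].contiguous()  # drop the last position
shift_labels = labels[..., 1:].contiguous()      # drop the first token

loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.shape)                                # torch.Size([]) -- a scalar
```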
1214
+ def prepare_inputs_for_generation(
1215
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1216
+ ):
1217
+ # Omit tokens covered by past_key_values
1218
+ if past_key_values is not None:
1219
+ if isinstance(past_key_values, Cache):
1220
+ cache_length = past_key_values.get_seq_length()
1221
+ past_length = past_key_values.seen_tokens
1222
+ max_cache_length = past_key_values.get_max_length()
1223
+ else:
1224
+ cache_length = past_length = past_key_values[0][0].shape[2]
1225
+ max_cache_length = None
1226
+
1227
+ # Keep only the unprocessed tokens:
1228
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1229
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1230
+ # input)
1231
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1232
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1233
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1234
+ # input_ids based on the past_length.
1235
+ elif past_length < input_ids.shape[1]:
1236
+ input_ids = input_ids[:, past_length:]
1237
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1238
+
1239
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1240
+ if (
1241
+ max_cache_length is not None
1242
+ and attention_mask is not None
1243
+ and cache_length + input_ids.shape[1] > max_cache_length
1244
+ ):
1245
+ attention_mask = attention_mask[:, -max_cache_length:]
1246
+
1247
+ position_ids = kwargs.get("position_ids", None)
1248
+ if attention_mask is not None and position_ids is None:
1249
+ # create position_ids on the fly for batch generation
1250
+ position_ids = attention_mask.long().cumsum(-1) - 1
1251
+ position_ids.masked_fill_(attention_mask == 0, 1)
1252
+ if past_key_values:
1253
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1254
+
1255
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1256
+ if inputs_embeds is not None and past_key_values is None:
1257
+ model_inputs = {"inputs_embeds": inputs_embeds}
1258
+ else:
1259
+ model_inputs = {"input_ids": input_ids}
1260
+
1261
+ model_inputs.update(
1262
+ {
1263
+ "position_ids": position_ids,
1264
+ "past_key_values": past_key_values,
1265
+ "use_cache": kwargs.get("use_cache"),
1266
+ "attention_mask": attention_mask,
1267
+ }
1268
+ )
1269
+ return model_inputs
1270
+
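`prepare_inputs_for_generation` above rebuilds `position_ids` from the attention mask when none are provided, so that left-padded rows still start counting positions at their first real token. A small sketch of that cumsum trick:

```python
# Sketch of the on-the-fly position_ids construction for a left-padded batch.
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                               [1, 1, 1, 1, 1]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)  # pad slots get a dummy value
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])
```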
1271
+ @staticmethod
1272
+ def _reorder_cache(past_key_values, beam_idx):
1273
+ reordered_past = ()
1274
+ for layer_past in past_key_values:
1275
+ reordered_past += (
1276
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1277
+ )
1278
+ return reordered_past
1279
+
1280
+
1281
+ @add_start_docstrings(
1282
+ """
1283
+ The Qwen2 Model transformer with a sequence classification head on top (linear layer).
1284
+
1285
+ [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1286
+ (e.g. GPT-2) do.
1287
+
1288
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1289
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1290
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1291
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1292
+ each row of the batch).
1293
+ """,
1294
+ QWEN2_START_DOCSTRING,
1295
+ )
1296
+ class Qwen2ForSequenceClassification(Qwen2PreTrainedModel):
1297
+ def __init__(self, config):
1298
+ super().__init__(config)
1299
+ self.num_labels = config.num_labels
1300
+ self.model = Qwen2Model(config)
1301
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1302
+
1303
+ # Initialize weights and apply final processing
1304
+ self.post_init()
1305
+
1306
+ def get_input_embeddings(self):
1307
+ return self.model.embed_tokens
1308
+
1309
+ def set_input_embeddings(self, value):
1310
+ self.model.embed_tokens = value
1311
+
1312
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1313
+ def forward(
1314
+ self,
1315
+ input_ids: torch.LongTensor = None,
1316
+ attention_mask: Optional[torch.Tensor] = None,
1317
+ position_ids: Optional[torch.LongTensor] = None,
1318
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1319
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1320
+ labels: Optional[torch.LongTensor] = None,
1321
+ use_cache: Optional[bool] = None,
1322
+ output_attentions: Optional[bool] = None,
1323
+ output_hidden_states: Optional[bool] = None,
1324
+ return_dict: Optional[bool] = None,
1325
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1326
+ r"""
1327
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1328
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1329
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1330
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1331
+ """
1332
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1333
+
1334
+ transformer_outputs = self.model(
1335
+ input_ids,
1336
+ attention_mask=attention_mask,
1337
+ position_ids=position_ids,
1338
+ past_key_values=past_key_values,
1339
+ inputs_embeds=inputs_embeds,
1340
+ use_cache=use_cache,
1341
+ output_attentions=output_attentions,
1342
+ output_hidden_states=output_hidden_states,
1343
+ return_dict=return_dict,
1344
+ )
1345
+ hidden_states = transformer_outputs[0]
1346
+ logits = self.score(hidden_states)
1347
+
1348
+ if input_ids is not None:
1349
+ batch_size = input_ids.shape[0]
1350
+ else:
1351
+ batch_size = inputs_embeds.shape[0]
1352
+
1353
+ if self.config.pad_token_id is None and batch_size != 1:
1354
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1355
+ if self.config.pad_token_id is None:
1356
+ sequence_lengths = -1
1357
+ else:
1358
+ if input_ids is not None:
1359
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1360
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1361
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1362
+ sequence_lengths = sequence_lengths.to(logits.device)
1363
+ else:
1364
+ sequence_lengths = -1
1365
+
1366
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1367
+
1368
+ loss = None
1369
+ if labels is not None:
1370
+ labels = labels.to(logits.device)
1371
+ if self.config.problem_type is None:
1372
+ if self.num_labels == 1:
1373
+ self.config.problem_type = "regression"
1374
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1375
+ self.config.problem_type = "single_label_classification"
1376
+ else:
1377
+ self.config.problem_type = "multi_label_classification"
1378
+
1379
+ if self.config.problem_type == "regression":
1380
+ loss_fct = MSELoss()
1381
+ if self.num_labels == 1:
1382
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1383
+ else:
1384
+ loss = loss_fct(pooled_logits, labels)
1385
+ elif self.config.problem_type == "single_label_classification":
1386
+ loss_fct = CrossEntropyLoss()
1387
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1388
+ elif self.config.problem_type == "multi_label_classification":
1389
+ loss_fct = BCEWithLogitsLoss()
1390
+ loss = loss_fct(pooled_logits, labels)
1391
+ if not return_dict:
1392
+ output = (pooled_logits,) + transformer_outputs[1:]
1393
+ return ((loss,) + output) if loss is not None else output
1394
+
1395
+ return SequenceClassifierOutputWithPast(
1396
+ loss=loss,
1397
+ logits=pooled_logits,
1398
+ past_key_values=transformer_outputs.past_key_values,
1399
+ hidden_states=transformer_outputs.hidden_states,
1400
+ attentions=transformer_outputs.attentions,
1401
+ )
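The pooling above locates the last non-padding token per row via the ONNX-friendly argmax-and-modulo trick rather than reverse indexing. A small sketch (editor's illustration with made-up ids):

```python
# Sketch of the last-non-pad-token lookup used by Qwen2ForSequenceClassification above.
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],    # 3 real tokens -> last index 2
                          [8, 9, 1, 2, 3]])   # no padding    -> last index 4
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]  # wraps -1 to the last index
print(sequence_lengths)  # tensor([2, 4])
```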
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/tokenization_qwen2.py ADDED
@@ -0,0 +1,345 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Qwen2."""
16
+
17
+ import json
18
+ import os
19
+ import unicodedata
20
+ from functools import lru_cache
21
+ from typing import Optional, Tuple
22
+
23
+ import regex as re
24
+
25
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
26
+ from ...utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {
32
+ "vocab_file": "vocab.json",
33
+ "merges_file": "merges.txt",
34
+ }
35
+
36
+ PRETRAINED_VOCAB_FILES_MAP = {
37
+ "vocab_file": {"qwen/qwen-tokenizer": "https://huggingface.co/qwen/qwen-tokenizer/resolve/main/vocab.json"},
38
+ "merges_file": {"qwen/qwen-tokenizer": "https://huggingface.co/qwen/qwen-tokenizer/resolve/main/merges.txt"},
39
+ }
40
+
41
+ MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}
42
+
43
+ PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
44
+
45
+
46
+ @lru_cache()
47
+ # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
48
+ def bytes_to_unicode():
49
+ """
50
+ Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
51
+ characters the bpe code barfs on.
52
+
53
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
54
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
55
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
56
+ tables between utf-8 bytes and unicode strings.
57
+ """
58
+ bs = (
59
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
60
+ )
61
+ cs = bs[:]
62
+ n = 0
63
+ for b in range(2**8):
64
+ if b not in bs:
65
+ bs.append(b)
66
+ cs.append(2**8 + n)
67
+ n += 1
68
+ cs = [chr(n) for n in cs]
69
+ return dict(zip(bs, cs))
70
+
71
+
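A quick demonstration (editor's illustration) of the table `bytes_to_unicode` above builds: printable bytes map to themselves, while bytes the BPE code would choke on (space, newline, control characters) are shifted to unused code points above 255. The construction is re-stated inline so the snippet runs standalone.

```python
# Inline re-statement of the byte-to-unicode mapping, for demonstration only.
bs = (
    list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
    if b not in bs:
        bs.append(b)
        cs.append(2**8 + n)
        n += 1
mapping = dict(zip(bs, [chr(c) for c in cs]))

print(mapping[ord("A")])   # 'A'  (printable ASCII maps to itself)
print(mapping[ord(" ")])   # 'Ġ'  (space is remapped)
print(mapping[ord("\n")])  # 'Ċ'  (newline is remapped)
print(len(mapping))        # 256
```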
72
+ # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
73
+ def get_pairs(word):
74
+ """
75
+ Return set of symbol pairs in a word.
76
+
77
+ Word is represented as tuple of symbols (symbols being variable-length strings).
78
+ """
79
+ pairs = set()
80
+ prev_char = word[0]
81
+ for char in word[1:]:
82
+ pairs.add((prev_char, char))
83
+ prev_char = char
84
+ return pairs
85
+
86
+
87
+ class Qwen2Tokenizer(PreTrainedTokenizer):
88
+ """
89
+ Construct a Qwen2 tokenizer. Based on byte-level Byte-Pair-Encoding.
90
+
91
+ As with GPT2Tokenizer, this tokenizer has been trained to treat spaces as parts of the tokens, so a word will
92
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
93
+
94
+ ```python
95
+ >>> from transformers import Qwen2Tokenizer
96
+
97
+ >>> tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer")
98
+ >>> tokenizer("Hello world")["input_ids"]
99
+ [9707, 1879]
100
+
101
+ >>> tokenizer(" Hello world")["input_ids"]
102
+ [21927, 1879]
103
+ ```
104
+ This is expected.
105
+
106
+ You should not use GPT2Tokenizer instead, because of the different pretokenization rules.
107
+
108
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
109
+ this superclass for more information regarding those methods.
110
+
111
+ Args:
112
+ vocab_file (`str`):
113
+ Path to the vocabulary file.
114
+ merges_file (`str`):
115
+ Path to the merges file.
116
+ errors (`str`, *optional*, defaults to `"replace"`):
117
+ Paradigm to follow when decoding bytes to UTF-8. See
118
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
119
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
120
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
121
+ token instead.
122
+ bos_token (`str`, *optional*):
123
+ The beginning of sequence token. Not applicable for this tokenizer.
124
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
125
+ The end of sequence token.
126
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
127
+ The token used for padding, for example when batching sequences of different lengths.
128
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
129
+ Whether or not the model should clean up the spaces that were added when splitting the input text during the
130
+ tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
131
+ split_special_tokens (`bool`, *optional*, defaults to `False`):
132
+ Whether or not the special tokens should be split during the tokenization process. The default behavior is
133
+ to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then `tokenizer.tokenize("<|endoftext|>")` =
134
+ `['<|endoftext|>']`. Otherwise, if `split_special_tokens=True`, then `tokenizer.tokenize("<|endoftext|>")` will give `['<',
135
+ '|', 'endo', 'ft', 'ext', '|', '>']`. This argument is only supported for `slow` tokenizers for the moment.
136
+ """
137
+
138
+ vocab_files_names = VOCAB_FILES_NAMES
139
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
140
+ max_model_input_sizes = MAX_MODEL_INPUT_SIZES
141
+ model_input_names = ["input_ids", "attention_mask"]
142
+
143
+ def __init__(
144
+ self,
145
+ vocab_file,
146
+ merges_file,
147
+ errors="replace",
148
+ unk_token="<|endoftext|>",
149
+ bos_token=None,
150
+ eos_token="<|endoftext|>",
151
+ pad_token="<|endoftext|>",
152
+ clean_up_tokenization_spaces=False,
153
+ split_special_tokens=False,
154
+ **kwargs,
155
+ ):
156
+ # Qwen vocab does not contain control tokens; added tokens need to be special
157
+ bos_token = (
158
+ AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
159
+ if isinstance(bos_token, str)
160
+ else bos_token
161
+ )
162
+ eos_token = (
163
+ AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
164
+ if isinstance(eos_token, str)
165
+ else eos_token
166
+ )
167
+ unk_token = (
168
+ AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
169
+ if isinstance(unk_token, str)
170
+ else unk_token
171
+ )
172
+ pad_token = (
173
+ AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
174
+ if isinstance(pad_token, str)
175
+ else pad_token
176
+ )
177
+
178
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
179
+ self.encoder = json.load(vocab_handle)
180
+ self.decoder = {v: k for k, v in self.encoder.items()}
181
+ self.errors = errors # how to handle errors in decoding
182
+ self.byte_encoder = bytes_to_unicode()
183
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
184
+ bpe_merges = []
185
+ with open(merges_file, encoding="utf-8") as merges_handle:
186
+ for line in merges_handle:
187
+ line = line.strip()
188
+ if not line or line.startswith("#"):
189
+ continue
190
+ bpe_merges.append(tuple(line.split()))
191
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
192
+ # NOTE: the cache can grow without bound and will get really large for long running processes
193
+ # (esp. for texts in languages that do not use spaces between words, e.g. Chinese); technically
194
+ # not a memory leak but appears as one.
195
+ # GPT2Tokenizer has the same problem, so let's be consistent.
196
+ self.cache = {}
197
+
198
+ self.pat = re.compile(PRETOKENIZE_REGEX)
199
+
200
+ if kwargs.get("add_prefix_space", False):
201
+ logger.warning_once(
202
+ f"{self.__class__.__name} does not support `add_prefix_space`, setting it to True has no effect."
203
+ )
204
+
205
+ super().__init__(
206
+ errors=errors,
207
+ bos_token=bos_token,
208
+ eos_token=eos_token,
209
+ pad_token=pad_token,
210
+ unk_token=unk_token,
211
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
212
+ split_special_tokens=split_special_tokens,
213
+ **kwargs,
214
+ )
215
+
216
+ @property
217
+ def vocab_size(self) -> int:
218
+ return len(self.encoder)
219
+
220
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
221
+ def get_vocab(self):
222
+ return dict(self.encoder, **self.added_tokens_encoder)
223
+
224
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
225
+ def bpe(self, token):
226
+ if token in self.cache:
227
+ return self.cache[token]
228
+ word = tuple(token)
229
+ pairs = get_pairs(word)
230
+
231
+ if not pairs:
232
+ return token
233
+
234
+ while True:
235
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
236
+ if bigram not in self.bpe_ranks:
237
+ break
238
+ first, second = bigram
239
+ new_word = []
240
+ i = 0
241
+ while i < len(word):
242
+ try:
243
+ j = word.index(first, i)
244
+ except ValueError:
245
+ new_word.extend(word[i:])
246
+ break
247
+ else:
248
+ new_word.extend(word[i:j])
249
+ i = j
250
+
251
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
252
+ new_word.append(first + second)
253
+ i += 2
254
+ else:
255
+ new_word.append(word[i])
256
+ i += 1
257
+ new_word = tuple(new_word)
258
+ word = new_word
259
+ if len(word) == 1:
260
+ break
261
+ else:
262
+ pairs = get_pairs(word)
263
+ word = " ".join(word)
264
+ self.cache[token] = word
265
+ return word
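+ # Illustrative example (not part of the original file): if bpe_ranks were built from the merges
+ # ("h", "e") -> 0 and ("he", "l") -> 1, then self.bpe("hello") returns "hel l o":
+ # "h"+"e" is merged first, then "he"+"l", and no further merge applies.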
266
+
267
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
268
+ def _tokenize(self, text):
269
+ """Tokenize a string."""
270
+ bpe_tokens = []
271
+ for token in re.findall(self.pat, text):
272
+ token = "".join(
273
+ self.byte_encoder[b] for b in token.encode("utf-8")
274
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
275
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
276
+ return bpe_tokens
277
+
278
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
279
+ def _convert_token_to_id(self, token):
280
+ """Converts a token (str) in an id using the vocab."""
281
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
282
+
283
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
284
+ def _convert_id_to_token(self, index):
285
+ """Converts an index (integer) in a token (str) using the vocab."""
286
+ return self.decoder.get(index)
287
+
288
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
289
+ def convert_tokens_to_string(self, tokens):
290
+ """Converts a sequence of tokens (string) in a single string."""
291
+ text = "".join(tokens)
292
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
293
+ return text
294
+
295
+ def decode(
296
+ self,
297
+ token_ids,
298
+ skip_special_tokens: bool = False,
299
+ clean_up_tokenization_spaces: Optional[bool] = False,
300
+ spaces_between_special_tokens: bool = False,
301
+ **kwargs,
302
+ ) -> str:
303
+ # `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers
304
+ # and cannot be configured elsewhere, but it should default to False for Qwen2Tokenizer
305
+ return super().decode(
306
+ token_ids,
307
+ skip_special_tokens=skip_special_tokens,
308
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
309
+ spaces_between_special_tokens=spaces_between_special_tokens,
310
+ **kwargs,
311
+ )
312
+
313
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
314
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
315
+ if not os.path.isdir(save_directory):
316
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
317
+ return
318
+ vocab_file = os.path.join(
319
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
320
+ )
321
+ merge_file = os.path.join(
322
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
323
+ )
324
+
325
+ with open(vocab_file, "w", encoding="utf-8") as f:
326
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
327
+
328
+ index = 0
329
+ with open(merge_file, "w", encoding="utf-8") as writer:
330
+ writer.write("#version: 0.2\n")
331
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
332
+ if index != token_index:
333
+ logger.warning(
334
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
335
+ " Please check that the tokenizer is not corrupted!"
336
+ )
337
+ index = token_index
338
+ writer.write(" ".join(bpe_tokens) + "\n")
339
+ index += 1
340
+
341
+ return vocab_file, merge_file
342
+
343
+ def prepare_for_tokenization(self, text, **kwargs):
344
+ text = unicodedata.normalize("NFC", text)
345
+ return (text, kwargs)
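Taken together, the slow tokenizer defined above can be exercised end to end from a local `vocab.json` / `merges.txt` pair. The sketch below is illustrative only (the file paths are placeholders, not part of this commit) and uses only the constructor and methods defined in this file:

```python
from transformers import Qwen2Tokenizer

# Placeholder paths: point these at a real byte-level BPE vocab.json / merges.txt pair.
tokenizer = Qwen2Tokenizer(vocab_file="vocab.json", merges_file="merges.txt")

tokens = tokenizer.tokenize("Hello world")      # byte-level BPE pieces, e.g. ["Hello", "Ġworld"]
ids = tokenizer.convert_tokens_to_ids(tokens)   # lookup in self.encoder
print(tokenizer.decode(ids))                    # "Hello world" (byte-level BPE round-trips losslessly)
```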
env-llmeval/lib/python3.10/site-packages/transformers/models/qwen2/tokenization_qwen2_fast.py ADDED
@@ -0,0 +1,143 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Qwen2."""
16
+
17
+ from typing import Optional, Tuple
18
+
19
+ from ...tokenization_utils import AddedToken
20
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
21
+ from ...utils import logging
22
+ from .tokenization_qwen2 import Qwen2Tokenizer
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {
28
+ "vocab_file": "vocab.json",
29
+ "merges_file": "merges.txt",
30
+ "tokenizer_file": "tokenizer.json",
31
+ }
32
+
33
+ PRETRAINED_VOCAB_FILES_MAP = {
34
+ "vocab_file": {"qwen/qwen-tokenizer": "https://huggingface.co/qwen/qwen-tokenizer/resolve/main/vocab.json"},
35
+ "merges_file": {"qwen/qwen-tokenizer": "https://huggingface.co/qwen/qwen-tokenizer/resolve/main/merges.txt"},
36
+ "tokenizer_file": {
37
+ "qwen/qwen-tokenizer": "https://huggingface.co/qwen/qwen-tokenizer/resolve/main/tokenizer.json"
38
+ },
39
+ }
40
+
41
+ MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}
42
+
43
+
44
+ class Qwen2TokenizerFast(PreTrainedTokenizerFast):
45
+ """
46
+ Construct a "fast" Qwen2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
47
+ Byte-Pair-Encoding.
48
+
49
+ As with GPT2Tokenizer, this tokenizer has been trained to treat spaces as parts of the tokens, so a word will
50
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
51
+
52
+ ```python
53
+ >>> from transformers import Qwen2TokenizerFast
54
+
55
+ >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
56
+ >>> tokenizer("Hello world")["input_ids"]
57
+ [9707, 1879]
58
+
59
+ >>> tokenizer(" Hello world")["input_ids"]
60
+ [21927, 1879]
61
+ ```
62
+ This is expected.
63
+
64
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
65
+ refer to this superclass for more information regarding those methods.
66
+
67
+ Args:
68
+ vocab_file (`str`, *optional*):
69
+ Path to the vocabulary file.
70
+ merges_file (`str`, *optional*):
71
+ Path to the merges file.
72
+ tokenizer_file (`str`, *optional*):
73
+ Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
74
+ contains everything needed to load the tokenizer.
75
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
76
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
77
+ token instead. Not applicable to this tokenizer.
78
+ bos_token (`str`, *optional*):
79
+ The beginning of sequence token. Not applicable for this tokenizer.
80
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
81
+ The end of sequence token.
82
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
83
+ The token used for padding, for example when batching sequences of different lengths.
84
+ """
85
+
86
+ vocab_files_names = VOCAB_FILES_NAMES
87
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
88
+ max_model_input_sizes = MAX_MODEL_INPUT_SIZES
89
+ model_input_names = ["input_ids", "attention_mask"]
90
+ slow_tokenizer_class = Qwen2Tokenizer
91
+
92
+ def __init__(
93
+ self,
94
+ vocab_file=None,
95
+ merges_file=None,
96
+ tokenizer_file=None,
97
+ unk_token="<|endoftext|>",
98
+ bos_token=None,
99
+ eos_token="<|endoftext|>",
100
+ pad_token="<|endoftext|>",
101
+ **kwargs,
102
+ ):
103
+ # We need to at least pass vocab_file and merges_file to base class
104
+ # in case a slow tokenizer needs to be initialized; the others can be
105
+ # configured through files.
106
+ # following GPT2TokenizerFast, also adding unk_token, bos_token, and eos_token
107
+
108
+ bos_token = (
109
+ AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
110
+ if isinstance(bos_token, str)
111
+ else bos_token
112
+ )
113
+ eos_token = (
114
+ AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
115
+ if isinstance(eos_token, str)
116
+ else eos_token
117
+ )
118
+ unk_token = (
119
+ AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
120
+ if isinstance(unk_token, str)
121
+ else unk_token
122
+ )
123
+ pad_token = (
124
+ AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
125
+ if isinstance(pad_token, str)
126
+ else pad_token
127
+ )
128
+
129
+ super().__init__(
130
+ vocab_file,
131
+ merges_file,
132
+ tokenizer_file=tokenizer_file,
133
+ unk_token=unk_token,
134
+ bos_token=bos_token,
135
+ eos_token=eos_token,
136
+ pad_token=pad_token,
137
+ **kwargs,
138
+ )
139
+
140
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
141
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
142
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
143
+ return tuple(files)
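A quick usage sketch for the fast tokenizer (illustrative; the checkpoint name is the one used in the docstring example above and the output directory is a placeholder). `save_pretrained` internally calls the `save_vocabulary` override defined above:

```python
from transformers import Qwen2TokenizerFast

tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
print(tokenizer("Hello world")["input_ids"])  # [9707, 1879], matching the slow tokenizer

# Re-export the BPE files; this goes through save_vocabulary() and the backing tokenizers model.
tokenizer.save_pretrained("./qwen2-tokenizer-export")
```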
env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__init__.py ADDED
@@ -0,0 +1,79 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_splinter": ["SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SplinterConfig"],
21
+ "tokenization_splinter": ["SplinterTokenizer"],
22
+ }
23
+
24
+ try:
25
+ if not is_tokenizers_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["tokenization_splinter_fast"] = ["SplinterTokenizerFast"]
31
+
32
+ try:
33
+ if not is_torch_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["modeling_splinter"] = [
39
+ "SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST",
40
+ "SplinterForQuestionAnswering",
41
+ "SplinterForPreTraining",
42
+ "SplinterLayer",
43
+ "SplinterModel",
44
+ "SplinterPreTrainedModel",
45
+ ]
46
+
47
+
48
+ if TYPE_CHECKING:
49
+ from .configuration_splinter import SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP, SplinterConfig
50
+ from .tokenization_splinter import SplinterTokenizer
51
+
52
+ try:
53
+ if not is_tokenizers_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ from .tokenization_splinter_fast import SplinterTokenizerFast
59
+
60
+ try:
61
+ if not is_torch_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ from .modeling_splinter import (
67
+ SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST,
68
+ SplinterForPreTraining,
69
+ SplinterForQuestionAnswering,
70
+ SplinterLayer,
71
+ SplinterModel,
72
+ SplinterPreTrainedModel,
73
+ )
74
+
75
+
76
+ else:
77
+ import sys
78
+
79
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
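Because this `__init__` registers the package as a `_LazyModule`, importing `transformers` does not pull in the Splinter submodules; they are resolved only when one of the exported names is first accessed. A minimal sketch, assuming `torch` and `tokenizers` are installed (otherwise the optional-dependency guards above hide the corresponding names):

```python
from transformers.models.splinter import SplinterConfig, SplinterModel, SplinterTokenizerFast

# Accessing the names above is what triggers the lazy imports of configuration_splinter,
# modeling_splinter and tokenization_splinter_fast.
config = SplinterConfig()
model = SplinterModel(config)  # randomly initialized, no checkpoint is downloaded
```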
env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.33 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/configuration_splinter.cpython-310.pyc ADDED
Binary file (5.32 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/modeling_splinter.cpython-310.pyc ADDED
Binary file (34.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter.cpython-310.pyc ADDED
Binary file (17.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/__pycache__/tokenization_splinter_fast.cpython-310.pyc ADDED
Binary file (8.08 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/configuration_splinter.py ADDED
@@ -0,0 +1,128 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Tel Aviv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Splinter model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "tau/splinter-base": "https://huggingface.co/tau/splinter-base/resolve/main/config.json",
25
+ "tau/splinter-base-qass": "https://huggingface.co/tau/splinter-base-qass/resolve/main/config.json",
26
+ "tau/splinter-large": "https://huggingface.co/tau/splinter-large/resolve/main/config.json",
27
+ "tau/splinter-large-qass": "https://huggingface.co/tau/splinter-large-qass/resolve/main/config.json",
28
+ # See all Splinter models at https://huggingface.co/models?filter=splinter
29
+ }
30
+
31
+
32
+ class SplinterConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`SplinterModel`]. It is used to instantiate a
35
+ Splinter model according to the specified arguments, defining the model architecture. Instantiating a configuration
36
+ with the defaults will yield a similar configuration to that of the Splinter
37
+ [tau/splinter-base](https://huggingface.co/tau/splinter-base) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ vocab_size (`int`, *optional*, defaults to 30522):
45
+ Vocabulary size of the Splinter model. Defines the number of different tokens that can be represented by
46
+ the `inputs_ids` passed when calling [`SplinterModel`].
47
+ hidden_size (`int`, *optional*, defaults to 768):
48
+ Dimension of the encoder layers and the pooler layer.
49
+ num_hidden_layers (`int`, *optional*, defaults to 12):
50
+ Number of hidden layers in the Transformer encoder.
51
+ num_attention_heads (`int`, *optional*, defaults to 12):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ intermediate_size (`int`, *optional*, defaults to 3072):
54
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
55
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
58
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout ratio for the attention probabilities.
62
+ max_position_embeddings (`int`, *optional*, defaults to 512):
63
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
64
+ just in case (e.g., 512 or 1024 or 2048).
65
+ type_vocab_size (`int`, *optional*, defaults to 2):
66
+ The vocabulary size of the `token_type_ids` passed when calling [`SplinterModel`].
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
70
+ The epsilon used by the layer normalization layers.
71
+ use_cache (`bool`, *optional*, defaults to `True`):
72
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
73
+ relevant if `config.is_decoder=True`.
74
+ question_token_id (`int`, *optional*, defaults to 104):
75
+ The id of the `[QUESTION]` token.
76
+
77
+ Example:
78
+
79
+ ```python
80
+ >>> from transformers import SplinterModel, SplinterConfig
81
+
82
+ >>> # Initializing a Splinter tau/splinter-base style configuration
83
+ >>> configuration = SplinterConfig()
84
+
85
+ >>> # Initializing a model from the tau/splinter-base style configuration
86
+ >>> model = SplinterModel(configuration)
87
+
88
+ >>> # Accessing the model configuration
89
+ >>> configuration = model.config
90
+ ```"""
91
+
92
+ model_type = "splinter"
93
+
94
+ def __init__(
95
+ self,
96
+ vocab_size=30522,
97
+ hidden_size=768,
98
+ num_hidden_layers=12,
99
+ num_attention_heads=12,
100
+ intermediate_size=3072,
101
+ hidden_act="gelu",
102
+ hidden_dropout_prob=0.1,
103
+ attention_probs_dropout_prob=0.1,
104
+ max_position_embeddings=512,
105
+ type_vocab_size=2,
106
+ initializer_range=0.02,
107
+ layer_norm_eps=1e-12,
108
+ use_cache=True,
109
+ pad_token_id=0,
110
+ question_token_id=104,
111
+ **kwargs,
112
+ ):
113
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
114
+
115
+ self.vocab_size = vocab_size
116
+ self.max_position_embeddings = max_position_embeddings
117
+ self.hidden_size = hidden_size
118
+ self.num_hidden_layers = num_hidden_layers
119
+ self.num_attention_heads = num_attention_heads
120
+ self.intermediate_size = intermediate_size
121
+ self.hidden_act = hidden_act
122
+ self.hidden_dropout_prob = hidden_dropout_prob
123
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
124
+ self.initializer_range = initializer_range
125
+ self.type_vocab_size = type_vocab_size
126
+ self.layer_norm_eps = layer_norm_eps
127
+ self.use_cache = use_cache
128
+ self.question_token_id = question_token_id
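Beyond the defaults shown in the class docstring, any of the arguments above can be overridden at construction time. A small sketch with illustrative values (these sizes do not correspond to a released checkpoint):

```python
from transformers import SplinterConfig

config = SplinterConfig(
    hidden_size=512,
    num_hidden_layers=8,
    num_attention_heads=8,
    intermediate_size=2048,
    question_token_id=104,  # id of the [QUESTION] token, as documented above
)
print(config.model_type)  # "splinter"
```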
env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/modeling_splinter.py ADDED
@@ -0,0 +1,1109 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Tel Aviv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Splinter model."""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, ModelOutput, QuestionAnsweringModelOutput
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
31
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
32
+ from .configuration_splinter import SplinterConfig
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+ _CHECKPOINT_FOR_DOC = "tau/splinter-base"
38
+ _CONFIG_FOR_DOC = "SplinterConfig"
39
+
40
+ SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = [
41
+ "tau/splinter-base",
42
+ "tau/splinter-base-qass",
43
+ "tau/splinter-large",
44
+ "tau/splinter-large-qass",
45
+ # See all Splinter models at https://huggingface.co/models?filter=splinter
46
+ ]
47
+
48
+
49
+ class SplinterEmbeddings(nn.Module):
50
+ """Construct the embeddings from word, position and token_type embeddings."""
51
+
52
+ def __init__(self, config):
53
+ super().__init__()
54
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
55
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
56
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
57
+
58
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
59
+ # any TensorFlow checkpoint file
60
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
61
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
62
+
63
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
64
+ self.register_buffer(
65
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
66
+ )
67
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
68
+
69
+ def forward(
70
+ self,
71
+ input_ids: Optional[torch.LongTensor] = None,
72
+ token_type_ids: Optional[torch.LongTensor] = None,
73
+ position_ids: Optional[torch.LongTensor] = None,
74
+ inputs_embeds: Optional[torch.FloatTensor] = None,
75
+ past_key_values_length: Optional[int] = 0,
76
+ ) -> Tuple:
77
+ if input_ids is not None:
78
+ input_shape = input_ids.size()
79
+ else:
80
+ input_shape = inputs_embeds.size()[:-1]
81
+
82
+ seq_length = input_shape[1]
83
+
84
+ if position_ids is None:
85
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
86
+
87
+ if token_type_ids is None:
88
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
89
+
90
+ if inputs_embeds is None:
91
+ inputs_embeds = self.word_embeddings(input_ids)
92
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
93
+
94
+ embeddings = inputs_embeds + token_type_embeddings
95
+ if self.position_embedding_type == "absolute":
96
+ position_embeddings = self.position_embeddings(position_ids)
97
+ embeddings += position_embeddings
98
+ embeddings = self.LayerNorm(embeddings)
99
+ embeddings = self.dropout(embeddings)
100
+ return embeddings
101
+
102
+
103
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Splinter
104
+ class SplinterSelfAttention(nn.Module):
105
+ def __init__(self, config, position_embedding_type=None):
106
+ super().__init__()
107
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
108
+ raise ValueError(
109
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
110
+ f"heads ({config.num_attention_heads})"
111
+ )
112
+
113
+ self.num_attention_heads = config.num_attention_heads
114
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
115
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
116
+
117
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
118
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
119
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
120
+
121
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
122
+ self.position_embedding_type = position_embedding_type or getattr(
123
+ config, "position_embedding_type", "absolute"
124
+ )
125
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
126
+ self.max_position_embeddings = config.max_position_embeddings
127
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
128
+
129
+ self.is_decoder = config.is_decoder
130
+
131
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
132
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
133
+ x = x.view(new_x_shape)
134
+ return x.permute(0, 2, 1, 3)
135
+
136
+ def forward(
137
+ self,
138
+ hidden_states: torch.Tensor,
139
+ attention_mask: Optional[torch.FloatTensor] = None,
140
+ head_mask: Optional[torch.FloatTensor] = None,
141
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
142
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
143
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
144
+ output_attentions: Optional[bool] = False,
145
+ ) -> Tuple[torch.Tensor]:
146
+ mixed_query_layer = self.query(hidden_states)
147
+
148
+ # If this is instantiated as a cross-attention module, the keys
149
+ # and values come from an encoder; the attention mask needs to be
150
+ # such that the encoder's padding tokens are not attended to.
151
+ is_cross_attention = encoder_hidden_states is not None
152
+
153
+ if is_cross_attention and past_key_value is not None:
154
+ # reuse k,v, cross_attentions
155
+ key_layer = past_key_value[0]
156
+ value_layer = past_key_value[1]
157
+ attention_mask = encoder_attention_mask
158
+ elif is_cross_attention:
159
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
160
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
161
+ attention_mask = encoder_attention_mask
162
+ elif past_key_value is not None:
163
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
164
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
165
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
166
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
167
+ else:
168
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
169
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
170
+
171
+ query_layer = self.transpose_for_scores(mixed_query_layer)
172
+
173
+ use_cache = past_key_value is not None
174
+ if self.is_decoder:
175
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
176
+ # Further calls to cross_attention layer can then reuse all cross-attention
177
+ # key/value_states (first "if" case)
178
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
179
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
180
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
181
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
182
+ past_key_value = (key_layer, value_layer)
183
+
184
+ # Take the dot product between "query" and "key" to get the raw attention scores.
185
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
186
+
187
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
188
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
189
+ if use_cache:
190
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
191
+ -1, 1
192
+ )
193
+ else:
194
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
195
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
196
+ distance = position_ids_l - position_ids_r
197
+
198
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
199
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
200
+
201
+ if self.position_embedding_type == "relative_key":
202
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
203
+ attention_scores = attention_scores + relative_position_scores
204
+ elif self.position_embedding_type == "relative_key_query":
205
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
206
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
207
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
208
+
209
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
210
+ if attention_mask is not None:
211
+ # Apply the attention mask (precomputed for all layers in SplinterModel forward() function)
212
+ attention_scores = attention_scores + attention_mask
213
+
214
+ # Normalize the attention scores to probabilities.
215
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
216
+
217
+ # This is actually dropping out entire tokens to attend to, which might
218
+ # seem a bit unusual, but is taken from the original Transformer paper.
219
+ attention_probs = self.dropout(attention_probs)
220
+
221
+ # Mask heads if we want to
222
+ if head_mask is not None:
223
+ attention_probs = attention_probs * head_mask
224
+
225
+ context_layer = torch.matmul(attention_probs, value_layer)
226
+
227
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
228
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
229
+ context_layer = context_layer.view(new_context_layer_shape)
230
+
231
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
232
+
233
+ if self.is_decoder:
234
+ outputs = outputs + (past_key_value,)
235
+ return outputs
236
+
237
+
238
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Splinter
239
+ class SplinterSelfOutput(nn.Module):
240
+ def __init__(self, config):
241
+ super().__init__()
242
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
243
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
244
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
245
+
246
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
247
+ hidden_states = self.dense(hidden_states)
248
+ hidden_states = self.dropout(hidden_states)
249
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
250
+ return hidden_states
251
+
252
+
253
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Splinter
254
+ class SplinterAttention(nn.Module):
255
+ def __init__(self, config, position_embedding_type=None):
256
+ super().__init__()
257
+ self.self = SplinterSelfAttention(config, position_embedding_type=position_embedding_type)
258
+ self.output = SplinterSelfOutput(config)
259
+ self.pruned_heads = set()
260
+
261
+ def prune_heads(self, heads):
262
+ if len(heads) == 0:
263
+ return
264
+ heads, index = find_pruneable_heads_and_indices(
265
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
266
+ )
267
+
268
+ # Prune linear layers
269
+ self.self.query = prune_linear_layer(self.self.query, index)
270
+ self.self.key = prune_linear_layer(self.self.key, index)
271
+ self.self.value = prune_linear_layer(self.self.value, index)
272
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
273
+
274
+ # Update hyper params and store pruned heads
275
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
276
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
277
+ self.pruned_heads = self.pruned_heads.union(heads)
278
+
279
+ def forward(
280
+ self,
281
+ hidden_states: torch.Tensor,
282
+ attention_mask: Optional[torch.FloatTensor] = None,
283
+ head_mask: Optional[torch.FloatTensor] = None,
284
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
285
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
286
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
287
+ output_attentions: Optional[bool] = False,
288
+ ) -> Tuple[torch.Tensor]:
289
+ self_outputs = self.self(
290
+ hidden_states,
291
+ attention_mask,
292
+ head_mask,
293
+ encoder_hidden_states,
294
+ encoder_attention_mask,
295
+ past_key_value,
296
+ output_attentions,
297
+ )
298
+ attention_output = self.output(self_outputs[0], hidden_states)
299
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
300
+ return outputs
301
+
302
+
303
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Splinter
304
+ class SplinterIntermediate(nn.Module):
305
+ def __init__(self, config):
306
+ super().__init__()
307
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
308
+ if isinstance(config.hidden_act, str):
309
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
310
+ else:
311
+ self.intermediate_act_fn = config.hidden_act
312
+
313
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
314
+ hidden_states = self.dense(hidden_states)
315
+ hidden_states = self.intermediate_act_fn(hidden_states)
316
+ return hidden_states
317
+
318
+
319
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Splinter
320
+ class SplinterOutput(nn.Module):
321
+ def __init__(self, config):
322
+ super().__init__()
323
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
324
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
325
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
326
+
327
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
328
+ hidden_states = self.dense(hidden_states)
329
+ hidden_states = self.dropout(hidden_states)
330
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
331
+ return hidden_states
332
+
333
+
334
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Splinter
335
+ class SplinterLayer(nn.Module):
336
+ def __init__(self, config):
337
+ super().__init__()
338
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
339
+ self.seq_len_dim = 1
340
+ self.attention = SplinterAttention(config)
341
+ self.is_decoder = config.is_decoder
342
+ self.add_cross_attention = config.add_cross_attention
343
+ if self.add_cross_attention:
344
+ if not self.is_decoder:
345
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
346
+ self.crossattention = SplinterAttention(config, position_embedding_type="absolute")
347
+ self.intermediate = SplinterIntermediate(config)
348
+ self.output = SplinterOutput(config)
349
+
350
+ def forward(
351
+ self,
352
+ hidden_states: torch.Tensor,
353
+ attention_mask: Optional[torch.FloatTensor] = None,
354
+ head_mask: Optional[torch.FloatTensor] = None,
355
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
356
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
357
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
358
+ output_attentions: Optional[bool] = False,
359
+ ) -> Tuple[torch.Tensor]:
360
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
361
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
362
+ self_attention_outputs = self.attention(
363
+ hidden_states,
364
+ attention_mask,
365
+ head_mask,
366
+ output_attentions=output_attentions,
367
+ past_key_value=self_attn_past_key_value,
368
+ )
369
+ attention_output = self_attention_outputs[0]
370
+
371
+ # if decoder, the last output is tuple of self-attn cache
372
+ if self.is_decoder:
373
+ outputs = self_attention_outputs[1:-1]
374
+ present_key_value = self_attention_outputs[-1]
375
+ else:
376
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
377
+
378
+ cross_attn_present_key_value = None
379
+ if self.is_decoder and encoder_hidden_states is not None:
380
+ if not hasattr(self, "crossattention"):
381
+ raise ValueError(
382
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
383
+ " by setting `config.add_cross_attention=True`"
384
+ )
385
+
386
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
387
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
388
+ cross_attention_outputs = self.crossattention(
389
+ attention_output,
390
+ attention_mask,
391
+ head_mask,
392
+ encoder_hidden_states,
393
+ encoder_attention_mask,
394
+ cross_attn_past_key_value,
395
+ output_attentions,
396
+ )
397
+ attention_output = cross_attention_outputs[0]
398
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
399
+
400
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
401
+ cross_attn_present_key_value = cross_attention_outputs[-1]
402
+ present_key_value = present_key_value + cross_attn_present_key_value
403
+
404
+ layer_output = apply_chunking_to_forward(
405
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
406
+ )
407
+ outputs = (layer_output,) + outputs
408
+
409
+ # if decoder, return the attn key/values as the last output
410
+ if self.is_decoder:
411
+ outputs = outputs + (present_key_value,)
412
+
413
+ return outputs
414
+
415
+ def feed_forward_chunk(self, attention_output):
416
+ intermediate_output = self.intermediate(attention_output)
417
+ layer_output = self.output(intermediate_output, attention_output)
418
+ return layer_output
419
+
420
+
421
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Splinter
422
+ class SplinterEncoder(nn.Module):
423
+ def __init__(self, config):
424
+ super().__init__()
425
+ self.config = config
426
+ self.layer = nn.ModuleList([SplinterLayer(config) for _ in range(config.num_hidden_layers)])
427
+ self.gradient_checkpointing = False
428
+
429
+ def forward(
430
+ self,
431
+ hidden_states: torch.Tensor,
432
+ attention_mask: Optional[torch.FloatTensor] = None,
433
+ head_mask: Optional[torch.FloatTensor] = None,
434
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
435
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
436
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
437
+ use_cache: Optional[bool] = None,
438
+ output_attentions: Optional[bool] = False,
439
+ output_hidden_states: Optional[bool] = False,
440
+ return_dict: Optional[bool] = True,
441
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
442
+ all_hidden_states = () if output_hidden_states else None
443
+ all_self_attentions = () if output_attentions else None
444
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
445
+
446
+ if self.gradient_checkpointing and self.training:
447
+ if use_cache:
448
+ logger.warning_once(
449
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
450
+ )
451
+ use_cache = False
452
+
453
+ next_decoder_cache = () if use_cache else None
454
+ for i, layer_module in enumerate(self.layer):
455
+ if output_hidden_states:
456
+ all_hidden_states = all_hidden_states + (hidden_states,)
457
+
458
+ layer_head_mask = head_mask[i] if head_mask is not None else None
459
+ past_key_value = past_key_values[i] if past_key_values is not None else None
460
+
461
+ if self.gradient_checkpointing and self.training:
462
+ layer_outputs = self._gradient_checkpointing_func(
463
+ layer_module.__call__,
464
+ hidden_states,
465
+ attention_mask,
466
+ layer_head_mask,
467
+ encoder_hidden_states,
468
+ encoder_attention_mask,
469
+ past_key_value,
470
+ output_attentions,
471
+ )
472
+ else:
473
+ layer_outputs = layer_module(
474
+ hidden_states,
475
+ attention_mask,
476
+ layer_head_mask,
477
+ encoder_hidden_states,
478
+ encoder_attention_mask,
479
+ past_key_value,
480
+ output_attentions,
481
+ )
482
+
483
+ hidden_states = layer_outputs[0]
484
+ if use_cache:
485
+ next_decoder_cache += (layer_outputs[-1],)
486
+ if output_attentions:
487
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
488
+ if self.config.add_cross_attention:
489
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
490
+
491
+ if output_hidden_states:
492
+ all_hidden_states = all_hidden_states + (hidden_states,)
493
+
494
+ if not return_dict:
495
+ return tuple(
496
+ v
497
+ for v in [
498
+ hidden_states,
499
+ next_decoder_cache,
500
+ all_hidden_states,
501
+ all_self_attentions,
502
+ all_cross_attentions,
503
+ ]
504
+ if v is not None
505
+ )
506
+ return BaseModelOutputWithPastAndCrossAttentions(
507
+ last_hidden_state=hidden_states,
508
+ past_key_values=next_decoder_cache,
509
+ hidden_states=all_hidden_states,
510
+ attentions=all_self_attentions,
511
+ cross_attentions=all_cross_attentions,
512
+ )
513
+
514
+
515
+ class SplinterPreTrainedModel(PreTrainedModel):
516
+ """
517
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
518
+ models.
519
+ """
520
+
521
+ config_class = SplinterConfig
522
+ base_model_prefix = "splinter"
523
+ supports_gradient_checkpointing = True
524
+
525
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
526
+ def _init_weights(self, module):
527
+ """Initialize the weights"""
528
+ if isinstance(module, nn.Linear):
529
+ # Slightly different from the TF version which uses truncated_normal for initialization
530
+ # cf https://github.com/pytorch/pytorch/pull/5617
531
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
532
+ if module.bias is not None:
533
+ module.bias.data.zero_()
534
+ elif isinstance(module, nn.Embedding):
535
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
536
+ if module.padding_idx is not None:
537
+ module.weight.data[module.padding_idx].zero_()
538
+ elif isinstance(module, nn.LayerNorm):
539
+ module.bias.data.zero_()
540
+ module.weight.data.fill_(1.0)
541
+
542
+
543
+ SPLINTER_START_DOCSTRING = r"""
544
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
545
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
546
+ behavior.
547
+
548
+ Parameters:
549
+ config ([`SplinterConfig`]): Model configuration class with all the parameters of the model.
550
+ Initializing with a config file does not load the weights associated with the model, only the
551
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
552
+ """
553
+
554
+ SPLINTER_INPUTS_DOCSTRING = r"""
555
+ Args:
556
+ input_ids (`torch.LongTensor` of shape `({0})`):
557
+ Indices of input sequence tokens in the vocabulary.
558
+
559
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
560
+ [`PreTrainedTokenizer.__call__`] for details.
561
+
562
+ [What are input IDs?](../glossary#input-ids)
563
+ attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*):
564
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
565
+
566
+ - 1 for tokens that are **not masked**,
567
+ - 0 for tokens that are **masked**.
568
+
569
+ [What are attention masks?](../glossary#attention-mask)
570
+ token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*):
571
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
572
+ 1]`:
573
+
574
+ - 0 corresponds to a *sentence A* token,
575
+ - 1 corresponds to a *sentence B* token.
576
+
577
+ [What are token type IDs?](../glossary#token-type-ids)
578
+ position_ids (`torch.LongTensor` of shape `{0}`, *optional*):
579
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
580
+ config.max_position_embeddings - 1]`.
581
+
582
+ [What are position IDs?](../glossary#position-ids)
583
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
584
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
585
+
586
+ - 1 indicates the head is **not masked**,
587
+ - 0 indicates the head is **masked**.
588
+
589
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
590
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
591
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
592
+ model's internal embedding lookup matrix.
593
+ output_attentions (`bool`, *optional*):
594
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
595
+ tensors for more detail.
596
+ output_hidden_states (`bool`, *optional*):
597
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
598
+ more detail.
599
+ return_dict (`bool`, *optional*):
600
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
601
+ """
602
+
603
+
604
+ @add_start_docstrings(
605
+ "The bare Splinter Model transformer outputting raw hidden-states without any specific head on top.",
606
+ SPLINTER_START_DOCSTRING,
607
+ )
608
+ class SplinterModel(SplinterPreTrainedModel):
609
+ """
610
+ The model is an encoder (with only self-attention) following the architecture described in [Attention is all you
611
+ need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones,
612
+ Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
613
+ """
614
+
615
+ def __init__(self, config):
616
+ super().__init__(config)
617
+ self.config = config
618
+
619
+ self.embeddings = SplinterEmbeddings(config)
620
+ self.encoder = SplinterEncoder(config)
621
+
622
+ # Initialize weights and apply final processing
623
+ self.post_init()
624
+
625
+ def get_input_embeddings(self):
626
+ return self.embeddings.word_embeddings
627
+
628
+ def set_input_embeddings(self, value):
629
+ self.embeddings.word_embeddings = value
630
+
631
+ def _prune_heads(self, heads_to_prune):
632
+ """
633
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
634
+ class PreTrainedModel
635
+ """
636
+ for layer, heads in heads_to_prune.items():
637
+ self.encoder.layer[layer].attention.prune_heads(heads)
638
+
639
+ @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
640
+ @add_code_sample_docstrings(
641
+ checkpoint=_CHECKPOINT_FOR_DOC,
642
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
643
+ config_class=_CONFIG_FOR_DOC,
644
+ )
645
+ def forward(
646
+ self,
647
+ input_ids: Optional[torch.Tensor] = None,
648
+ attention_mask: Optional[torch.Tensor] = None,
649
+ token_type_ids: Optional[torch.Tensor] = None,
650
+ position_ids: Optional[torch.Tensor] = None,
651
+ head_mask: Optional[torch.Tensor] = None,
652
+ inputs_embeds: Optional[torch.Tensor] = None,
653
+ encoder_hidden_states: Optional[torch.Tensor] = None,
654
+ encoder_attention_mask: Optional[torch.Tensor] = None,
655
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
656
+ use_cache: Optional[bool] = None,
657
+ output_attentions: Optional[bool] = None,
658
+ output_hidden_states: Optional[bool] = None,
659
+ return_dict: Optional[bool] = None,
660
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
661
+ r"""
662
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
663
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
664
+ the model is configured as a decoder.
665
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
666
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
667
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
668
+
669
+ - 1 for tokens that are **not masked**,
670
+ - 0 for tokens that are **masked**.
671
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
672
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
673
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
674
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
675
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
676
+ use_cache (`bool`, *optional*):
677
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
678
+ `past_key_values`).
679
+ """
680
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
681
+ output_hidden_states = (
682
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
683
+ )
684
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
685
+
686
+ if self.config.is_decoder:
687
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
688
+ else:
689
+ use_cache = False
690
+
691
+ if input_ids is not None and inputs_embeds is not None:
692
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
693
+ elif input_ids is not None:
694
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
695
+ input_shape = input_ids.size()
696
+ elif inputs_embeds is not None:
697
+ input_shape = inputs_embeds.size()[:-1]
698
+ else:
699
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
700
+
701
+ batch_size, seq_length = input_shape
702
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
703
+
704
+ # past_key_values_length
705
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
706
+
707
+ if attention_mask is None:
708
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
709
+ if token_type_ids is None:
710
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
711
+
712
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
713
+ # ourselves in which case we just need to make it broadcastable to all heads.
714
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
715
+
716
+ # If a 2D or 3D attention mask is provided for the cross-attention
717
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
718
+ if self.config.is_decoder and encoder_hidden_states is not None:
719
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
720
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
721
+ if encoder_attention_mask is None:
722
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
723
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
724
+ else:
725
+ encoder_extended_attention_mask = None
726
+
727
+ # Prepare head mask if needed
728
+ # 1.0 in head_mask indicates we keep the head
729
+ # attention_probs has shape bsz x n_heads x N x N
730
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
731
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
732
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
733
+
734
+ embedding_output = self.embeddings(
735
+ input_ids=input_ids,
736
+ position_ids=position_ids,
737
+ token_type_ids=token_type_ids,
738
+ inputs_embeds=inputs_embeds,
739
+ past_key_values_length=past_key_values_length,
740
+ )
741
+ encoder_outputs = self.encoder(
742
+ embedding_output,
743
+ attention_mask=extended_attention_mask,
744
+ head_mask=head_mask,
745
+ encoder_hidden_states=encoder_hidden_states,
746
+ encoder_attention_mask=encoder_extended_attention_mask,
747
+ past_key_values=past_key_values,
748
+ use_cache=use_cache,
749
+ output_attentions=output_attentions,
750
+ output_hidden_states=output_hidden_states,
751
+ return_dict=return_dict,
752
+ )
753
+ sequence_output = encoder_outputs[0]
754
+
755
+ if not return_dict:
756
+ return (sequence_output,) + encoder_outputs[1:]
757
+
758
+ return BaseModelOutputWithPastAndCrossAttentions(
759
+ last_hidden_state=sequence_output,
760
+ past_key_values=encoder_outputs.past_key_values,
761
+ hidden_states=encoder_outputs.hidden_states,
762
+ attentions=encoder_outputs.attentions,
763
+ cross_attentions=encoder_outputs.cross_attentions,
764
+ )
765
+
766
+
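A minimal usage sketch for the bare encoder above (illustrative only; it assumes the `tau/splinter-base` checkpoint referenced elsewhere in this commit and the public `transformers` Auto classes):

import torch
from transformers import AutoTokenizer, SplinterModel

tokenizer = AutoTokenizer.from_pretrained("tau/splinter-base")
model = SplinterModel.from_pretrained("tau/splinter-base")

inputs = tokenizer("Splinter is a span-selection pretrained encoder.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# last_hidden_state has shape (batch_size, sequence_length, hidden_size)
print(outputs.last_hidden_state.shape)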
767
+ class SplinterFullyConnectedLayer(nn.Module):
768
+ def __init__(self, input_dim, output_dim, hidden_act="gelu"):
769
+ super().__init__()
770
+
771
+ self.input_dim = input_dim
772
+ self.output_dim = output_dim
773
+
774
+ self.dense = nn.Linear(self.input_dim, self.output_dim)
775
+ self.act_fn = ACT2FN[hidden_act]
776
+ self.LayerNorm = nn.LayerNorm(self.output_dim)
777
+
778
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
779
+ hidden_states = self.dense(inputs)
780
+ hidden_states = self.act_fn(hidden_states)
781
+ hidden_states = self.LayerNorm(hidden_states)
782
+ return hidden_states
783
+
784
+
785
+ class QuestionAwareSpanSelectionHead(nn.Module):
786
+ """
787
+ Implementation of Question-Aware Span Selection (QASS) head, described in Splinter's paper:
788
+
789
+ """
790
+
791
+ def __init__(self, config):
792
+ super().__init__()
793
+
794
+ self.query_start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
795
+ self.query_end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
796
+ self.start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
797
+ self.end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
798
+
799
+ self.start_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
800
+ self.end_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
801
+
802
+ def forward(self, inputs, positions):
803
+ _, _, dim = inputs.size()
804
+ index = positions.unsqueeze(-1).repeat(1, 1, dim) # [batch_size, num_positions, dim]
805
+ gathered_reps = torch.gather(inputs, dim=1, index=index) # [batch_size, num_positions, dim]
806
+
807
+ query_start_reps = self.query_start_transform(gathered_reps) # [batch_size, num_positions, dim]
808
+ query_end_reps = self.query_end_transform(gathered_reps) # [batch_size, num_positions, dim]
809
+ start_reps = self.start_transform(inputs) # [batch_size, seq_length, dim]
810
+ end_reps = self.end_transform(inputs) # [batch_size, seq_length, dim]
811
+
812
+ hidden_states = self.start_classifier(query_start_reps) # [batch_size, num_positions, dim]
813
+ start_reps = start_reps.permute(0, 2, 1) # [batch_size, dim, seq_length]
814
+ start_logits = torch.matmul(hidden_states, start_reps)
815
+
816
+ hidden_states = self.end_classifier(query_end_reps)
817
+ end_reps = end_reps.permute(0, 2, 1)
818
+ end_logits = torch.matmul(hidden_states, end_reps)
819
+
820
+ return start_logits, end_logits
821
+
822
+
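The head above gathers the representations at the question-token positions and scores every sequence position against them. A toy shape check of that gather-and-matmul pattern (transforms and classifiers omitted; all sizes are illustrative):

import torch

batch_size, seq_length, dim, num_positions = 2, 6, 4, 1
inputs = torch.randn(batch_size, seq_length, dim)                     # encoder outputs
positions = torch.zeros(batch_size, num_positions, dtype=torch.long)  # question-token indices

index = positions.unsqueeze(-1).repeat(1, 1, dim)         # [batch_size, num_positions, dim]
gathered = torch.gather(inputs, dim=1, index=index)       # question-token representations
logits = torch.matmul(gathered, inputs.permute(0, 2, 1))  # [batch_size, num_positions, seq_length]
print(logits.shape)                                        # torch.Size([2, 1, 6])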
823
+ @add_start_docstrings(
824
+ """
825
+ Splinter Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
826
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
827
+ """,
828
+ SPLINTER_START_DOCSTRING,
829
+ )
830
+ class SplinterForQuestionAnswering(SplinterPreTrainedModel):
831
+ def __init__(self, config):
832
+ super().__init__(config)
833
+
834
+ self.splinter = SplinterModel(config)
835
+ self.splinter_qass = QuestionAwareSpanSelectionHead(config)
836
+ self.question_token_id = config.question_token_id
837
+
838
+ # Initialize weights and apply final processing
839
+ self.post_init()
840
+
841
+ @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
842
+ @add_code_sample_docstrings(
843
+ checkpoint=_CHECKPOINT_FOR_DOC,
844
+ output_type=QuestionAnsweringModelOutput,
845
+ config_class=_CONFIG_FOR_DOC,
846
+ )
847
+ def forward(
848
+ self,
849
+ input_ids: Optional[torch.Tensor] = None,
850
+ attention_mask: Optional[torch.Tensor] = None,
851
+ token_type_ids: Optional[torch.Tensor] = None,
852
+ position_ids: Optional[torch.Tensor] = None,
853
+ head_mask: Optional[torch.Tensor] = None,
854
+ inputs_embeds: Optional[torch.Tensor] = None,
855
+ start_positions: Optional[torch.LongTensor] = None,
856
+ end_positions: Optional[torch.LongTensor] = None,
857
+ output_attentions: Optional[bool] = None,
858
+ output_hidden_states: Optional[bool] = None,
859
+ return_dict: Optional[bool] = None,
860
+ question_positions: Optional[torch.LongTensor] = None,
861
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
862
+ r"""
863
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
864
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
865
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
866
+ are not taken into account for computing the loss.
867
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
868
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
869
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
870
+ are not taken into account for computing the loss.
871
+ question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
872
+ The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size,
873
+ num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be
874
+ the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size,
875
+ sequence_length)`.
876
+ """
877
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
878
+
879
+ question_positions_were_none = False
880
+ if question_positions is None:
881
+ if input_ids is not None:
882
+ question_position_for_each_example = torch.argmax(
883
+ (torch.eq(input_ids, self.question_token_id)).int(), dim=-1
884
+ )
885
+ else:
886
+ question_position_for_each_example = torch.zeros(
887
+ inputs_embeds.size(0), dtype=torch.long, layout=inputs_embeds.layout, device=inputs_embeds.device
888
+ )
889
+ question_positions = question_position_for_each_example.unsqueeze(-1)
890
+ question_positions_were_none = True
891
+
892
+ outputs = self.splinter(
893
+ input_ids,
894
+ attention_mask=attention_mask,
895
+ token_type_ids=token_type_ids,
896
+ position_ids=position_ids,
897
+ head_mask=head_mask,
898
+ inputs_embeds=inputs_embeds,
899
+ output_attentions=output_attentions,
900
+ output_hidden_states=output_hidden_states,
901
+ return_dict=return_dict,
902
+ )
903
+
904
+ sequence_output = outputs[0]
905
+ start_logits, end_logits = self.splinter_qass(sequence_output, question_positions)
906
+
907
+ if question_positions_were_none:
908
+ start_logits, end_logits = start_logits.squeeze(1), end_logits.squeeze(1)
909
+
910
+ if attention_mask is not None:
911
+ start_logits = start_logits + (1 - attention_mask) * torch.finfo(start_logits.dtype).min
912
+ end_logits = end_logits + (1 - attention_mask) * torch.finfo(end_logits.dtype).min
913
+
914
+ total_loss = None
915
+ if start_positions is not None and end_positions is not None:
916
+ # If we are on multi-GPU, splitting can add a dimension
917
+ if len(start_positions.size()) > 1:
918
+ start_positions = start_positions.squeeze(-1)
919
+ if len(end_positions.size()) > 1:
920
+ end_positions = end_positions.squeeze(-1)
921
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
922
+ ignored_index = start_logits.size(1)
923
+ start_positions.clamp_(0, ignored_index)
924
+ end_positions.clamp_(0, ignored_index)
925
+
926
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
927
+ start_loss = loss_fct(start_logits, start_positions)
928
+ end_loss = loss_fct(end_logits, end_positions)
929
+ total_loss = (start_loss + end_loss) / 2
930
+
931
+ if not return_dict:
932
+ output = (start_logits, end_logits) + outputs[1:]
933
+ return ((total_loss,) + output) if total_loss is not None else output
934
+
935
+ return QuestionAnsweringModelOutput(
936
+ loss=total_loss,
937
+ start_logits=start_logits,
938
+ end_logits=end_logits,
939
+ hidden_states=outputs.hidden_states,
940
+ attentions=outputs.attentions,
941
+ )
942
+
943
+
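An end-to-end sketch of the question-answering head above (assuming the `tau/splinter-base-qass` checkpoint listed in this commit's tokenizer file; the question and context strings are made up):

import torch
from transformers import AutoTokenizer, SplinterForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("tau/splinter-base-qass")
model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass")

question = "Who developed Splinter?"
context = "Splinter was developed by researchers at Tel Aviv University and AllenAI."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# take the highest-scoring start and end positions and decode the span
start = outputs.start_logits.argmax(-1)
end = outputs.end_logits.argmax(-1)
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))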
944
+ @dataclass
945
+ class SplinterForPreTrainingOutput(ModelOutput):
946
+ """
947
+ Class for outputs of Splinter as a span selection model.
948
+
949
+ Args:
950
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when start and end positions are provided):
951
+ Total span extraction loss is the sum of the cross-entropy losses for the start and end positions.
952
+ start_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`):
953
+ Span-start scores (before SoftMax).
954
+ end_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`):
955
+ Span-end scores (before SoftMax).
956
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
957
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
958
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
959
+
960
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
961
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
962
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
963
+ sequence_length)`.
964
+
965
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
966
+ heads.
967
+ """
968
+
969
+ loss: Optional[torch.FloatTensor] = None
970
+ start_logits: torch.FloatTensor = None
971
+ end_logits: torch.FloatTensor = None
972
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
973
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
974
+
975
+
976
+ @add_start_docstrings(
977
+ """
978
+ Splinter Model for the recurring span selection task as done during pretraining. The difference from the QA task
979
+ is that we do not have a question, but multiple question tokens that replace the occurrences of recurring spans
980
+ instead.
981
+ """,
982
+ SPLINTER_START_DOCSTRING,
983
+ )
984
+ class SplinterForPreTraining(SplinterPreTrainedModel):
985
+ def __init__(self, config):
986
+ super().__init__(config)
987
+
988
+ self.splinter = SplinterModel(config)
989
+ self.splinter_qass = QuestionAwareSpanSelectionHead(config)
990
+ self.question_token_id = config.question_token_id
991
+
992
+ # Initialize weights and apply final processing
993
+ self.post_init()
994
+
995
+ @add_start_docstrings_to_model_forward(
996
+ SPLINTER_INPUTS_DOCSTRING.format("batch_size, num_questions, sequence_length")
997
+ )
998
+ def forward(
999
+ self,
1000
+ input_ids: Optional[torch.Tensor] = None,
1001
+ attention_mask: Optional[torch.Tensor] = None,
1002
+ token_type_ids: Optional[torch.Tensor] = None,
1003
+ position_ids: Optional[torch.Tensor] = None,
1004
+ head_mask: Optional[torch.Tensor] = None,
1005
+ inputs_embeds: Optional[torch.Tensor] = None,
1006
+ start_positions: Optional[torch.LongTensor] = None,
1007
+ end_positions: Optional[torch.LongTensor] = None,
1008
+ output_attentions: Optional[bool] = None,
1009
+ output_hidden_states: Optional[bool] = None,
1010
+ return_dict: Optional[bool] = None,
1011
+ question_positions: Optional[torch.LongTensor] = None,
1012
+ ) -> Union[Tuple, SplinterForPreTrainingOutput]:
1013
+ r"""
1014
+ start_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
1015
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1016
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1017
+ are not taken into account for computing the loss.
1018
+ end_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
1019
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1020
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1021
+ are not taken into account for computing the loss.
1022
+ question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
1023
+ The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size,
1024
+ num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be
1025
+ the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size,
1026
+ sequence_length)`.
1027
+ """
1028
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1029
+
1030
+ if question_positions is None and start_positions is not None and end_positions is not None:
1031
+ raise TypeError("question_positions must be specified in order to calculate the loss")
1032
+
1033
+ elif question_positions is None and input_ids is None:
1034
+ raise TypeError("question_positions must be specified when inputs_embeds is used")
1035
+
1036
+ elif question_positions is None:
1037
+ question_positions = self._prepare_question_positions(input_ids)
1038
+
1039
+ outputs = self.splinter(
1040
+ input_ids,
1041
+ attention_mask=attention_mask,
1042
+ token_type_ids=token_type_ids,
1043
+ position_ids=position_ids,
1044
+ head_mask=head_mask,
1045
+ inputs_embeds=inputs_embeds,
1046
+ output_attentions=output_attentions,
1047
+ output_hidden_states=output_hidden_states,
1048
+ return_dict=return_dict,
1049
+ )
1050
+
1051
+ sequence_output = outputs[0]
1052
+ batch_size, sequence_length, dim = sequence_output.size()
1053
+ # [batch_size, num_questions, sequence_length]
1054
+ start_logits, end_logits = self.splinter_qass(sequence_output, question_positions)
1055
+
1056
+ num_questions = question_positions.size(1)
1057
+ if attention_mask is not None:
1058
+ attention_mask_for_each_question = attention_mask.unsqueeze(1).expand(
1059
+ batch_size, num_questions, sequence_length
1060
+ )
1061
+ start_logits = start_logits + (1 - attention_mask_for_each_question) * torch.finfo(start_logits.dtype).min
1062
+ end_logits = end_logits + (1 - attention_mask_for_each_question) * torch.finfo(end_logits.dtype).min
1063
+
1064
+ total_loss = None
1065
+ # [batch_size, num_questions, sequence_length]
1066
+ if start_positions is not None and end_positions is not None:
1067
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1068
+ start_positions.clamp_(0, max(0, sequence_length - 1))
1069
+ end_positions.clamp_(0, max(0, sequence_length - 1))
1070
+
1071
+ # Ignore zero positions in the loss. Splinter never predicts zero
1072
+ # during pretraining and zero is used for padding question
1073
+ # tokens as well as for start and end positions of padded
1074
+ # question tokens.
1075
+ loss_fct = CrossEntropyLoss(ignore_index=self.config.pad_token_id)
1076
+ start_loss = loss_fct(
1077
+ start_logits.view(batch_size * num_questions, sequence_length),
1078
+ start_positions.view(batch_size * num_questions),
1079
+ )
1080
+ end_loss = loss_fct(
1081
+ end_logits.view(batch_size * num_questions, sequence_length),
1082
+ end_positions.view(batch_size * num_questions),
1083
+ )
1084
+ total_loss = (start_loss + end_loss) / 2
1085
+
1086
+ if not return_dict:
1087
+ output = (start_logits, end_logits) + outputs[1:]
1088
+ return ((total_loss,) + output) if total_loss is not None else output
1089
+
1090
+ return SplinterForPreTrainingOutput(
1091
+ loss=total_loss,
1092
+ start_logits=start_logits,
1093
+ end_logits=end_logits,
1094
+ hidden_states=outputs.hidden_states,
1095
+ attentions=outputs.attentions,
1096
+ )
1097
+
1098
+ def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor:
1099
+ rows, flat_positions = torch.where(input_ids == self.config.question_token_id)
1100
+ num_questions = torch.bincount(rows)
1101
+ positions = torch.full(
1102
+ (input_ids.size(0), num_questions.max()),
1103
+ self.config.pad_token_id,
1104
+ dtype=torch.long,
1105
+ device=input_ids.device,
1106
+ )
1107
+ cols = torch.cat([torch.arange(n) for n in num_questions])
1108
+ positions[rows, cols] = flat_positions
1109
+ return positions
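A small illustration of what `_prepare_question_positions` computes, using made-up token ids (`104` stands in for the question token id and `0` for the pad token id):

import torch

question_token_id, pad_token_id = 104, 0  # illustrative ids only
input_ids = torch.tensor([
    [101, 104, 7, 8, 102],   # one question token, at position 1
    [101, 5, 104, 6, 104],   # two question tokens, at positions 2 and 4
])
rows, flat_positions = torch.where(input_ids == question_token_id)
num_questions = torch.bincount(rows)
positions = torch.full((input_ids.size(0), int(num_questions.max())), pad_token_id, dtype=torch.long)
cols = torch.cat([torch.arange(int(n)) for n in num_questions])
positions[rows, cols] = flat_positions
print(positions)  # tensor([[1, 0], [2, 4]])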
env-llmeval/lib/python3.10/site-packages/transformers/models/splinter/tokenization_splinter.py ADDED
@@ -0,0 +1,529 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Tel Aviv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
3
+ # All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Tokenization classes for Splinter."""
17
+
18
+ import collections
19
+ import os
20
+ import unicodedata
21
+ from typing import List, Optional, Tuple
22
+
23
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
30
+
31
+ PRETRAINED_VOCAB_FILES_MAP = {
32
+ "vocab_file": {
33
+ "tau/splinter-base": "https://huggingface.co/tau/splinter-base/resolve/main/vocab.txt",
34
+ "tau/splinter-base-qass": "https://huggingface.co/tau/splinter-base-qass/resolve/main/vocab.txt",
35
+ "tau/splinter-large": "https://huggingface.co/tau/splinter-large/resolve/main/vocab.txt",
36
+ "tau/splinter-large-qass": "https://huggingface.co/tau/splinter-large-qass/resolve/main/vocab.txt",
37
+ }
38
+ }
39
+
40
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
41
+ "tau/splinter-base": 512,
42
+ "tau/splinter-base-qass": 512,
43
+ "tau/splinter-large": 512,
44
+ "tau/splinter-large-qass": 512,
45
+ }
46
+
47
+ PRETRAINED_INIT_CONFIGURATION = {
48
+ "tau/splinter-base": {"do_lower_case": False},
49
+ "tau/splinter-base-qass": {"do_lower_case": False},
50
+ "tau/splinter-large": {"do_lower_case": False},
51
+ "tau/splinter-large-qass": {"do_lower_case": False},
52
+ }
53
+
54
+
55
+ def load_vocab(vocab_file):
56
+ """Loads a vocabulary file into a dictionary."""
57
+ vocab = collections.OrderedDict()
58
+ with open(vocab_file, "r", encoding="utf-8") as reader:
59
+ tokens = reader.readlines()
60
+ for index, token in enumerate(tokens):
61
+ token = token.rstrip("\n")
62
+ vocab[token] = index
63
+ return vocab
64
+
65
+
66
+ def whitespace_tokenize(text):
67
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
68
+ text = text.strip()
69
+ if not text:
70
+ return []
71
+ tokens = text.split()
72
+ return tokens
73
+
74
+
75
+ class SplinterTokenizer(PreTrainedTokenizer):
76
+ r"""
77
+ Construct a Splinter tokenizer. Based on WordPiece.
78
+
79
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
80
+ this superclass for more information regarding those methods.
81
+
82
+ Args:
83
+ vocab_file (`str`):
84
+ File containing the vocabulary.
85
+ do_lower_case (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to lowercase the input when tokenizing.
87
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
88
+ Whether or not to do basic tokenization before WordPiece.
89
+ never_split (`Iterable`, *optional*):
90
+ Collection of tokens which will never be split during tokenization. Only has an effect when
91
+ `do_basic_tokenize=True`
92
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
93
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
94
+ token instead.
95
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
96
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
97
+ sequence classification or for a text and a question for question answering. It is also used as the last
98
+ token of a sequence built with special tokens.
99
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
100
+ The token used for padding, for example when batching sequences of different lengths.
101
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
102
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
103
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
104
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
105
+ The token used for masking values. This is the token used when training this model with masked language
106
+ modeling. This is the token which the model will try to predict.
107
+ question_token (`str`, *optional*, defaults to `"[QUESTION]"`):
108
+ The token used for constructing question representations.
109
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
110
+ Whether or not to tokenize Chinese characters.
111
+
112
+ This should likely be deactivated for Japanese (see this
113
+ [issue](https://github.com/huggingface/transformers/issues/328)).
114
+ strip_accents (`bool`, *optional*):
115
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
116
+ value for `lowercase` (as in the original BERT).
117
+ """
118
+
119
+ vocab_files_names = VOCAB_FILES_NAMES
120
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
121
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
122
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
123
+
124
+ def __init__(
125
+ self,
126
+ vocab_file,
127
+ do_lower_case=True,
128
+ do_basic_tokenize=True,
129
+ never_split=None,
130
+ unk_token="[UNK]",
131
+ sep_token="[SEP]",
132
+ pad_token="[PAD]",
133
+ cls_token="[CLS]",
134
+ mask_token="[MASK]",
135
+ question_token="[QUESTION]",
136
+ tokenize_chinese_chars=True,
137
+ strip_accents=None,
138
+ **kwargs,
139
+ ):
140
+ if not os.path.isfile(vocab_file):
141
+ raise ValueError(
142
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
143
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
144
+ )
145
+ self.vocab = load_vocab(vocab_file)
146
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
147
+ self.do_basic_tokenize = do_basic_tokenize
148
+ if do_basic_tokenize:
149
+ self.basic_tokenizer = BasicTokenizer(
150
+ do_lower_case=do_lower_case,
151
+ never_split=never_split,
152
+ tokenize_chinese_chars=tokenize_chinese_chars,
153
+ strip_accents=strip_accents,
154
+ )
155
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
156
+ self.question_token = question_token
157
+ super().__init__(
158
+ do_lower_case=do_lower_case,
159
+ do_basic_tokenize=do_basic_tokenize,
160
+ never_split=never_split,
161
+ unk_token=unk_token,
162
+ sep_token=sep_token,
163
+ pad_token=pad_token,
164
+ cls_token=cls_token,
165
+ mask_token=mask_token,
166
+ tokenize_chinese_chars=tokenize_chinese_chars,
167
+ strip_accents=strip_accents,
168
+ **kwargs,
169
+ )
170
+
171
+ @property
172
+ def question_token_id(self):
173
+ """
174
+ `Optional[int]`: Id of the question token in the vocabulary, used to condition the answer on a question
175
+ representation.
176
+ """
177
+ return self.convert_tokens_to_ids(self.question_token)
178
+
179
+ @property
180
+ def do_lower_case(self):
181
+ return self.basic_tokenizer.do_lower_case
182
+
183
+ @property
184
+ def vocab_size(self):
185
+ return len(self.vocab)
186
+
187
+ def get_vocab(self):
188
+ return dict(self.vocab, **self.added_tokens_encoder)
189
+
190
+ def _tokenize(self, text):
191
+ split_tokens = []
192
+ if self.do_basic_tokenize:
193
+ for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
194
+ # If the token is part of the never_split set
195
+ if token in self.basic_tokenizer.never_split:
196
+ split_tokens.append(token)
197
+ else:
198
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
199
+ else:
200
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
201
+ return split_tokens
202
+
203
+ def _convert_token_to_id(self, token):
204
+ """Converts a token (str) in an id using the vocab."""
205
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
206
+
207
+ def _convert_id_to_token(self, index):
208
+ """Converts an index (integer) in a token (str) using the vocab."""
209
+ return self.ids_to_tokens.get(index, self.unk_token)
210
+
211
+ def convert_tokens_to_string(self, tokens):
212
+ """Converts a sequence of tokens (string) in a single string."""
213
+ out_string = " ".join(tokens).replace(" ##", "").strip()
214
+ return out_string
215
+
216
+ def build_inputs_with_special_tokens(
217
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
218
+ ) -> List[int]:
219
+ """
220
+ Build model inputs from a pair of sequence for question answering tasks by concatenating and adding special
221
+ tokens. A Splinter sequence has the following format:
222
+
223
+ - single sequence: `[CLS] X [SEP]`
224
+ - pair of sequences for question answering: `[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]`
225
+
226
+ Args:
227
+ token_ids_0 (`List[int]`):
228
+ The question token IDs if pad_on_right, else the context token IDs
229
+ token_ids_1 (`List[int]`, *optional*):
230
+ The context token IDs if pad_on_right, else question token IDs
231
+
232
+ Returns:
233
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
234
+ """
235
+ if token_ids_1 is None:
236
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
237
+
238
+ cls = [self.cls_token_id]
239
+ sep = [self.sep_token_id]
240
+ question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
241
+ if self.padding_side == "right":
242
+ # Input is question-then-context
243
+ return cls + token_ids_0 + question_suffix + sep + token_ids_1 + sep
244
+ else:
245
+ # Input is context-then-question
246
+ return cls + token_ids_0 + sep + token_ids_1 + question_suffix + sep
247
+
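A sketch of the pair format produced above (assuming `padding_side == "right"` and a checkpoint such as `tau/splinter-base-qass`; the exact wordpieces depend on the vocabulary):

from transformers import SplinterTokenizer

tokenizer = SplinterTokenizer.from_pretrained("tau/splinter-base-qass")
encoding = tokenizer("Who wrote it?", "The paper was written in 2021.")
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))
# expected layout: [CLS] question wordpieces [QUESTION] . [SEP] context wordpieces [SEP]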
248
+ def get_special_tokens_mask(
249
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
250
+ ) -> List[int]:
251
+ """
252
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
253
+ special tokens using the tokenizer `prepare_for_model` method.
254
+
255
+ Args:
256
+ token_ids_0 (`List[int]`):
257
+ List of IDs.
258
+ token_ids_1 (`List[int]`, *optional*):
259
+ Optional second list of IDs for sequence pairs.
260
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
261
+ Whether or not the token list is already formatted with special tokens for the model.
262
+
263
+ Returns:
264
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
265
+ """
266
+
267
+ if already_has_special_tokens:
268
+ return super().get_special_tokens_mask(
269
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
270
+ )
271
+
272
+ if token_ids_1 is not None:
273
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
274
+ return [1] + ([0] * len(token_ids_0)) + [1]
275
+
276
+ def create_token_type_ids_from_sequences(
277
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
278
+ ) -> List[int]:
279
+ """
280
+ Create the token type IDs corresponding to the sequences passed. [What are token type
281
+ IDs?](../glossary#token-type-ids)
282
+
283
+ Should be overridden in a subclass if the model has a special way of building those.
284
+
285
+ Args:
286
+ token_ids_0 (`List[int]`): The first tokenized sequence.
287
+ token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
288
+
289
+ Returns:
290
+ `List[int]`: The token type ids.
291
+ """
292
+ sep = [self.sep_token_id]
293
+ cls = [self.cls_token_id]
294
+ question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
295
+ if token_ids_1 is None:
296
+ return len(cls + token_ids_0 + sep) * [0]
297
+
298
+ if self.padding_side == "right":
299
+ # Input is question-then-context
300
+ return len(cls + token_ids_0 + question_suffix + sep) * [0] + len(token_ids_1 + sep) * [1]
301
+ else:
302
+ # Input is context-then-question
303
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + question_suffix + sep) * [1]
304
+
305
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
306
+ index = 0
307
+ if os.path.isdir(save_directory):
308
+ vocab_file = os.path.join(
309
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
310
+ )
311
+ else:
312
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
313
+ with open(vocab_file, "w", encoding="utf-8") as writer:
314
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
315
+ if index != token_index:
316
+ logger.warning(
317
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
318
+ " Please check that the vocabulary is not corrupted!"
319
+ )
320
+ index = token_index
321
+ writer.write(token + "\n")
322
+ index += 1
323
+ return (vocab_file,)
324
+
325
+
326
+ class BasicTokenizer(object):
327
+ """
328
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
329
+
330
+ Args:
331
+ do_lower_case (`bool`, *optional*, defaults to `True`):
332
+ Whether or not to lowercase the input when tokenizing.
333
+ never_split (`Iterable`, *optional*):
334
+ Collection of tokens which will never be split during tokenization. Only has an effect when
335
+ `do_basic_tokenize=True`
336
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
337
+ Whether or not to tokenize Chinese characters.
338
+
339
+ This should likely be deactivated for Japanese (see this
340
+ [issue](https://github.com/huggingface/transformers/issues/328)).
341
+ strip_accents (`bool`, *optional*):
342
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
343
+ value for `lowercase` (as in the original BERT).
344
+ """
345
+
346
+ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
347
+ if never_split is None:
348
+ never_split = []
349
+ self.do_lower_case = do_lower_case
350
+ self.never_split = set(never_split)
351
+ self.tokenize_chinese_chars = tokenize_chinese_chars
352
+ self.strip_accents = strip_accents
353
+
354
+ def tokenize(self, text, never_split=None):
355
+ """
356
+ Basic tokenization of a piece of text. Splits on whitespace only; for sub-word tokenization, see
357
+ WordPieceTokenizer.
358
+
359
+ Args:
360
+ **never_split**: (*optional*) list of str
361
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
362
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
363
+ """
364
+ # union() returns a new set by concatenating the two sets.
365
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
366
+ text = self._clean_text(text)
367
+
368
+ # This was added on November 1st, 2018 for the multilingual and Chinese
369
+ # models. This is also applied to the English models now, but it doesn't
370
+ # matter since the English models were not trained on any Chinese data
371
+ # and generally don't have any Chinese data in them (there are Chinese
372
+ # characters in the vocabulary because Wikipedia does have some Chinese
373
+ # words in the English Wikipedia.).
374
+ if self.tokenize_chinese_chars:
375
+ text = self._tokenize_chinese_chars(text)
376
+ orig_tokens = whitespace_tokenize(text)
377
+ split_tokens = []
378
+ for token in orig_tokens:
379
+ if token not in never_split:
380
+ if self.do_lower_case:
381
+ token = token.lower()
382
+ if self.strip_accents is not False:
383
+ token = self._run_strip_accents(token)
384
+ elif self.strip_accents:
385
+ token = self._run_strip_accents(token)
386
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
387
+
388
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
389
+ return output_tokens
390
+
391
+ def _run_strip_accents(self, text):
392
+ """Strips accents from a piece of text."""
393
+ text = unicodedata.normalize("NFD", text)
394
+ output = []
395
+ for char in text:
396
+ cat = unicodedata.category(char)
397
+ if cat == "Mn":
398
+ continue
399
+ output.append(char)
400
+ return "".join(output)
401
+
402
+ def _run_split_on_punc(self, text, never_split=None):
403
+ """Splits punctuation on a piece of text."""
404
+ if never_split is not None and text in never_split:
405
+ return [text]
406
+ chars = list(text)
407
+ i = 0
408
+ start_new_word = True
409
+ output = []
410
+ while i < len(chars):
411
+ char = chars[i]
412
+ if _is_punctuation(char):
413
+ output.append([char])
414
+ start_new_word = True
415
+ else:
416
+ if start_new_word:
417
+ output.append([])
418
+ start_new_word = False
419
+ output[-1].append(char)
420
+ i += 1
421
+
422
+ return ["".join(x) for x in output]
423
+
424
+ def _tokenize_chinese_chars(self, text):
425
+ """Adds whitespace around any CJK character."""
426
+ output = []
427
+ for char in text:
428
+ cp = ord(char)
429
+ if self._is_chinese_char(cp):
430
+ output.append(" ")
431
+ output.append(char)
432
+ output.append(" ")
433
+ else:
434
+ output.append(char)
435
+ return "".join(output)
436
+
437
+ def _is_chinese_char(self, cp):
438
+ """Checks whether CP is the codepoint of a CJK character."""
439
+ # This defines a "chinese character" as anything in the CJK Unicode block:
440
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
441
+ #
442
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
443
+ # despite its name. The modern Korean Hangul alphabet is a different block,
444
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
445
+ # space-separated words, so they are not treated specially and handled
446
+ # like all of the other languages.
447
+ if (
448
+ (cp >= 0x4E00 and cp <= 0x9FFF)
449
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
450
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
451
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
452
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
453
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
454
+ or (cp >= 0xF900 and cp <= 0xFAFF)
455
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
456
+ ): #
457
+ return True
458
+
459
+ return False
460
+
461
+ def _clean_text(self, text):
462
+ """Performs invalid character removal and whitespace cleanup on text."""
463
+ output = []
464
+ for char in text:
465
+ cp = ord(char)
466
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
467
+ continue
468
+ if _is_whitespace(char):
469
+ output.append(" ")
470
+ else:
471
+ output.append(char)
472
+ return "".join(output)
473
+
474
+
475
+ class WordpieceTokenizer(object):
476
+ """Runs WordPiece tokenization."""
477
+
478
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
479
+ self.vocab = vocab
480
+ self.unk_token = unk_token
481
+ self.max_input_chars_per_word = max_input_chars_per_word
482
+
483
+ def tokenize(self, text):
484
+ """
485
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
486
+ tokenization using the given vocabulary.
487
+
488
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
489
+
490
+ Args:
491
+ text: A single token or whitespace separated tokens. This should have
492
+ already been passed through *BasicTokenizer*.
493
+
494
+ Returns:
495
+ A list of wordpiece tokens.
496
+ """
497
+
498
+ output_tokens = []
499
+ for token in whitespace_tokenize(text):
500
+ chars = list(token)
501
+ if len(chars) > self.max_input_chars_per_word:
502
+ output_tokens.append(self.unk_token)
503
+ continue
504
+
505
+ is_bad = False
506
+ start = 0
507
+ sub_tokens = []
508
+ while start < len(chars):
509
+ end = len(chars)
510
+ cur_substr = None
511
+ while start < end:
512
+ substr = "".join(chars[start:end])
513
+ if start > 0:
514
+ substr = "##" + substr
515
+ if substr in self.vocab:
516
+ cur_substr = substr
517
+ break
518
+ end -= 1
519
+ if cur_substr is None:
520
+ is_bad = True
521
+ break
522
+ sub_tokens.append(cur_substr)
523
+ start = end
524
+
525
+ if is_bad:
526
+ output_tokens.append(self.unk_token)
527
+ else:
528
+ output_tokens.extend(sub_tokens)
529
+ return output_tokens
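A toy run of the greedy longest-match-first behaviour described above, using the `WordpieceTokenizer` class defined in this file and a made-up four-entry vocabulary:

vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}  # illustrative vocabulary only
wp = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(wp.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wp.tokenize("xyz"))        # ['[UNK]'] -- no piece of "xyz" is in the toy vocab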