applied-ai-018 committed on
Commit d6d2964 · verified · 1 Parent(s): cde5cd1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full set of changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__init__.py +76 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/configuration_clap.py +427 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py +133 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py +363 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py +117 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__init__.py +77 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/__init__.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/configuration_cohere.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/modeling_cohere.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/tokenization_cohere_fast.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/configuration_cohere.py +159 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/modeling_cohere.py +1266 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/tokenization_cohere_fast.py +701 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__init__.py +59 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/configuration_focalnet.py +164 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/convert_focalnet_to_hf_format.py +237 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/modeling_focalnet.py +1032 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/led/__init__.py +101 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/led/configuration_led.py +165 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/led/modeling_led.py +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/led/modeling_tf_led.py +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/led/tokenization_led.py +449 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/led/tokenization_led_fast.py +325 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__init__.py +145 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/__init__.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/configuration_mobilebert.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/convert_mobilebert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_mobilebert.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_tf_mobilebert.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert_fast.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py +58 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_mobilebert.py +1617 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_tf_mobilebert.py +1970 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert.py +506 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/tokenization_qwen2_fast.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/reformer/__init__.py +103 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/reformer/configuration_reformer.py +235 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__init__.py +96 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/__init__.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/configuration_speecht5.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_hifigan.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_speecht5_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__init__.py ADDED
@@ -0,0 +1,76 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_clap": [
21
+ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
22
+ "ClapAudioConfig",
23
+ "ClapConfig",
24
+ "ClapTextConfig",
25
+ ],
26
+ "processing_clap": ["ClapProcessor"],
27
+ }
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_clap"] = [
36
+ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "ClapModel",
38
+ "ClapPreTrainedModel",
39
+ "ClapTextModel",
40
+ "ClapTextModelWithProjection",
41
+ "ClapAudioModel",
42
+ "ClapAudioModelWithProjection",
43
+ ]
44
+ _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_clap import (
48
+ CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
49
+ ClapAudioConfig,
50
+ ClapConfig,
51
+ ClapTextConfig,
52
+ )
53
+ from .processing_clap import ClapProcessor
54
+
55
+ try:
56
+ if not is_torch_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ from .feature_extraction_clap import ClapFeatureExtractor
62
+ from .modeling_clap import (
63
+ CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
64
+ ClapAudioModel,
65
+ ClapAudioModelWithProjection,
66
+ ClapModel,
67
+ ClapPreTrainedModel,
68
+ ClapTextModel,
69
+ ClapTextModelWithProjection,
70
+ )
71
+
72
+
73
+ else:
74
+ import sys
75
+
76
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
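For context on how the `_LazyModule` registration above behaves at import time, here is a minimal usage sketch. It assumes `transformers` is installed together with PyTorch; without torch, only the configuration and processor symbols are registered and the modeling classes raise an informative import error instead.

```python
# Minimal sketch of the lazy-import behaviour defined in this __init__.py
# (assumes transformers is installed with PyTorch available).
from transformers.models.clap import ClapConfig, ClapModel  # resolved through _LazyModule

config = ClapConfig()      # configuration_clap is only loaded on first access
print(type(config).__name__, config.projection_dim)

model = ClapModel(config)  # randomly initialized model built from the default config
print(sum(p.numel() for p in model.parameters()))
```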
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.2 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc ADDED
Binary file (17.1 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (3.35 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc ADDED
Binary file (14.5 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc ADDED
Binary file (66.9 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc ADDED
Binary file (5.27 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/configuration_clap.py ADDED
@@ -0,0 +1,427 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CLAP model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class ClapTextConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`ClapTextModel`]. It is used to instantiate a CLAP
30
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
31
+ defaults will yield a similar configuration to that of the CLAP
32
+ [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 50265):
40
+ Vocabulary size of the CLAP model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`ClapTextModel`].
42
+ hidden_size (`int`, *optional*, defaults to 768):
43
+ Dimensionality of the encoder layers and the pooler layer.
44
+ num_hidden_layers (`int`, *optional*, defaults to 12):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 12):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 3072):
49
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
50
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
51
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
52
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
53
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
54
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
55
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout ratio for the attention probabilities.
57
+ max_position_embeddings (`int`, *optional*, defaults to 514):
58
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
59
+ just in case (e.g., 512 or 1024 or 2048).
60
+ type_vocab_size (`int`, *optional*, defaults to 1):
61
+ The vocabulary size of the `token_type_ids` passed when calling [`ClapTextModel`].
62
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
63
+ The epsilon used by the layer normalization layers.
64
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
65
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
66
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
67
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
68
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
69
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
70
+ is_decoder (`bool`, *optional*, defaults to `False`):
71
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
72
+ use_cache (`bool`, *optional*, defaults to `True`):
73
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
74
+ relevant if `config.is_decoder=True`.
75
+ projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
76
+ The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`,
77
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
78
+ projection_dim (`int`, *optional*, defaults to 512):
79
+ Dimension of the projection head of the `ClapTextModelWithProjection`.
80
+
81
+ Examples:
82
+
83
+ ```python
84
+ >>> from transformers import ClapTextConfig, ClapTextModel
85
+
86
+ >>> # Initializing a CLAP text configuration
87
+ >>> configuration = ClapTextConfig()
88
+
89
+ >>> # Initializing a model (with random weights) from the configuration
90
+ >>> model = ClapTextModel(configuration)
91
+
92
+ >>> # Accessing the model configuration
93
+ >>> configuration = model.config
94
+ ```"""
95
+
96
+ model_type = "clap_text_model"
97
+
98
+ def __init__(
99
+ self,
100
+ vocab_size=50265,
101
+ hidden_size=768,
102
+ num_hidden_layers=12,
103
+ num_attention_heads=12,
104
+ intermediate_size=3072,
105
+ hidden_act="gelu",
106
+ hidden_dropout_prob=0.1,
107
+ attention_probs_dropout_prob=0.1,
108
+ max_position_embeddings=514,
109
+ type_vocab_size=1,
110
+ initializer_factor=1.0,
111
+ layer_norm_eps=1e-12,
112
+ projection_dim=512,
113
+ pad_token_id=1,
114
+ bos_token_id=0,
115
+ eos_token_id=2,
116
+ position_embedding_type="absolute",
117
+ use_cache=True,
118
+ projection_hidden_act="relu",
119
+ **kwargs,
120
+ ):
121
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
122
+
123
+ self.vocab_size = vocab_size
124
+ self.hidden_size = hidden_size
125
+ self.num_hidden_layers = num_hidden_layers
126
+ self.num_attention_heads = num_attention_heads
127
+ self.hidden_act = hidden_act
128
+ self.intermediate_size = intermediate_size
129
+ self.hidden_dropout_prob = hidden_dropout_prob
130
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
131
+ self.max_position_embeddings = max_position_embeddings
132
+ self.type_vocab_size = type_vocab_size
133
+ self.initializer_factor = initializer_factor
134
+ self.layer_norm_eps = layer_norm_eps
135
+ self.position_embedding_type = position_embedding_type
136
+ self.use_cache = use_cache
137
+ self.projection_hidden_act = projection_hidden_act
138
+ self.projection_dim = projection_dim
139
+
140
+ @classmethod
141
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
142
+ cls._set_token_in_kwargs(kwargs)
143
+
144
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
145
+
146
+ # get the text config dict if we are loading from ClapConfig
147
+ if config_dict.get("model_type") == "clap":
148
+ config_dict = config_dict["text_config"]
149
+
150
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
151
+ logger.warning(
152
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
153
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
154
+ )
155
+
156
+ return cls.from_dict(config_dict, **kwargs)
157
+
158
+
159
+ class ClapAudioConfig(PretrainedConfig):
160
+ r"""
161
+ This is the configuration class to store the configuration of a [`ClapAudioModel`]. It is used to instantiate a
162
+ CLAP audio encoder according to the specified arguments, defining the model architecture. Instantiating a
163
+ configuration with the defaults will yield a similar configuration to that of the audio encoder of the CLAP
164
+ [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.
165
+
166
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
167
+ documentation from [`PretrainedConfig`] for more information.
168
+
169
+ Args:
170
+ window_size (`int`, *optional*, defaults to 8):
171
+ Image size of the spectrogram
172
+ num_mel_bins (`int`, *optional*, defaults to 64):
173
+ Number of mel features used per frame. Should correspond to the value used in the `ClapProcessor` class.
174
+ spec_size (`int`, *optional*, defaults to 256):
175
+ Desired input size of the spectrogram that the model supports. It can be different from the output of the
176
+ `ClapFeatureExtractor`, in which case the input features will be resized. Corresponds to the `image_size`
177
+ of the audio models.
178
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
179
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
180
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
181
+ patch_size (`int`, *optional*, defaults to 4):
182
+ Patch size for the audio spectrogram
183
+ patch_stride (`list`, *optional*, defaults to `[4, 4]`):
184
+ Patch stride for the audio spectrogram
185
+ num_classes (`int`, *optional*, defaults to 527):
186
+ Number of classes used for the head training
187
+ hidden_size (`int`, *optional*, defaults to 768):
188
+ Hidden size of the output of the audio encoder. Corresponds to the dimension of the penultimate layer's
189
+ output, which is sent to the projection MLP layer.
190
+ projection_dim (`int`, *optional*, defaults to 512):
191
+ Hidden size of the projection layer.
192
+ depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`):
193
+ Depths used for the Swin Layers of the audio model
194
+ num_attention_heads (`list`, *optional*, defaults to `[4, 8, 16, 32]`):
195
+ Number of attention heads used for the Swin Layers of the audio model
196
+ enable_fusion (`bool`, *optional*, defaults to `False`):
197
+ Whether or not to enable patch fusion. This is the main contribution of the authors, and should give the
198
+ best results.
199
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
200
+ The dropout probability for all fully connected layers in the encoder.
201
+ fusion_type (`str`, *optional*):
202
+ Fusion type used for the patch fusion.
203
+ patch_embed_input_channels (`int`, *optional*, defaults to 1):
204
+ Number of channels used for the input spectrogram
205
+ flatten_patch_embeds (`bool`, *optional*, defaults to `True`):
206
+ Whether or not to flatten the patch embeddings
207
+ patch_embeds_hidden_size (`int`, *optional*, defaults to 96):
208
+ Hidden size of the patch embeddings. It is used as the number of output channels.
209
+ enable_patch_layer_norm (`bool`, *optional*, defaults to `True`):
210
+ Whether or not to enable layer normalization for the patch embeddings
211
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
212
+ Drop path rate for the patch fusion
213
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
214
+ The dropout ratio for the attention probabilities.
215
+ qkv_bias (`bool`, *optional*, defaults to `True`):
216
+ Whether or not to add a bias to the query, key, value projections.
217
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
218
+ Ratio of the mlp hidden dim to embedding dim.
219
+ aff_block_r (`int`, *optional*, defaults to 4):
220
+ downsize_ratio used in the AudioFF block
221
+ num_hidden_layers (`int`, *optional*, defaults to 4):
222
+ Number of hidden layers in the Transformer encoder.
223
+ projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
224
+ The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`,
225
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
226
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
227
+ The epsilon used by the layer normalization layers.
228
+ initializer_factor (`float`, *optional*, defaults to 1.0):
229
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
230
+ testing).
231
+
232
+ Example:
233
+
234
+ ```python
235
+ >>> from transformers import ClapAudioConfig, ClapAudioModel
236
+
237
+ >>> # Initializing a ClapAudioConfig with laion/clap-htsat-fused style configuration
238
+ >>> configuration = ClapAudioConfig()
239
+
240
+ >>> # Initializing a ClapAudioModel (with random weights) from the laion/clap-htsat-fused style configuration
241
+ >>> model = ClapAudioModel(configuration)
242
+
243
+ >>> # Accessing the model configuration
244
+ >>> configuration = model.config
245
+ ```"""
246
+
247
+ model_type = "clap_audio_model"
248
+
249
+ def __init__(
250
+ self,
251
+ window_size=8,
252
+ num_mel_bins=64,
253
+ spec_size=256,
254
+ hidden_act="gelu",
255
+ patch_size=4,
256
+ patch_stride=[4, 4],
257
+ num_classes=527,
258
+ hidden_size=768,
259
+ projection_dim=512,
260
+ depths=[2, 2, 6, 2],
261
+ num_attention_heads=[4, 8, 16, 32],
262
+ enable_fusion=False,
263
+ hidden_dropout_prob=0.1,
264
+ fusion_type=None,
265
+ patch_embed_input_channels=1,
266
+ flatten_patch_embeds=True,
267
+ patch_embeds_hidden_size=96,
268
+ enable_patch_layer_norm=True,
269
+ drop_path_rate=0.0,
270
+ attention_probs_dropout_prob=0.0,
271
+ qkv_bias=True,
272
+ mlp_ratio=4.0,
273
+ aff_block_r=4,
274
+ num_hidden_layers=4,
275
+ projection_hidden_act="relu",
276
+ layer_norm_eps=1e-5,
277
+ initializer_factor=1.0,
278
+ **kwargs,
279
+ ):
280
+ super().__init__(**kwargs)
281
+ self.window_size = window_size
282
+ self.num_mel_bins = num_mel_bins
283
+ self.spec_size = spec_size
284
+ self.patch_size = patch_size
285
+ self.patch_stride = patch_stride
286
+ self.num_classes = num_classes
287
+ self.hidden_size = hidden_size
288
+ self.depths = depths
289
+ self.num_hidden_layers = num_hidden_layers
290
+ self.num_attention_heads = num_attention_heads
291
+ self.window_size = window_size
292
+ self.enable_fusion = enable_fusion
293
+ self.fusion_type = fusion_type
294
+ self.hidden_act = hidden_act
295
+ self.hidden_dropout_prob = hidden_dropout_prob
296
+ self.projection_dim = projection_dim
297
+ self.flatten_patch_embeds = flatten_patch_embeds
298
+ self.patch_embeds_hidden_size = patch_embeds_hidden_size
299
+ self.enable_patch_layer_norm = enable_patch_layer_norm
300
+ self.drop_path_rate = drop_path_rate
301
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
302
+ self.qkv_bias = qkv_bias
303
+ self.mlp_ratio = mlp_ratio
304
+ self.patch_embed_input_channels = patch_embed_input_channels
305
+ self.aff_block_r = aff_block_r
306
+ self.layer_norm_eps = layer_norm_eps
307
+ self.initializer_factor = initializer_factor
308
+ self.projection_hidden_act = projection_hidden_act
309
+
310
+ @classmethod
311
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
312
+ cls._set_token_in_kwargs(kwargs)
313
+
314
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
315
+
316
+ # get the audio config dict if we are loading from ClapConfig
317
+ if config_dict.get("model_type") == "clap":
318
+ config_dict = config_dict["audio_config"]
319
+
320
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
321
+ logger.warning(
322
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
323
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
324
+ )
325
+
326
+ return cls.from_dict(config_dict, **kwargs)
327
+
328
+
329
+ class ClapConfig(PretrainedConfig):
330
+ r"""
331
+ [`ClapConfig`] is the configuration class to store the configuration of a [`ClapModel`]. It is used to instantiate
332
+ a CLAP model according to the specified arguments, defining the text model and audio model configs. Instantiating a
333
+ configuration with the defaults will yield a similar configuration to that of the CLAP
334
+ [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.
335
+
336
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
337
+ documentation from [`PretrainedConfig`] for more information.
338
+
339
+ Args:
340
+ text_config (`dict`, *optional*):
341
+ Dictionary of configuration options used to initialize [`ClapTextConfig`].
342
+ audio_config (`dict`, *optional*):
343
+ Dictionary of configuration options used to initialize [`ClapAudioConfig`].
344
+ logit_scale_init_value (`float`, *optional*, defaults to 14.29):
345
+ The initial value of the *logit_scale* parameter. Default is used as per the original CLAP implementation.
346
+ projection_dim (`int`, *optional*, defaults to 512):
347
+ Dimensionality of text and audio projection layers.
348
+ projection_hidden_act (`str`, *optional*, defaults to `"relu"`):
349
+ Activation function for the projection layers.
350
+ initializer_factor (`float`, *optional*, defaults to 1.0):
351
+ Factor to scale the initialization of the model weights.
352
+ kwargs (*optional*):
353
+ Dictionary of keyword arguments.
354
+
355
+ Example:
356
+
357
+ ```python
358
+ >>> from transformers import ClapConfig, ClapModel
359
+
360
+ >>> # Initializing a ClapConfig with laion-ai/base style configuration
361
+ >>> configuration = ClapConfig()
362
+
363
+ >>> # Initializing a ClapModel (with random weights) from the laion-ai/base style configuration
364
+ >>> model = ClapModel(configuration)
365
+
366
+ >>> # Accessing the model configuration
367
+ >>> configuration = model.config
368
+
369
+ >>> # We can also initialize a ClapConfig from a ClapTextConfig and a ClapAudioConfig
370
+ >>> from transformers import ClapTextConfig, ClapAudioConfig
371
+
372
+ >>> # Initializing a ClapText and ClapAudioConfig configuration
373
+ >>> config_text = ClapTextConfig()
374
+ >>> config_audio = ClapAudioConfig()
375
+
376
+ >>> config = ClapConfig.from_text_audio_configs(config_text, config_audio)
377
+ ```"""
378
+
379
+ model_type = "clap"
380
+
381
+ def __init__(
382
+ self,
383
+ text_config=None,
384
+ audio_config=None,
385
+ logit_scale_init_value=(1 / 0.07),
386
+ projection_dim=512,
387
+ projection_hidden_act="relu",
388
+ initializer_factor=1.0,
389
+ **kwargs,
390
+ ):
391
+ super().__init__(**kwargs)
392
+
393
+ if text_config is None:
394
+ text_config = {}
395
+ logger.info("text_config is None. Initializing the ClapTextConfig with default values.")
396
+
397
+ if audio_config is None:
398
+ audio_config = {}
399
+ logger.info("audio_config is None. Initializing the ClapAudioConfig with default values.")
400
+
401
+ self.text_config = ClapTextConfig(**text_config)
402
+ self.audio_config = ClapAudioConfig(**audio_config)
403
+ self.text_config.projection_dim = projection_dim
404
+ self.audio_config.projection_dim = projection_dim
405
+
406
+ self.text_config.projection_hidden_act = projection_hidden_act
407
+ self.audio_config.projection_hidden_act = projection_hidden_act
408
+
409
+ self.projection_dim = projection_dim
410
+ self.projection_hidden_act = projection_hidden_act
411
+ self.hidden_size = self.text_config.hidden_size
412
+
413
+ self.logit_scale_init_value = logit_scale_init_value
414
+ self.initializer_factor = initializer_factor
415
+ self.num_hidden_layers = self.text_config.num_hidden_layers + len(self.audio_config.depths)
416
+
417
+ @classmethod
418
+ def from_text_audio_configs(cls, text_config: ClapTextConfig, audio_config: ClapAudioConfig, **kwargs):
419
+ r"""
420
+ Instantiate a [`ClapConfig`] (or a derived class) from clap text model configuration and clap audio model
421
+ configuration.
422
+
423
+ Returns:
424
+ [`ClapConfig`]: An instance of a configuration object
425
+ """
426
+
427
+ return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs)
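To make the relationship between the three configuration classes above concrete, the sketch below composes a `ClapConfig` from explicit sub-configs via `from_text_audio_configs`; the hyperparameter values are arbitrary and chosen only for illustration.

```python
# Composing the nested CLAP configs defined above; the values are arbitrary examples.
from transformers import ClapAudioConfig, ClapConfig, ClapTextConfig

text_cfg = ClapTextConfig(num_hidden_layers=4, hidden_size=256)
audio_cfg = ClapAudioConfig(depths=[2, 2], num_attention_heads=[4, 8], patch_embeds_hidden_size=48)

clap_cfg = ClapConfig.from_text_audio_configs(text_cfg, audio_cfg, projection_dim=256)

# ClapConfig.__init__ pushes projection_dim and projection_hidden_act down into both sub-configs.
assert clap_cfg.text_config.projection_dim == clap_cfg.audio_config.projection_dim == 256
print(clap_cfg.num_hidden_layers)  # text layers + number of audio Swin stages: 4 + 2 = 6
```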
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,133 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import re
18
+
19
+ from laion_clap import CLAP_Module
20
+
21
+ from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
22
+
23
+
24
+ KEYS_TO_MODIFY_MAPPING = {
25
+ "text_branch": "text_model",
26
+ "audio_branch": "audio_model.audio_encoder",
27
+ "attn": "attention.self",
28
+ "self.proj": "output.dense",
29
+ "attention.self_mask": "attn_mask",
30
+ "mlp.fc1": "intermediate.dense",
31
+ "mlp.fc2": "output.dense",
32
+ "norm1": "layernorm_before",
33
+ "norm2": "layernorm_after",
34
+ "bn0": "batch_norm",
35
+ }
36
+
37
+ processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
38
+
39
+
40
+ def init_clap(checkpoint_path, model_type, enable_fusion=False):
41
+ model = CLAP_Module(
42
+ amodel=model_type,
43
+ enable_fusion=enable_fusion,
44
+ )
45
+ model.load_ckpt(checkpoint_path)
46
+ return model
47
+
48
+
49
+ def get_config_from_original(clap_model):
50
+ audio_config = {
51
+ "patch_embeds_hidden_size": clap_model.model.audio_branch.embed_dim,
52
+ "depths": clap_model.model.audio_branch.depths,
53
+ "hidden_size": clap_model.model.audio_projection[0].in_features,
54
+ }
55
+
56
+ text_config = {"hidden_size": clap_model.model.text_branch.pooler.dense.in_features}
57
+
58
+ return ClapConfig(audio_config=audio_config, text_config=text_config)
59
+
60
+
61
+ def rename_state_dict(state_dict):
62
+ model_state_dict = {}
63
+
64
+ sequential_layers_pattern = r".*sequential.(\d+).*"
65
+ text_projection_pattern = r".*_projection.(\d+).*"
66
+
67
+ for key, value in state_dict.items():
68
+ # check if any key needs to be modified
69
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
70
+ if key_to_modify in key:
71
+ key = key.replace(key_to_modify, new_key)
72
+
73
+ if re.match(sequential_layers_pattern, key):
74
+ # replace sequential layers with list
75
+ sequential_layer = re.match(sequential_layers_pattern, key).group(1)
76
+
77
+ key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
78
+ elif re.match(text_projection_pattern, key):
79
+ projecton_layer = int(re.match(text_projection_pattern, key).group(1))
80
+
81
+ # Because in CLAP they use `nn.Sequential`...
82
+ transformers_projection_layer = 1 if projecton_layer == 0 else 2
83
+
84
+ key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
85
+
86
+ if "audio" in key and "qkv" in key:
87
+ # split qkv into query key and value
88
+ mixed_qkv = value
89
+ qkv_dim = mixed_qkv.size(0) // 3
90
+
91
+ query_layer = mixed_qkv[:qkv_dim]
92
+ key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
93
+ value_layer = mixed_qkv[qkv_dim * 2 :]
94
+
95
+ model_state_dict[key.replace("qkv", "query")] = query_layer
96
+ model_state_dict[key.replace("qkv", "key")] = key_layer
97
+ model_state_dict[key.replace("qkv", "value")] = value_layer
98
+ else:
99
+ model_state_dict[key] = value
100
+
101
+ return model_state_dict
102
+
103
+
104
+ def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, model_type, enable_fusion=False):
105
+ clap_model = init_clap(checkpoint_path, model_type, enable_fusion=enable_fusion)
106
+
107
+ clap_model.eval()
108
+ state_dict = clap_model.model.state_dict()
109
+ state_dict = rename_state_dict(state_dict)
110
+
111
+ transformers_config = get_config_from_original(clap_model)
112
+ transformers_config.audio_config.enable_fusion = enable_fusion
113
+ model = ClapModel(transformers_config)
114
+
115
+ # ignore the spectrogram embedding layer
116
+ model.load_state_dict(state_dict, strict=False)
117
+
118
+ model.save_pretrained(pytorch_dump_folder_path)
119
+ transformers_config.save_pretrained(pytorch_dump_folder_path)
120
+
121
+
122
+ if __name__ == "__main__":
123
+ parser = argparse.ArgumentParser()
124
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
125
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the original laion_clap checkpoint")
126
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
127
+ parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
128
+ parser.add_argument("--model_type", default="HTSAT-tiny", type=str, help="Audio model type of the original checkpoint, e.g. HTSAT-tiny")
129
+ args = parser.parse_args()
130
+
131
+ convert_clap_checkpoint(
132
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.model_type, args.enable_fusion
133
+ )
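The least obvious step in `rename_state_dict` above is the qkv split: the original Swin-style audio attention stores the query, key and value weights stacked in a single tensor, while the converted model expects three separate tensors. A toy illustration of the slicing (tensor shapes are made up for the example):

```python
# Toy illustration of the qkv split performed in rename_state_dict; shapes are invented.
import torch

hidden = 8
mixed_qkv = torch.randn(3 * hidden, hidden)  # stacked [query; key; value] weight matrix
qkv_dim = mixed_qkv.size(0) // 3

query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (hidden, hidden)
```

In the script, each slice is stored under a key where `qkv` is replaced by `query`, `key` or `value`, matching the converted model's parameter names.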
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py ADDED
@@ -0,0 +1,363 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for CLAP."""
16
+
17
+
18
+ import copy
19
+ from typing import Any, Dict, List, Optional, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+
24
+ from ...audio_utils import mel_filter_bank, spectrogram, window_function
25
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
26
+ from ...feature_extraction_utils import BatchFeature
27
+ from ...utils import TensorType, logging
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ class ClapFeatureExtractor(SequenceFeatureExtractor):
34
+ r"""
35
+ Constructs a CLAP feature extractor.
36
+
37
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
38
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
39
+
40
+ This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the *Short Time
41
+ Fourier Transform* (STFT) which should match pytorch's `torch.stft` equivalent.
42
+
43
+ Args:
44
+ feature_size (`int`, *optional*, defaults to 64):
45
+ The feature dimension of the extracted Mel spectrograms. This corresponds to the number of mel filters
46
+ (`n_mels`).
47
+ sampling_rate (`int`, *optional*, defaults to 48000):
48
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz). This only serves
49
+ to warn users if the audio fed to the feature extractor does not have the same sampling rate.
50
+ hop_length (`int`,*optional*, defaults to 480):
51
+ Length of the overlapping windows for the STFT used to obtain the Mel Spectrogram. The audio will be split
52
+ in smaller `frames` with a step of `hop_length` between each frame.
53
+ max_length_s (`int`, *optional*, defaults to 10):
54
+ The maximum input length of the model in seconds. This is used to pad the audio.
55
+ fft_window_size (`int`, *optional*, defaults to 1024):
56
+ Size of the window (in samples) on which the Fourier transform is applied. This controls the frequency
57
+ resolution of the spectrogram. For example, 400 means that the Fourier transform is computed on windows of 400 samples.
58
+ padding_value (`float`, *optional*, defaults to 0.0):
59
+ Padding value used to pad the audio. Should correspond to silences.
60
+ return_attention_mask (`bool`, *optional*, defaults to `False`):
61
+ Whether or not the model should return the attention masks corresponding to the input.
62
+ frequency_min (`float`, *optional*, defaults to 0):
63
+ The lowest frequency of interest. The STFT will not be computed for values below this.
64
+ frequency_max (`float`, *optional*, defaults to 14000):
65
+ The highest frequency of interest. The STFT will not be computed for values above this.
66
+ top_db (`float`, *optional*):
67
+ The highest decibel value used to convert the mel spectrogram to the log scale. For more details see the
68
+ `audio_utils.power_to_db` function
69
+ truncation (`str`, *optional*, defaults to `"fusion"`):
70
+ Truncation pattern for long audio inputs. Two patterns are available:
71
+ - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a
72
+ downsampled version of the entire mel spectrogram.
73
+ If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a copy
74
+ of the original mel obtained from the padded audio.
75
+ - `rand_trunc` will select a random crop of the mel spectrogram.
76
+ padding (`str`, *optional*, defaults to `"repeatpad"`):
77
+ Padding pattern for shorter audio inputs. Three patterns were originally implemented:
78
+ - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
79
+ - `repeat`: the audio is repeated and then cut to fit the `max_length`
80
+ - `pad`: the audio is padded.
81
+ """
82
+
83
+ model_input_names = ["input_features", "is_longer"]
84
+
85
+ def __init__(
86
+ self,
87
+ feature_size=64,
88
+ sampling_rate=48_000,
89
+ hop_length=480,
90
+ max_length_s=10,
91
+ fft_window_size=1024,
92
+ padding_value=0.0,
93
+ return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask
94
+ frequency_min: float = 0,
95
+ frequency_max: float = 14_000,
96
+ top_db: int = None,
97
+ truncation: str = "fusion",
98
+ padding: str = "repeatpad",
99
+ **kwargs,
100
+ ):
101
+ super().__init__(
102
+ feature_size=feature_size,
103
+ sampling_rate=sampling_rate,
104
+ padding_value=padding_value,
105
+ return_attention_mask=return_attention_mask,
106
+ **kwargs,
107
+ )
108
+ self.top_db = top_db
109
+ self.truncation = truncation
110
+ self.padding = padding
111
+ self.fft_window_size = fft_window_size
112
+ self.nb_frequency_bins = (fft_window_size >> 1) + 1
113
+ self.hop_length = hop_length
114
+ self.max_length_s = max_length_s
115
+ self.nb_max_samples = max_length_s * sampling_rate
116
+ self.sampling_rate = sampling_rate
117
+ self.frequency_min = frequency_min
118
+ self.frequency_max = frequency_max
119
+ self.mel_filters = mel_filter_bank(
120
+ num_frequency_bins=self.nb_frequency_bins,
121
+ num_mel_filters=feature_size,
122
+ min_frequency=frequency_min,
123
+ max_frequency=frequency_max,
124
+ sampling_rate=sampling_rate,
125
+ norm=None,
126
+ mel_scale="htk",
127
+ )
128
+ self.mel_filters_slaney = mel_filter_bank(
129
+ num_frequency_bins=self.nb_frequency_bins,
130
+ num_mel_filters=feature_size,
131
+ min_frequency=frequency_min,
132
+ max_frequency=frequency_max,
133
+ sampling_rate=sampling_rate,
134
+ norm="slaney",
135
+ mel_scale="slaney",
136
+ )
137
+
138
+ def to_dict(self) -> Dict[str, Any]:
139
+ """
140
+ Serializes this instance to a Python dictionary.
141
+
142
+ Returns:
143
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the
144
+ mel filter banks, which do not need to be saved or printed as they are too long.
145
+ """
146
+ output = copy.deepcopy(self.__dict__)
147
+ output["feature_extractor_type"] = self.__class__.__name__
148
+ if "mel_filters" in output:
149
+ del output["mel_filters"]
150
+ if "mel_filters_slaney" in output:
151
+ del output["mel_filters_slaney"]
152
+ return output
153
+
154
+ def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
155
+ """
156
+ Compute the log-mel spectrogram of the provided `waveform` using the Hann window. In CLAP, two different filter
157
+ banks are used depending on the truncation pattern:
158
+ - `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from
159
+ calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation`
160
+ is set to `"fusion"`.
161
+ - `self.mel_filters_slaney`: they correspond to the default parameters of `librosa`, which uses
162
+ `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original
163
+ implementation when the truncation mode is not `"fusion"`.
164
+ """
165
+ log_mel_spectrogram = spectrogram(
166
+ waveform,
167
+ window_function(self.fft_window_size, "hann"),
168
+ frame_length=self.fft_window_size,
169
+ hop_length=self.hop_length,
170
+ power=2.0,
171
+ mel_filters=mel_filters,
172
+ log_mel="dB",
173
+ )
174
+ return log_mel_spectrogram.T
175
+
176
+ def _random_mel_fusion(self, mel, total_frames, chunk_frames):
177
+ ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
178
+ if len(ranges[1]) == 0:
179
+ # if the audio is too short, we just use the first chunk
180
+ ranges[1] = [0]
181
+ if len(ranges[2]) == 0:
182
+ # if the audio is too short, we just use the first chunk
183
+ ranges[2] = [0]
184
+ # randomly choose index for each part
185
+ idx_front = np.random.choice(ranges[0])
186
+ idx_middle = np.random.choice(ranges[1])
187
+ idx_back = np.random.choice(ranges[2])
188
+
189
+ mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
190
+ mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
191
+ mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
192
+
193
+ mel = torch.tensor(mel[None, None, :])
194
+ mel_shrink = torch.nn.functional.interpolate(
195
+ mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
196
+ )
197
+ mel_shrink = mel_shrink[0][0].numpy()
198
+ mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
199
+ return mel_fusion
200
+
201
+ def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
202
+ """
203
+ Extracts the mel spectrogram and prepares it for the mode based on the `truncation` and `padding` arguments.
204
+ Four different paths are possible:
205
+ - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram
206
+ will be computed on the entire audio. 3 random crops and a downsampled version of the full mel spectrogram
207
+ are then stacked together. They will later be used for `feature_fusion`.
208
+ - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is
209
+ padded based on `padding`.
210
+ - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded
211
+ based on `padding`, and is repeated `4` times.
212
+ - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel
213
+ spectrogram will be computed on a random crop of the waveform.
214
+
215
+ """
216
+ if waveform.shape[0] > max_length:
217
+ if truncation == "rand_trunc":
218
+ longer = True
219
+ # random crop to max_length (for compatibility) -> this should be handled by self.pad
220
+ overflow = len(waveform) - max_length
221
+ idx = np.random.randint(0, overflow + 1)
222
+ waveform = waveform[idx : idx + max_length]
223
+ input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
224
+ elif truncation == "fusion":
225
+ mel = self._np_extract_fbank_features(waveform, self.mel_filters)
226
+ chunk_frames = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
227
+ total_frames = mel.shape[0]
228
+ if chunk_frames == total_frames:
229
+ # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
230
+ # In this case, we just use the whole audio.
231
+ input_mel = np.stack([mel, mel, mel, mel], axis=0)
232
+ longer = False
233
+ else:
234
+ input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
235
+ longer = True
236
+ else:
237
+ raise NotImplementedError(f"data_truncating {truncation} not implemented")
238
+
239
+ else:
240
+ longer = False
241
+ # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
242
+ if waveform.shape[0] < max_length:
243
+ if padding == "repeat":
244
+ n_repeat = int(max_length / len(waveform))
245
+ waveform = np.tile(waveform, n_repeat + 1)[:max_length]
246
+ if padding == "repeatpad":
247
+ n_repeat = int(max_length / len(waveform))
248
+ waveform = np.tile(waveform, n_repeat)
249
+ waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
250
+
251
+ if truncation == "fusion":
252
+ input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
253
+ input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
254
+ else:
255
+ input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
256
+
257
+ return input_mel, longer
258
+
259
+ def __call__(
260
+ self,
261
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
262
+ truncation: str = None,
263
+ padding: Optional[str] = None,
264
+ max_length: Optional[int] = None,
265
+ sampling_rate: Optional[int] = None,
266
+ return_tensors: Optional[Union[str, TensorType]] = None,
267
+ **kwargs,
268
+ ) -> BatchFeature:
269
+ """
270
+ Main method to featurize and prepare for the model one or several sequence(s).
271
+
272
+ Args:
273
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
274
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
275
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
276
+ stereo, i.e. single float per timestep.
277
+ truncation (`str`, *optional*):
278
+ Truncation pattern for long audio inputs. Two patterns are available:
279
+ - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and
280
+ a downsampled version of the entire mel spectrogram.
281
+ If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a
282
+ copy of the original mel obtained from the padded audio.
283
+ - `rand_trunc` will select a random crop of the mel spectrogram.
284
+ padding (`str`, *optional*):
285
+ Padding pattern for shorter audio inputs. Three patterns were originally implemented:
286
+ - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
287
+ - `repeat`: the audio is repeated and then cut to fit the `max_length`
288
+ - `pad`: the audio is padded.
289
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
290
+ If set, will return tensors instead of list of python integers. Acceptable values are:
291
+
292
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
293
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
294
+ - `'np'`: Return Numpy `np.ndarray` objects.
295
+ sampling_rate (`int`, *optional*):
296
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
297
+ `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
298
+ pipeline.
299
+ """
300
+ truncation = truncation if truncation is not None else self.truncation
301
+ padding = padding if padding else self.padding
302
+
303
+ if sampling_rate is not None:
304
+ if sampling_rate != self.sampling_rate:
305
+ raise ValueError(
306
+ f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
307
+ f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
308
+ f" was sampled with {self.sampling_rate} and not {sampling_rate}."
309
+ )
310
+ else:
311
+ logger.warning(
312
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
313
+ "Failing to do so can result in silent errors that might be hard to debug."
314
+ )
315
+
316
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
317
+ if is_batched_numpy and len(raw_speech.shape) > 2:
318
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
319
+ is_batched = is_batched_numpy or (
320
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
321
+ )
322
+
323
+ if is_batched:
324
+ raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
325
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
326
+ raw_speech = np.asarray(raw_speech, dtype=np.float64)
327
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
328
+ raw_speech = raw_speech.astype(np.float64)
329
+
330
+ # always return batch
331
+ if not is_batched:
332
+ raw_speech = [np.asarray(raw_speech)]
333
+
334
+ # convert to mel spectrogram, truncate and pad if needed.
335
+ padded_inputs = [
336
+ self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
337
+ for waveform in raw_speech
338
+ ]
339
+
340
+ input_mel = []
341
+ is_longer = []
342
+ for mel, longer in padded_inputs:
343
+ input_mel.append(mel)
344
+ is_longer.append(longer)
345
+
346
+ if truncation == "fusion" and sum(is_longer) == 0:
347
+ # if no audio is longer than 10s, then randomly select one audio to be longer
348
+ rand_idx = np.random.randint(0, len(input_mel))
349
+ is_longer[rand_idx] = True
350
+
351
+ if isinstance(input_mel[0], List):
352
+ input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
353
+
354
+ # is_longer is a list of bool
355
+ is_longer = [[longer] for longer in is_longer]
356
+
357
+ input_features = {"input_features": input_mel, "is_longer": is_longer}
358
+ input_features = BatchFeature(input_features)
359
+
360
+ if return_tensors is not None:
361
+ input_features = input_features.convert_to_tensors(return_tensors)
362
+
363
+ return input_features
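A short usage sketch of the feature extractor above with its default settings; the 440 Hz sine input is synthetic and only there to make the example self-contained (requires `transformers` with torch and NumPy).

```python
# Usage sketch of ClapFeatureExtractor with default settings; the input audio is synthetic.
import numpy as np
from transformers import ClapFeatureExtractor

feature_extractor = ClapFeatureExtractor()  # 64 mel bins, 48 kHz, 10 s max length, "fusion" truncation

# 3 seconds of a 440 Hz sine tone at the expected 48 kHz sampling rate
audio = np.sin(2 * np.pi * 440 * np.arange(3 * 48_000) / 48_000)

inputs = feature_extractor(audio, sampling_rate=48_000, return_tensors="np")
print(inputs["input_features"].shape)  # (1, 4, frames, 64): four stacked mels per clip in "fusion" mode
print(inputs["is_longer"])             # one entry is forced to True when no clip exceeds max_length_s
```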
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py ADDED
The diff for this file is too large to render. See the raw diff.
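Since the `modeling_clap.py` diff is not rendered, here is a brief, hedged sketch of how the model classes it adds are typically used through the public `transformers` API; the `laion/clap-htsat-unfused` checkpoint must be downloaded from the Hub.

```python
# Hedged sketch of ClapModel usage; the modeling code itself is not shown in this view.
import torch
from transformers import ClapModel, ClapProcessor

model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

inputs = processor(text=["a dog barking", "rain falling"], return_tensors="pt", padding=True)
with torch.no_grad():
    text_embeds = model.get_text_features(**inputs)  # projected text embeddings
print(text_embeds.shape)  # (2, projection_dim)
```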
llmeval-env/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py ADDED
@@ -0,0 +1,117 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Audio/Text processor class for CLAP
17
+ """
18
+
19
+ from ...processing_utils import ProcessorMixin
20
+ from ...tokenization_utils_base import BatchEncoding
21
+
22
+
23
+ class ClapProcessor(ProcessorMixin):
24
+ r"""
25
+ Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
26
+
27
+ [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the
28
+ [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.
29
+
30
+ Args:
31
+ feature_extractor ([`ClapFeatureExtractor`]):
32
+ The audio processor is a required input.
33
+ tokenizer ([`RobertaTokenizerFast`]):
34
+ The tokenizer is a required input.
35
+ """
36
+
37
+ feature_extractor_class = "ClapFeatureExtractor"
38
+ tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
39
+
40
+ def __init__(self, feature_extractor, tokenizer):
41
+ super().__init__(feature_extractor, tokenizer)
42
+
43
+ def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
44
+ """
45
+ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `text`
46
+ and `kwargs` arguments to RobertaTokenizerFast's [`~RobertaTokenizerFast.__call__`] if `text` is not `None` to
47
+ encode the text. To prepare the audio(s), this method forwards the `audios` and `kwargs` arguments to
48
+ ClapFeatureExtractor's [`~ClapFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the
49
+ docstrings of the above two methods for more information.
50
+
51
+ Args:
52
+ text (`str`, `List[str]`, `List[List[str]]`):
53
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
54
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
55
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
56
+ audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
57
+ The audio or batch of audios to be prepared. Each audio can be a NumPy array or a PyTorch tensor. In case
58
+ of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is the number of channels,
59
+ and T the sample length of the audio.
60
+
61
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
62
+ If set, will return tensors of a particular framework. Acceptable values are:
63
+
64
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
65
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
66
+ - `'np'`: Return NumPy `np.ndarray` objects.
67
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
68
+
69
+ Returns:
70
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
71
+
72
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
73
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
74
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
75
+ `None`).
76
+ - **audio_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`.
77
+ """
78
+ sampling_rate = kwargs.pop("sampling_rate", None)
79
+
80
+ if text is None and audios is None:
81
+ raise ValueError("You have to specify either text or audios. Both cannot be none.")
82
+
83
+ if text is not None:
84
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
85
+
86
+ if audios is not None:
87
+ audio_features = self.feature_extractor(
88
+ audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
89
+ )
90
+
91
+ if text is not None and audios is not None:
92
+ encoding["input_features"] = audio_features.input_features
93
+ return encoding
94
+ elif text is not None:
95
+ return encoding
96
+ else:
97
+ return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
98
+
99
+ def batch_decode(self, *args, **kwargs):
100
+ """
101
+ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
102
+ refer to the docstring of this method for more information.
103
+ """
104
+ return self.tokenizer.batch_decode(*args, **kwargs)
105
+
106
+ def decode(self, *args, **kwargs):
107
+ """
108
+ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer
109
+ to the docstring of this method for more information.
110
+ """
111
+ return self.tokenizer.decode(*args, **kwargs)
112
+
113
+ @property
114
+ def model_input_names(self):
115
+ tokenizer_input_names = self.tokenizer.model_input_names
116
+ feature_extractor_input_names = self.feature_extractor.model_input_names
117
+ return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
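For context, a hedged usage sketch of the processor defined above (the checkpoint name and 48 kHz audio are assumptions; random noise stands in for real recordings):

```python
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

texts = ["the sound of a dog barking", "rain on a window"]
audios = [np.random.randn(48_000 * 5) for _ in texts]  # ~5 s per clip

batch = processor(text=texts, audios=audios, sampling_rate=48_000, return_tensors="pt")
# input_ids / attention_mask come from the RoBERTa tokenizer; input_features is merged
# in from the feature extractor, exactly as done in __call__ above.
print(sorted(batch.keys()))
```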
llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__init__.py ADDED
@@ -0,0 +1,77 @@
1
+ # Copyright 2024 Cohere and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_sentencepiece_available,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_cohere": ["COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CohereConfig"],
27
+ }
28
+
29
+
30
+ try:
31
+ if not is_tokenizers_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_cohere_fast"] = ["CohereTokenizerFast"]
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_cohere"] = [
45
+ "CohereForCausalLM",
46
+ "CohereModel",
47
+ "CoherePreTrainedModel",
48
+ ]
49
+
50
+
51
+ if TYPE_CHECKING:
52
+ from .configuration_cohere import COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP, CohereConfig
53
+
54
+ try:
55
+ if not is_tokenizers_available():
56
+ raise OptionalDependencyNotAvailable()
57
+ except OptionalDependencyNotAvailable:
58
+ pass
59
+ else:
60
+ from .tokenization_cohere_fast import CohereTokenizerFast
61
+
62
+ try:
63
+ if not is_torch_available():
64
+ raise OptionalDependencyNotAvailable()
65
+ except OptionalDependencyNotAvailable:
66
+ pass
67
+ else:
68
+ from .modeling_cohere import (
69
+ CohereForCausalLM,
70
+ CohereModel,
71
+ CoherePreTrainedModel,
72
+ )
73
+
74
+ else:
75
+ import sys
76
+
77
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
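The `__init__.py` above only registers the tokenizers- and torch-backed symbols when those optional backends are importable. A rough standalone sketch of the same guard pattern, using `importlib.util.find_spec` as a stand-in for transformers' `is_*_available` helpers:

```python
import importlib.util

_import_structure = {"configuration_cohere": ["CohereConfig"]}

# register the torch-backed classes only when torch can actually be imported
if importlib.util.find_spec("torch") is not None:
    _import_structure["modeling_cohere"] = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]

print(_import_structure)
```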
llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.13 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/configuration_cohere.cpython-310.pyc ADDED
Binary file (6.13 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/modeling_cohere.cpython-310.pyc ADDED
Binary file (36.4 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/tokenization_cohere_fast.cpython-310.pyc ADDED
Binary file (36.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/configuration_cohere.py ADDED
@@ -0,0 +1,159 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Cohere team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ Cohere model configuration"""
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
29
+
30
+
31
+ class CohereConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate a Cohere
34
+ model according to the specified arguments, defining the model architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information. Instantiating a configuration
38
+ with the defaults will yield a similar configuration to that of the [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 256000):
43
+ Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by the
44
+ `inputs_ids` passed when calling [`CohereModel`]
45
+ hidden_size (`int`, *optional*, defaults to 8192):
46
+ Dimension of the hidden representations.
47
+ intermediate_size (`int`, *optional*, defaults to 22528):
48
+ Dimension of the MLP representations.
49
+ logit_scale (`float`, *optional*, defaults to 0.0625):
50
+ The scaling factor for the output logits.
51
+ num_hidden_layers (`int`, *optional*, defaults to 40):
52
+ Number of hidden layers in the Transformer decoder.
53
+ num_attention_heads (`int`, *optional*, defaults to 64):
54
+ Number of attention heads for each attention layer in the Transformer decoder.
55
+ num_key_value_heads (`int`, *optional*):
56
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
57
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
58
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
59
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
60
+ by meanpooling all the original heads within that group. For more details, check out [this
61
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
62
+ `num_attention_heads`.
63
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
64
+ The non-linear activation function (function or string) in the decoder.
65
+ max_position_embeddings (`int`, *optional*, defaults to 8192):
66
+ The maximum sequence length that this model might ever be used with.
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
70
+ The epsilon used by the layer normalization.
71
+ use_cache (`bool`, *optional*, defaults to `True`):
72
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
73
+ relevant if `config.is_decoder=True`.
74
+ pad_token_id (`int`, *optional*, defaults to 0):
75
+ Padding token id.
76
+ bos_token_id (`int`, *optional*, defaults to 5):
77
+ Beginning of stream token id.
78
+ eos_token_id (`int`, *optional*, defaults to 255001):
79
+ End of stream token id.
80
+ tie_word_embeddings (`bool`, *optional*, defaults to `True`):
81
+ Whether to tie the input and output word embeddings.
82
+ rope_theta (`float`, *optional*, defaults to 10000.0):
83
+ The base period of the RoPE embeddings.
84
+ attention_bias (`bool`, *optional*, defaults to `False`):
85
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
86
+ attention_dropout (`float`, *optional*, defaults to 0.0):
87
+ The dropout ratio for the attention probabilities.
88
+ use_qk_norm (`bool`, *optional*, defaults to `False`):
89
+ Whether to use query-key normalization in the attention layers.
90
+
91
+ ```python
92
+ >>> from transformers import CohereModel, CohereConfig
93
+
94
+ >>> # Initializing a Cohere model configuration
95
+ >>> configuration = CohereConfig()
96
+
97
+ >>> # Initializing a model from the Cohere configuration
98
+ >>> model = CohereModel(configuration) # doctest: +SKIP
99
+
100
+ >>> # Accessing the model configuration
101
+ >>> configuration = model.config # doctest: +SKIP
102
+ ```"""
103
+
104
+ model_type = "cohere"
105
+ keys_to_ignore_at_inference = ["past_key_values"]
106
+
107
+ def __init__(
108
+ self,
109
+ vocab_size=256000,
110
+ hidden_size=8192,
111
+ intermediate_size=22528,
112
+ logit_scale=0.0625,
113
+ num_hidden_layers=40,
114
+ num_attention_heads=64,
115
+ num_key_value_heads=None,
116
+ hidden_act="silu",
117
+ max_position_embeddings=8192,
118
+ initializer_range=0.02,
119
+ layer_norm_eps=1e-5,
120
+ use_cache=True,
121
+ pad_token_id=0,
122
+ bos_token_id=5,
123
+ eos_token_id=255001,
124
+ tie_word_embeddings=True,
125
+ rope_theta=10000.0,
126
+ attention_bias=False,
127
+ attention_dropout=0.0,
128
+ use_qk_norm=False,
129
+ **kwargs,
130
+ ):
131
+ self.vocab_size = vocab_size
132
+ self.max_position_embeddings = max_position_embeddings
133
+ self.hidden_size = hidden_size
134
+ self.logit_scale = logit_scale
135
+ self.intermediate_size = intermediate_size
136
+ self.num_hidden_layers = num_hidden_layers
137
+ self.num_attention_heads = num_attention_heads
138
+
139
+ # for backward compatibility
140
+ if num_key_value_heads is None:
141
+ num_key_value_heads = num_attention_heads
142
+
143
+ self.num_key_value_heads = num_key_value_heads
144
+ self.hidden_act = hidden_act
145
+ self.initializer_range = initializer_range
146
+ self.layer_norm_eps = layer_norm_eps
147
+ self.use_cache = use_cache
148
+ self.rope_theta = rope_theta
149
+ self.attention_bias = attention_bias
150
+ self.attention_dropout = attention_dropout
151
+ self.use_qk_norm = use_qk_norm
152
+
153
+ super().__init__(
154
+ pad_token_id=pad_token_id,
155
+ bos_token_id=bos_token_id,
156
+ eos_token_id=eos_token_id,
157
+ tie_word_embeddings=tie_word_embeddings,
158
+ **kwargs,
159
+ )
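To make the GQA-related arguments documented above concrete, here is a hedged sketch that builds a deliberately small configuration (all values are toy numbers, not those of the released checkpoint):

```python
from transformers import CohereConfig

config = CohereConfig(
    hidden_size=512,
    intermediate_size=1024,
    num_hidden_layers=4,
    num_attention_heads=8,
    num_key_value_heads=2,   # 8 query heads share 2 key/value heads -> grouped-query attention
    use_qk_norm=True,
    max_position_embeddings=2048,
)
assert config.num_key_value_heads == 2
print(config.logit_scale)    # 0.0625 by default, applied to the output logits
```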
llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/modeling_cohere.py ADDED
@@ -0,0 +1,1266 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Cohere team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+
21
+ # This file is based on the Llama model definition file in transformers.
22
+
23
+ """PyTorch Cohere model."""
24
+
25
+ import math
26
+ import warnings
27
+ from typing import List, Optional, Tuple, Union
28
+
29
+ import torch
30
+ import torch.nn.functional as F
31
+ import torch.utils.checkpoint
32
+ from torch import nn
33
+ from torch.nn import CrossEntropyLoss
34
+
35
+ from ...activations import ACT2FN
36
+ from ...cache_utils import Cache, DynamicCache, StaticCache
37
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
38
+ from ...modeling_outputs import (
39
+ BaseModelOutputWithPast,
40
+ CausalLMOutputWithPast,
41
+ )
42
+ from ...modeling_utils import PreTrainedModel
43
+ from ...pytorch_utils import ALL_LAYERNORM_LAYERS
44
+ from ...utils import (
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ is_flash_attn_2_available,
48
+ is_flash_attn_greater_or_equal_2_10,
49
+ logging,
50
+ replace_return_docstrings,
51
+ )
52
+ from .configuration_cohere import CohereConfig
53
+
54
+
55
+ if is_flash_attn_2_available():
56
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
57
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+ _CONFIG_FOR_DOC = "CohereConfig"
63
+
64
+
65
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
66
+ def _get_unpad_data(attention_mask):
67
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
68
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
69
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
70
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
71
+ return (
72
+ indices,
73
+ cu_seqlens,
74
+ max_seqlen_in_batch,
75
+ )
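A toy walk-through of `_get_unpad_data` above (a standalone sketch, not part of the diff): for a right-padded batch it returns the flat indices of the real tokens, the cumulative sequence lengths expected by flash-attention's varlen kernels, and the longest sequence length.

```python
import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])                  # two right-padded sequences
seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)        # tensor([3, 2])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))

print(indices.tolist())      # [0, 1, 2, 4, 5] -> positions of the non-padding tokens
print(cu_seqlens.tolist())   # [0, 3, 5]       -> boundaries of each packed sequence
print(seqlens.max().item())  # 3
```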
76
+
77
+
78
+ class CohereLayerNorm(nn.Module):
79
+ def __init__(self, hidden_size=None, eps=1e-5, bias=False):
80
+ """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
81
+ super().__init__()
82
+ self.weight = nn.Parameter(torch.ones(hidden_size))
83
+ self.variance_epsilon = eps
84
+
85
+ def forward(self, hidden_states):
86
+ input_dtype = hidden_states.dtype
87
+ hidden_states = hidden_states.to(torch.float32)
88
+ mean = hidden_states.mean(-1, keepdim=True)
89
+ variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
90
+ hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
91
+ hidden_states = self.weight.to(torch.float32) * hidden_states
92
+ return hidden_states.to(input_dtype)
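`CohereLayerNorm` above is a bias-free LayerNorm computed in float32. A rough numerical check (a standalone sketch under toy shapes) that its math matches `F.layer_norm` over the last dimension with `bias=None`:

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 5, 16)
weight = torch.randn(16)
eps = 1e-5

mean = x.mean(-1, keepdim=True)
variance = (x - mean).pow(2).mean(-1, keepdim=True)
manual = weight * (x - mean) * torch.rsqrt(variance + eps)   # same math as the forward above

reference = F.layer_norm(x, normalized_shape=(16,), weight=weight, bias=None, eps=eps)
print(torch.allclose(manual, reference, atol=1e-5))          # True
```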
93
+
94
+
95
+ ALL_LAYERNORM_LAYERS.append(CohereLayerNorm)
96
+
97
+
98
+ class CohereRotaryEmbedding(nn.Module):
99
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
100
+ super().__init__()
101
+ self.scaling_factor = scaling_factor
102
+ self.dim = dim
103
+ self.max_position_embeddings = max_position_embeddings
104
+ self.base = base
105
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
106
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
107
+
108
+ @torch.no_grad()
109
+ def forward(self, x, position_ids):
110
+ # x: [bs, num_attention_heads, seq_len, head_size]
111
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
112
+ position_ids_expanded = position_ids[:, None, :].float()
113
+
114
+ # Force float32 since bfloat16 loses precision on long contexts
115
+ # See https://github.com/huggingface/transformers/pull/29285
116
+ device_type = x.device.type
117
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
118
+ with torch.autocast(device_type=device_type, enabled=False):
119
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
120
+ emb = torch.repeat_interleave(freqs, 2, dim=-1)
121
+ cos = emb.cos()
122
+ sin = emb.sin()
123
+ return cos, sin
124
+
125
+
126
+ def rotate_half(x):
127
+ # Split and rotate
128
+ x1 = x[..., ::2]
129
+ x2 = x[..., 1::2]
130
+ rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
131
+ return rot_x
132
+
133
+
134
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
135
+ """Applies Rotary Position Embedding to the query and key tensors.
136
+
137
+ Args:
138
+ q (`torch.Tensor`): The query tensor.
139
+ k (`torch.Tensor`): The key tensor.
140
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
141
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
142
+ position_ids (`torch.Tensor`, *optional*):
143
+ Deprecated and unused.
144
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
145
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
146
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
147
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
148
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
149
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
150
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
151
+ Returns:
152
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
153
+ """
154
+ dtype = q.dtype
155
+ q = q.float()
156
+ k = k.float()
157
+ cos = cos.unsqueeze(unsqueeze_dim)
158
+ sin = sin.unsqueeze(unsqueeze_dim)
159
+ q_embed = (q * cos) + (rotate_half(q) * sin)
160
+ k_embed = (k * cos) + (rotate_half(k) * sin)
161
+ return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)
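Note that `rotate_half` above interleaves even/odd dimensions (rotating the pairs `(x[0], x[1]), (x[2], x[3]), ...`) rather than splitting the head dimension in half as in the Llama implementation, and the rotary embedding repeats each frequency for the two members of a pair. A hedged, toy-sized sketch of the resulting rotation:

```python
import torch

head_dim, seq_len, base = 8, 4, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))  # (head_dim // 2,)
positions = torch.arange(seq_len).float()

freqs = torch.outer(positions, inv_freq)          # (seq_len, head_dim // 2)
emb = torch.repeat_interleave(freqs, 2, dim=-1)   # (seq_len, head_dim): each pair shares an angle
cos, sin = emb.cos(), emb.sin()

q = torch.randn(1, 1, seq_len, head_dim)          # (bsz, heads, seq_len, head_dim)
x1, x2 = q[..., ::2], q[..., 1::2]
rot_q = torch.stack([-x2, x1], dim=-1).flatten(-2)  # same trick as rotate_half above
q_rope = q * cos + rot_q * sin
print(q_rope.shape)                               # torch.Size([1, 1, 4, 8])
```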
162
+
163
+
164
+ # Copied from transformers.models.llama.modeling_llama.LlamaMLP Llama->Cohere
165
+ class CohereMLP(nn.Module):
166
+ def __init__(self, config):
167
+ super().__init__()
168
+ self.config = config
169
+ self.hidden_size = config.hidden_size
170
+ self.intermediate_size = config.intermediate_size
171
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
172
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
173
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
174
+ self.act_fn = ACT2FN[config.hidden_act]
175
+
176
+ # Ignore copy
177
+ def forward(self, x):
178
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
179
+ return down_proj
180
+
181
+
182
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
183
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
184
+ """
185
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
186
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
187
+ """
188
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
189
+ if n_rep == 1:
190
+ return hidden_states
191
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
192
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
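A quick standalone check that `repeat_kv` above is indeed equivalent to `torch.repeat_interleave` along the head dimension, as its docstring states:

```python
import torch

batch, kv_heads, seq_len, head_dim, n_rep = 2, 2, 5, 4, 3
kv = torch.randn(batch, kv_heads, seq_len, head_dim)

expanded = kv[:, :, None, :, :].expand(batch, kv_heads, n_rep, seq_len, head_dim)
via_expand = expanded.reshape(batch, kv_heads * n_rep, seq_len, head_dim)   # repeat_kv's approach

via_interleave = torch.repeat_interleave(kv, repeats=n_rep, dim=1)
print(torch.equal(via_expand, via_interleave))  # True
```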
193
+
194
+
195
+ class CohereAttention(nn.Module):
196
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
197
+
198
+ def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None):
199
+ super().__init__()
200
+ self.config = config
201
+ self.layer_idx = layer_idx
202
+ if layer_idx is None:
203
+ logger.warning_once(
204
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
205
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
206
+ "when creating this class."
207
+ )
208
+
209
+ self.attention_dropout = config.attention_dropout
210
+ self.hidden_size = config.hidden_size
211
+ self.num_heads = config.num_attention_heads
212
+ self.head_dim = self.hidden_size // self.num_heads
213
+ self.num_key_value_heads = config.num_key_value_heads
214
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
215
+ self.max_position_embeddings = config.max_position_embeddings
216
+ self.rope_theta = config.rope_theta
217
+ self.is_causal = True
218
+ self.use_qk_norm = config.use_qk_norm
219
+
220
+ if (self.head_dim * self.num_heads) != self.hidden_size:
221
+ raise ValueError(
222
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
223
+ f" and `num_heads`: {self.num_heads})."
224
+ )
225
+
226
+ if self.use_qk_norm:
227
+ # When sharding the model using Tensor Parallelism, be careful to use n_local_heads
228
+ self.q_norm = CohereLayerNorm(hidden_size=(self.num_heads, self.head_dim), eps=config.layer_norm_eps)
229
+ self.k_norm = CohereLayerNorm(
230
+ hidden_size=(self.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps
231
+ )
232
+
233
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
234
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
235
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
236
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
237
+ self._init_rope()
238
+
239
+ # Ignore copy
240
+ def _init_rope(self):
241
+ self.rotary_emb = CohereRotaryEmbedding(
242
+ self.head_dim,
243
+ max_position_embeddings=self.max_position_embeddings,
244
+ base=self.rope_theta,
245
+ )
246
+
247
+ # Ignore copy
248
+ def forward(
249
+ self,
250
+ hidden_states: torch.Tensor,
251
+ attention_mask: Optional[torch.Tensor] = None,
252
+ position_ids: Optional[torch.LongTensor] = None,
253
+ past_key_value: Optional[Cache] = None,
254
+ output_attentions: bool = False,
255
+ use_cache: bool = False,
256
+ cache_position: Optional[torch.LongTensor] = None,
257
+ **kwargs,
258
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
259
+ bsz, q_len, _ = hidden_states.size()
260
+
261
+ query_states = self.q_proj(hidden_states)
262
+ key_states = self.k_proj(hidden_states)
263
+ value_states = self.v_proj(hidden_states)
264
+
265
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
266
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim)
267
+ if self.use_qk_norm:
268
+ query_states = self.q_norm(query_states)
269
+ key_states = self.k_norm(key_states)
270
+
271
+ query_states = query_states.transpose(1, 2)
272
+ key_states = key_states.transpose(1, 2)
273
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
274
+
275
+ past_key_value = getattr(self, "past_key_value", past_key_value)
276
+ cos, sin = self.rotary_emb(value_states, position_ids)
277
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
278
+
279
+ if past_key_value is not None:
280
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
281
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
282
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
283
+
284
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
285
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
286
+
287
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
288
+
289
+ if attention_mask is not None: # no matter the length, we just slice it
290
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
291
+ attn_weights = attn_weights + causal_mask
292
+
293
+ # upcast attention to fp32
294
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
295
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
296
+ attn_output = torch.matmul(attn_weights, value_states)
297
+
298
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
299
+ raise ValueError(
300
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
301
+ f" {attn_output.size()}"
302
+ )
303
+
304
+ attn_output = attn_output.transpose(1, 2).contiguous()
305
+
306
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
307
+
308
+ attn_output = self.o_proj(attn_output)
309
+
310
+ if not output_attentions:
311
+ attn_weights = None
312
+
313
+ return attn_output, attn_weights, past_key_value
314
+
315
+
316
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2 Llama->Cohere
317
+ class CohereFlashAttention2(CohereAttention):
318
+ """
319
+ Cohere flash attention module. This module inherits from `CohereAttention` as the weights of the module stay
320
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
321
+ flash attention and deal with padding tokens in case the input contains any of them.
322
+ """
323
+
324
+ def __init__(self, *args, **kwargs):
325
+ super().__init__(*args, **kwargs)
326
+
327
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
328
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is the bottom-right alignment that was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
329
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
330
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
331
+
332
+ def forward(
333
+ self,
334
+ hidden_states: torch.Tensor,
335
+ attention_mask: Optional[torch.LongTensor] = None,
336
+ position_ids: Optional[torch.LongTensor] = None,
337
+ past_key_value: Optional[Cache] = None,
338
+ output_attentions: bool = False,
339
+ use_cache: bool = False,
340
+ cache_position: Optional[torch.LongTensor] = None,
341
+ **kwargs,
342
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
343
+ output_attentions = False
344
+
345
+ bsz, q_len, _ = hidden_states.size()
346
+
347
+ query_states = self.q_proj(hidden_states)
348
+ key_states = self.k_proj(hidden_states)
349
+ value_states = self.v_proj(hidden_states)
350
+
351
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
352
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim)
353
+ if self.use_qk_norm:
354
+ query_states = self.q_norm(query_states)
355
+ key_states = self.k_norm(key_states)
356
+
357
+ query_states = query_states.transpose(1, 2)
358
+ key_states = key_states.transpose(1, 2)
359
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
360
+
361
+ cos, sin = self.rotary_emb(value_states, position_ids)
362
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
363
+
364
+ past_key_value = getattr(self, "past_key_value", past_key_value)
365
+
366
+ if past_key_value is not None:
367
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
368
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
369
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
370
+
371
+ # TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
372
+ # to be able to avoid many of these transpose/reshape/view.
373
+ query_states = query_states.transpose(1, 2)
374
+ key_states = key_states.transpose(1, 2)
375
+ value_states = value_states.transpose(1, 2)
376
+
377
+ dropout_rate = self.attention_dropout if self.training else 0.0
378
+
379
+ # Ignore copy
380
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
381
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
382
+ # cast them back in the correct dtype just to be sure everything works as expected.
383
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
384
+ # in fp32. (CohereLayerNorm handles it correctly)
385
+
386
+ input_dtype = query_states.dtype
387
+ if input_dtype == torch.float32:
388
+ if torch.is_autocast_enabled():
389
+ target_dtype = torch.get_autocast_gpu_dtype()
390
+ # Handle the case where the model is quantized
391
+ elif hasattr(self.config, "_pre_quantization_dtype"):
392
+ target_dtype = self.config._pre_quantization_dtype
393
+ else:
394
+ target_dtype = self.q_proj.weight.dtype
395
+
396
+ logger.warning_once(
397
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
398
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
399
+ f" {target_dtype}."
400
+ )
401
+
402
+ query_states = query_states.to(target_dtype)
403
+ key_states = key_states.to(target_dtype)
404
+ value_states = value_states.to(target_dtype)
405
+
406
+ attn_output = self._flash_attention_forward(
407
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
408
+ )
409
+
410
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
411
+ attn_output = self.o_proj(attn_output)
412
+
413
+ if not output_attentions:
414
+ attn_weights = None
415
+
416
+ return attn_output, attn_weights, past_key_value
417
+
418
+ def _flash_attention_forward(
419
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
420
+ ):
421
+ """
422
+ Calls the forward method of Flash Attention: if the input hidden states contain at least one padding token,
423
+ it first unpads the input, then computes the attention scores and pads the final attention scores back.
424
+
425
+ Args:
426
+ query_states (`torch.Tensor`):
427
+ Input query states to be passed to Flash Attention API
428
+ key_states (`torch.Tensor`):
429
+ Input key states to be passed to Flash Attention API
430
+ value_states (`torch.Tensor`):
431
+ Input value states to be passed to Flash Attention API
432
+ attention_mask (`torch.Tensor`):
433
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
434
+ position of padding tokens and 1 for the position of non-padding tokens.
435
+ dropout (`float`):
436
+ Attention dropout
437
+ softmax_scale (`float`, *optional*):
438
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
439
+ """
440
+ if not self._flash_attn_uses_top_left_mask:
441
+ causal = self.is_causal
442
+ else:
443
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in CohereFlashAttention2 __init__.
444
+ causal = self.is_causal and query_length != 1
445
+
446
+ # Contains at least one padding token in the sequence
447
+ if attention_mask is not None:
448
+ batch_size = query_states.shape[0]
449
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
450
+ query_states, key_states, value_states, attention_mask, query_length
451
+ )
452
+
453
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
454
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
455
+
456
+ attn_output_unpad = flash_attn_varlen_func(
457
+ query_states,
458
+ key_states,
459
+ value_states,
460
+ cu_seqlens_q=cu_seqlens_q,
461
+ cu_seqlens_k=cu_seqlens_k,
462
+ max_seqlen_q=max_seqlen_in_batch_q,
463
+ max_seqlen_k=max_seqlen_in_batch_k,
464
+ dropout_p=dropout,
465
+ softmax_scale=softmax_scale,
466
+ causal=causal,
467
+ )
468
+
469
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
470
+ else:
471
+ attn_output = flash_attn_func(
472
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
473
+ )
474
+
475
+ return attn_output
476
+
477
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
478
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
479
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
480
+
481
+ key_layer = index_first_axis(
482
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
483
+ )
484
+ value_layer = index_first_axis(
485
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
486
+ )
487
+ if query_length == kv_seq_len:
488
+ query_layer = index_first_axis(
489
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
490
+ )
491
+ cu_seqlens_q = cu_seqlens_k
492
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
493
+ indices_q = indices_k
494
+ elif query_length == 1:
495
+ max_seqlen_in_batch_q = 1
496
+ cu_seqlens_q = torch.arange(
497
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
498
+ ) # There is a memcpy here, that is very bad.
499
+ indices_q = cu_seqlens_q[:-1]
500
+ query_layer = query_layer.squeeze(1)
501
+ else:
502
+ # The -q_len: slice assumes left padding.
503
+ attention_mask = attention_mask[:, -query_length:]
504
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
505
+
506
+ return (
507
+ query_layer,
508
+ key_layer,
509
+ value_layer,
510
+ indices_q,
511
+ (cu_seqlens_q, cu_seqlens_k),
512
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
513
+ )
514
+
515
+
516
+ # Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention Llama->Cohere
517
+ class CohereSdpaAttention(CohereAttention):
518
+ """
519
+ Cohere attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
520
+ `CohereAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to the
521
+ SDPA API.
522
+ """
523
+
524
+ # Ignore copy
525
+ def forward(
526
+ self,
527
+ hidden_states: torch.Tensor,
528
+ attention_mask: Optional[torch.Tensor] = None,
529
+ position_ids: Optional[torch.LongTensor] = None,
530
+ past_key_value: Optional[Cache] = None,
531
+ output_attentions: bool = False,
532
+ use_cache: bool = False,
533
+ cache_position: Optional[torch.LongTensor] = None,
534
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
535
+ if output_attentions:
536
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
537
+ logger.warning_once(
538
+ "CohereModel is using CohereSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
539
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
540
+ )
541
+ return super().forward(
542
+ hidden_states=hidden_states,
543
+ attention_mask=attention_mask,
544
+ position_ids=position_ids,
545
+ past_key_value=past_key_value,
546
+ output_attentions=output_attentions,
547
+ use_cache=use_cache,
548
+ cache_position=cache_position,
549
+ )
550
+
551
+ bsz, q_len, _ = hidden_states.size()
552
+
553
+ query_states = self.q_proj(hidden_states)
554
+ key_states = self.k_proj(hidden_states)
555
+ value_states = self.v_proj(hidden_states)
556
+
557
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
558
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim)
559
+ if self.use_qk_norm:
560
+ query_states = self.q_norm(query_states)
561
+ key_states = self.k_norm(key_states)
562
+
563
+ query_states = query_states.transpose(1, 2)
564
+ key_states = key_states.transpose(1, 2)
565
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
566
+
567
+ cos, sin = self.rotary_emb(value_states, position_ids)
568
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
569
+
570
+ # In case static cache is used, it is an instance attribute.
571
+ past_key_value = getattr(self, "past_key_value", past_key_value)
572
+
573
+ if past_key_value is not None:
574
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
575
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
576
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
577
+
578
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
579
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
580
+
581
+ causal_mask = attention_mask
582
+ # if attention_mask is not None and cache_position is not None:
583
+ if attention_mask is not None:
584
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
585
+
586
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
587
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
588
+ if query_states.device.type == "cuda" and causal_mask is not None:
589
+ query_states = query_states.contiguous()
590
+ key_states = key_states.contiguous()
591
+ value_states = value_states.contiguous()
592
+
593
+ # In case we are not compiling, we may set `causal_mask` to None, which is required to dispatch to SDPA's Flash Attention 2 backend, rather than
594
+ # relying on the `is_causal` argument.
595
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
596
+ query_states,
597
+ key_states,
598
+ value_states,
599
+ attn_mask=causal_mask,
600
+ dropout_p=self.attention_dropout if self.training else 0.0,
601
+ is_causal=causal_mask is None and q_len > 1,
602
+ )
603
+
604
+ attn_output = attn_output.transpose(1, 2).contiguous()
605
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
606
+
607
+ attn_output = self.o_proj(attn_output)
608
+
609
+ return attn_output, None, past_key_value
610
+
611
+
612
+ COHERE_ATTENTION_CLASSES = {
613
+ "eager": CohereAttention,
614
+ "flash_attention_2": CohereFlashAttention2,
615
+ "sdpa": CohereSdpaAttention,
616
+ }
617
+
618
+
619
+ class CohereDecoderLayer(nn.Module):
620
+ def __init__(self, config: CohereConfig, layer_idx: int):
621
+ super().__init__()
622
+ self.hidden_size = config.hidden_size
623
+
624
+ self.self_attn = COHERE_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
625
+
626
+ self.mlp = CohereMLP(config)
627
+ self.input_layernorm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
628
+
629
+ def forward(
630
+ self,
631
+ hidden_states: torch.Tensor,
632
+ attention_mask: Optional[torch.Tensor] = None,
633
+ position_ids: Optional[torch.LongTensor] = None,
634
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
635
+ output_attentions: Optional[bool] = False,
636
+ use_cache: Optional[bool] = False,
637
+ cache_position: Optional[torch.LongTensor] = None,
638
+ **kwargs,
639
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
640
+ """
641
+ Args:
642
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
643
+ attention_mask (`torch.FloatTensor`, *optional*):
644
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
645
+ query_sequence_length, key_sequence_length)` if default attention is used.
646
+ output_attentions (`bool`, *optional*):
647
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
648
+ returned tensors for more detail.
649
+ use_cache (`bool`, *optional*):
650
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
651
+ (see `past_key_values`).
652
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
653
+ """
654
+ if "padding_mask" in kwargs:
655
+ warnings.warn(
656
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
657
+ )
658
+
659
+ residual = hidden_states
660
+
661
+ hidden_states = self.input_layernorm(hidden_states)
662
+
663
+ # Self Attention
664
+ hidden_states_attention, self_attn_weights, present_key_value = self.self_attn(
665
+ hidden_states=hidden_states,
666
+ attention_mask=attention_mask,
667
+ position_ids=position_ids,
668
+ past_key_value=past_key_value,
669
+ output_attentions=output_attentions,
670
+ use_cache=use_cache,
671
+ cache_position=cache_position,
672
+ **kwargs,
673
+ )
674
+
675
+ # Fully Connected
676
+ hidden_states_mlp = self.mlp(hidden_states)
677
+
678
+ # Add everything together
679
+ hidden_states = residual + hidden_states_attention + hidden_states_mlp
680
+
681
+ outputs = (hidden_states,)
682
+
683
+ if output_attentions:
684
+ outputs += (self_attn_weights,)
685
+
686
+ if use_cache:
687
+ outputs += (present_key_value,)
688
+
689
+ return outputs
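Unlike a sequential pre-norm block, the decoder layer above feeds a single shared layer norm into both the attention branch and the MLP branch and sums the two with the residual. A conceptual sketch with stand-in modules (plain linear layers, not the real attention/MLP classes):

```python
import torch
from torch import nn

hidden = torch.randn(1, 6, 32)
norm = nn.LayerNorm(32)
attn = nn.Linear(32, 32)   # placeholder for self-attention
mlp = nn.Linear(32, 32)    # placeholder for the gated MLP

normed = norm(hidden)
out = hidden + attn(normed) + mlp(normed)   # residual + attention branch + MLP branch
print(out.shape)                            # torch.Size([1, 6, 32])
```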
690
+
691
+
692
+ COHERE_START_DOCSTRING = r"""
693
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
694
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
695
+ etc.)
696
+
697
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
698
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
699
+ and behavior.
700
+
701
+ Parameters:
702
+ config ([`CohereConfig`]):
703
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
704
+ load the weights associated with the model, only the configuration. Check out the
705
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
706
+ """
707
+
708
+
709
+ @add_start_docstrings(
710
+ "The bare Cohere Model outputting raw hidden-states without any specific head on top.",
711
+ COHERE_START_DOCSTRING,
712
+ )
713
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->Cohere
714
+ class CoherePreTrainedModel(PreTrainedModel):
715
+ config_class = CohereConfig
716
+ base_model_prefix = "model"
717
+ supports_gradient_checkpointing = True
718
+ _no_split_modules = ["CohereDecoderLayer"]
719
+ _skip_keys_device_placement = ["past_key_values"]
720
+ _supports_flash_attn_2 = True
721
+ _supports_sdpa = True
722
+ _supports_cache_class = True
723
+
724
+ def _init_weights(self, module):
725
+ std = self.config.initializer_range
726
+ if isinstance(module, nn.Linear):
727
+ module.weight.data.normal_(mean=0.0, std=std)
728
+ if module.bias is not None:
729
+ module.bias.data.zero_()
730
+ elif isinstance(module, nn.Embedding):
731
+ module.weight.data.normal_(mean=0.0, std=std)
732
+ if module.padding_idx is not None:
733
+ module.weight.data[module.padding_idx].zero_()
734
+
735
+ def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None):
736
+ if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
737
+ raise ValueError(
738
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
739
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
740
+ )
741
+
742
+ for layer in self.model.layers:
743
+ device = layer.input_layernorm.weight.device
744
+ if hasattr(self.config, "_pre_quantization_dtype"):
745
+ dtype = self.config._pre_quantization_dtype
746
+ else:
747
+ dtype = layer.self_attn.o_proj.weight.dtype
748
+ layer.self_attn.past_key_value = cache_cls(
749
+ self.config, max_batch_size, max_cache_len, device=device, dtype=dtype
750
+ )
751
+
752
+ def _reset_cache(self):
753
+ for layer in self.model.layers:
754
+ layer.self_attn.past_key_value = None
755
+
756
+
757
+ COHERE_INPUTS_DOCSTRING = r"""
758
+ Args:
759
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
760
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
761
+ it.
762
+
763
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
764
+ [`PreTrainedTokenizer.__call__`] for details.
765
+
766
+ [What are input IDs?](../glossary#input-ids)
767
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
768
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
769
+
770
+ - 1 for tokens that are **not masked**,
771
+ - 0 for tokens that are **masked**.
772
+
773
+ [What are attention masks?](../glossary#attention-mask)
774
+
775
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
776
+ [`PreTrainedTokenizer.__call__`] for details.
777
+
778
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
779
+ `past_key_values`).
780
+
781
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
782
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
783
+ information on the default strategy.
784
+
785
+ - 1 indicates the head is **not masked**,
786
+ - 0 indicates the head is **masked**.
787
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
788
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
789
+ config.n_positions - 1]`.
790
+
791
+ [What are position IDs?](../glossary#position-ids)
792
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
793
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
794
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
795
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
796
+
797
+ Two formats are allowed:
798
+ - a [`~cache_utils.Cache`] instance;
799
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
800
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
801
+ cache format.
802
+
803
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
804
+ legacy cache format will be returned.
805
+
806
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
807
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
808
+ of shape `(batch_size, sequence_length)`.
809
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
810
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
811
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
812
+ model's internal embedding lookup matrix.
813
+ use_cache (`bool`, *optional*):
814
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
815
+ `past_key_values`).
816
+ output_attentions (`bool`, *optional*):
817
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
818
+ tensors for more detail.
819
+ output_hidden_states (`bool`, *optional*):
820
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
821
+ more detail.
822
+ return_dict (`bool`, *optional*):
823
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
824
+ """
825
+
826
+
827
+ @add_start_docstrings(
828
+ "The bare Cohere Model outputting raw hidden-states without any specific head on top.",
829
+ COHERE_START_DOCSTRING,
830
+ )
831
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel with Llama->Cohere
832
+ class CohereModel(CoherePreTrainedModel):
833
+ """
834
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`CohereDecoderLayer`]
835
+
836
+ Args:
837
+ config: CohereConfig
838
+ """
839
+
840
+ # Ignore copy
841
+ def __init__(self, config: CohereConfig):
842
+ super().__init__(config)
843
+ self.padding_idx = config.pad_token_id
844
+ self.vocab_size = config.vocab_size
845
+
846
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
847
+ self.layers = nn.ModuleList(
848
+ [CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
849
+ )
850
+ self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
851
+ self.gradient_checkpointing = False
852
+
853
+ # Initialize weights and apply final processing
854
+ self.post_init()
855
+
856
+ def get_input_embeddings(self):
857
+ return self.embed_tokens
858
+
859
+ def set_input_embeddings(self, value):
860
+ self.embed_tokens = value
861
+
862
+ # Ignore copy
863
+ @add_start_docstrings_to_model_forward(COHERE_INPUTS_DOCSTRING)
864
+ def forward(
865
+ self,
866
+ input_ids: torch.LongTensor = None,
867
+ attention_mask: Optional[torch.Tensor] = None,
868
+ position_ids: Optional[torch.LongTensor] = None,
869
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
870
+ inputs_embeds: Optional[torch.FloatTensor] = None,
871
+ use_cache: Optional[bool] = None,
872
+ output_attentions: Optional[bool] = None,
873
+ output_hidden_states: Optional[bool] = None,
874
+ return_dict: Optional[bool] = None,
875
+ cache_position: Optional[torch.LongTensor] = None,
876
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
877
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
878
+ output_hidden_states = (
879
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
880
+ )
881
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
882
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
883
+
884
+ if (input_ids is None) ^ (inputs_embeds is not None):
885
+ raise ValueError(
886
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
887
+ )
888
+
889
+ if self.gradient_checkpointing and self.training and use_cache:
890
+ logger.warning_once(
891
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
892
+ )
893
+ use_cache = False
894
+
895
+ if inputs_embeds is None:
896
+ inputs_embeds = self.embed_tokens(input_ids)
897
+
898
+ past_seen_tokens = 0
899
+ if use_cache: # kept for BC (cache positions)
900
+ if not isinstance(past_key_values, StaticCache):
901
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
902
+ past_seen_tokens = past_key_values.get_seq_length()
903
+
904
+ if cache_position is None:
905
+ if isinstance(past_key_values, StaticCache):
906
+ raise ValueError("cache_position is a required argument when using StaticCache.")
907
+ cache_position = torch.arange(
908
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
909
+ )
910
+
911
+ if position_ids is None:
912
+ position_ids = cache_position.unsqueeze(0)
913
+
914
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_seen_tokens)
915
+
916
+ # embed positions
917
+ hidden_states = inputs_embeds
918
+
919
+ # decoder layers
920
+ all_hidden_states = () if output_hidden_states else None
921
+ all_self_attns = () if output_attentions else None
922
+ next_decoder_cache = None
923
+
924
+ for decoder_layer in self.layers:
925
+ if output_hidden_states:
926
+ all_hidden_states += (hidden_states,)
927
+
928
+ if self.gradient_checkpointing and self.training:
929
+ layer_outputs = self._gradient_checkpointing_func(
930
+ decoder_layer.__call__,
931
+ hidden_states,
932
+ causal_mask,
933
+ position_ids,
934
+ past_key_values,
935
+ output_attentions,
936
+ use_cache,
937
+ cache_position,
938
+ )
939
+ else:
940
+ layer_outputs = decoder_layer(
941
+ hidden_states,
942
+ attention_mask=causal_mask,
943
+ position_ids=position_ids,
944
+ past_key_value=past_key_values,
945
+ output_attentions=output_attentions,
946
+ use_cache=use_cache,
947
+ cache_position=cache_position,
948
+ )
949
+
950
+ hidden_states = layer_outputs[0]
951
+
952
+ if use_cache:
953
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
954
+
955
+ if output_attentions:
956
+ all_self_attns += (layer_outputs[1],)
957
+
958
+ hidden_states = self.norm(hidden_states)
959
+
960
+ # add hidden states from the last decoder layer
961
+ if output_hidden_states:
962
+ all_hidden_states += (hidden_states,)
963
+
964
+ next_cache = None
965
+ if use_cache:
966
+ next_cache = (
967
+ next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
968
+ )
969
+ if not return_dict:
970
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
971
+ return BaseModelOutputWithPast(
972
+ last_hidden_state=hidden_states,
973
+ past_key_values=next_cache,
974
+ hidden_states=all_hidden_states,
975
+ attentions=all_self_attns,
976
+ )
977
+
978
+ def _update_causal_mask(
979
+ self,
980
+ attention_mask: torch.Tensor,
981
+ input_tensor: torch.Tensor,
982
+ cache_position: torch.Tensor,
983
+ past_seen_tokens: int,
984
+ ):
985
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
986
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
987
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
988
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
989
+
990
+ if self.config._attn_implementation == "flash_attention_2":
991
+ if attention_mask is not None and 0.0 in attention_mask:
992
+ return attention_mask
993
+ return None
994
+
995
+ if self.config._attn_implementation == "sdpa":
996
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument,
997
+ # in order to dispatch on Flash Attention 2.
998
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
999
+ attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens
1000
+ ):
1001
+ return None
1002
+
1003
+ dtype, device = input_tensor.dtype, input_tensor.device
1004
+ min_dtype = torch.finfo(dtype).min
1005
+ sequence_length = input_tensor.shape[1]
1006
+ if hasattr(getattr(self.layers[0], "self_attn", {}), "past_key_value"): # static cache
1007
+ target_length = self.config.max_position_embeddings
1008
+ else: # dynamic cache
1009
+ target_length = (
1010
+ attention_mask.shape[-1]
1011
+ if isinstance(attention_mask, torch.Tensor)
1012
+ else past_seen_tokens + sequence_length + 1
1013
+ )
1014
+
1015
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
1016
+ if sequence_length != 1:
1017
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1018
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1019
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1020
+ if attention_mask is not None:
1021
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1022
+ if attention_mask.dim() == 2:
1023
+ mask_length = attention_mask.shape[-1]
1024
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
1025
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
1026
+ elif attention_mask.dim() == 4:
1027
+ # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with
1028
+ # cache. In that case, the 4D attention mask attends to the newest tokens only.
1029
+ if attention_mask.shape[-2] < cache_position[0] + sequence_length:
1030
+ offset = cache_position[0]
1031
+ else:
1032
+ offset = 0
1033
+ mask_shape = attention_mask.shape
1034
+ mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
1035
+ causal_mask[
1036
+ : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
1037
+ ] = mask_slice
1038
+
1039
+ if (
1040
+ self.config._attn_implementation == "sdpa"
1041
+ and attention_mask is not None
1042
+ and attention_mask.device.type == "cuda"
1043
+ ):
1044
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1045
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1046
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1047
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1048
+
1049
+ return causal_mask
1050
+
1051
+
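For illustration, the core of `_update_causal_mask` above is the `torch.full` / `torch.triu` / `cache_position` combination; a small standalone sketch with toy sizes (not the model code itself) shows the resulting pattern:

```python
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
sequence_length, target_length = 3, 5                   # toy sizes
cache_position = torch.arange(2, 2 + sequence_length)   # pretend 2 tokens are already cached

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)

# Row i may attend to every position j <= cache_position[i]; later positions stay at min_dtype.
print(causal_mask)
```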
1052
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with Llama->Cohere
1053
+ class CohereForCausalLM(CoherePreTrainedModel):
1054
+ _tied_weights_keys = ["lm_head.weight"]
1055
+
1056
+ # Ignore copy
1057
+ def __init__(self, config):
1058
+ super().__init__(config)
1059
+ self.model = CohereModel(config)
1060
+ self.vocab_size = config.vocab_size
1061
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1062
+ self.logit_scale = config.logit_scale
1063
+ self.tie_word_embeddings = config.tie_word_embeddings
1064
+ # Initialize weights and apply final processing
1065
+ self.post_init()
1066
+
1067
+ def get_input_embeddings(self):
1068
+ return self.model.embed_tokens
1069
+
1070
+ def set_input_embeddings(self, value):
1071
+ self.model.embed_tokens = value
1072
+
1073
+ def get_output_embeddings(self):
1074
+ return self.lm_head
1075
+
1076
+ def set_output_embeddings(self, new_embeddings):
1077
+ self.lm_head = new_embeddings
1078
+
1079
+ def set_decoder(self, decoder):
1080
+ self.model = decoder
1081
+
1082
+ def get_decoder(self):
1083
+ return self.model
1084
+
1085
+ # Ignore copy
1086
+ @add_start_docstrings_to_model_forward(COHERE_INPUTS_DOCSTRING)
1087
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1088
+ def forward(
1089
+ self,
1090
+ input_ids: torch.LongTensor = None,
1091
+ attention_mask: Optional[torch.Tensor] = None,
1092
+ position_ids: Optional[torch.LongTensor] = None,
1093
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1094
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1095
+ labels: Optional[torch.LongTensor] = None,
1096
+ use_cache: Optional[bool] = None,
1097
+ output_attentions: Optional[bool] = None,
1098
+ output_hidden_states: Optional[bool] = None,
1099
+ return_dict: Optional[bool] = None,
1100
+ cache_position: Optional[torch.LongTensor] = None,
1101
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1102
+ r"""
1103
+ Args:
1104
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1105
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1106
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1107
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1108
+
1109
+ Returns:
1110
+
1111
+ Example:
1112
+
1113
+ ```python
1114
+ >> from transformers import AutoTokenizer, CohereForCausalLM
1115
+
1116
+ >> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
1117
+ >> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
1118
+
1119
+ >> prompt = "Hey, are you conscious? Can you talk to me?"
1120
+ >> inputs = tokenizer(prompt, return_tensors="pt")
1121
+
1122
+ >> # Generate
1123
+ >> generate_ids = model.generate(inputs.input_ids, max_length=30)
1124
+ >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1125
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1126
+ ```"""
1127
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1128
+ output_hidden_states = (
1129
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1130
+ )
1131
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1132
+
1133
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1134
+ outputs = self.model(
1135
+ input_ids=input_ids,
1136
+ attention_mask=attention_mask,
1137
+ position_ids=position_ids,
1138
+ past_key_values=past_key_values,
1139
+ inputs_embeds=inputs_embeds,
1140
+ use_cache=use_cache,
1141
+ output_attentions=output_attentions,
1142
+ output_hidden_states=output_hidden_states,
1143
+ return_dict=return_dict,
1144
+ cache_position=cache_position,
1145
+ )
1146
+
1147
+ hidden_states = outputs[0]
1148
+ logits = self.lm_head(hidden_states)
1149
+ logits = logits * self.logit_scale
1150
+ logits = logits.float()
1151
+
1152
+ loss = None
1153
+ if labels is not None:
1154
+ # Shift so that tokens < n predict n
1155
+ shift_logits = logits[..., :-1, :].contiguous()
1156
+ shift_labels = labels[..., 1:].contiguous()
1157
+ # Flatten the tokens
1158
+ loss_fct = CrossEntropyLoss()
1159
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1160
+ shift_labels = shift_labels.view(-1)
1161
+ # Enable model parallelism
1162
+ shift_labels = shift_labels.to(shift_logits.device)
1163
+ loss = loss_fct(shift_logits, shift_labels)
1164
+
1165
+ if not return_dict:
1166
+ output = (logits,) + outputs[1:]
1167
+ return (loss,) + output if loss is not None else output
1168
+
1169
+ return CausalLMOutputWithPast(
1170
+ loss=loss,
1171
+ logits=logits,
1172
+ past_key_values=outputs.past_key_values,
1173
+ hidden_states=outputs.hidden_states,
1174
+ attentions=outputs.attentions,
1175
+ )
1176
+
1177
+ def prepare_inputs_for_generation(
1178
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
1179
+ ):
1180
+ # With static cache, the `past_key_values` is None
1181
+ # TODO joao: standardize interface for the different Cache classes and remove of this if
1182
+ has_static_cache = False
1183
+ if past_key_values is None:
1184
+ past_key_values = getattr(getattr(self.model.layers[0], "self_attn", {}), "past_key_value", None)
1185
+ has_static_cache = past_key_values is not None
1186
+
1187
+ past_length = 0
1188
+ if past_key_values is not None:
1189
+ if isinstance(past_key_values, Cache):
1190
+ past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length()
1191
+ max_cache_length = (
1192
+ torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
1193
+ if past_key_values.get_max_length() is not None
1194
+ else None
1195
+ )
1196
+ cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)
1197
+ # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects
1198
+ else:
1199
+ cache_length = past_length = past_key_values[0][0].shape[2]
1200
+ max_cache_length = None
1201
+
1202
+ # Keep only the unprocessed tokens:
1203
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1204
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1205
+ # input)
1206
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1207
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1208
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1209
+ # input_ids based on the past_length.
1210
+ elif past_length < input_ids.shape[1]:
1211
+ input_ids = input_ids[:, past_length:]
1212
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1213
+
1214
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1215
+ if (
1216
+ max_cache_length is not None
1217
+ and attention_mask is not None
1218
+ and cache_length + input_ids.shape[1] > max_cache_length
1219
+ ):
1220
+ attention_mask = attention_mask[:, -max_cache_length:]
1221
+
1222
+ position_ids = kwargs.get("position_ids", None)
1223
+ if attention_mask is not None and position_ids is None:
1224
+ # create position_ids on the fly for batch generation
1225
+ position_ids = attention_mask.long().cumsum(-1) - 1
1226
+ position_ids.masked_fill_(attention_mask == 0, 1)
1227
+ if past_key_values:
1228
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1229
+
1230
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1231
+ if inputs_embeds is not None and past_key_values is None:
1232
+ model_inputs = {"inputs_embeds": inputs_embeds}
1233
+ else:
1234
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
1235
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
1236
+ # TODO: use `next_tokens` directly instead.
1237
+ model_inputs = {"input_ids": input_ids.contiguous()}
1238
+
1239
+ input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
1240
+ if cache_position is None:
1241
+ cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
1242
+ else:
1243
+ cache_position = cache_position[-input_length:]
1244
+
1245
+ if has_static_cache:
1246
+ past_key_values = None
1247
+
1248
+ model_inputs.update(
1249
+ {
1250
+ "position_ids": position_ids,
1251
+ "cache_position": cache_position,
1252
+ "past_key_values": past_key_values,
1253
+ "use_cache": kwargs.get("use_cache"),
1254
+ "attention_mask": attention_mask,
1255
+ }
1256
+ )
1257
+ return model_inputs
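The pruning branches in `prepare_inputs_for_generation` are easiest to follow with concrete shapes; a small sketch with hypothetical lengths (not taken from a real generation run):

```python
import torch

past_length = 6                                                # tokens already processed and cached
input_ids = torch.tensor([[11, 12, 13, 14, 15, 16, 17, 18]])   # full history re-sent by `generate`
attention_mask = torch.ones(1, 8)

if attention_mask.shape[1] > input_ids.shape[1]:
    # some inputs live only in the cache (e.g. when inputs_embeds was used on the first step)
    input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):]
elif past_length < input_ids.shape[1]:
    # the usual case: drop the tokens the cache has already seen
    input_ids = input_ids[:, past_length:]

print(input_ids)  # tensor([[17, 18]]) -> only the two unprocessed tokens remain
```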
1258
+
1259
+ @staticmethod
1260
+ def _reorder_cache(past_key_values, beam_idx):
1261
+ reordered_past = ()
1262
+ for layer_past in past_key_values:
1263
+ reordered_past += (
1264
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1265
+ )
1266
+ return reordered_past
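`_reorder_cache` simply permutes the batch dimension of every cached tensor with `beam_idx` during beam search; a minimal sketch with dummy tensors (not real cache contents):

```python
import torch

# One layer's legacy cache: (key, value), each of shape (batch, heads, seq, head_dim).
key = torch.arange(4.0).reshape(4, 1, 1, 1)
value = key.clone()
past_key_values = ((key, value),)

beam_idx = torch.tensor([2, 2, 0, 1])  # beams 0 and 1 both continue from old beam 2

reordered = tuple(
    tuple(state.index_select(0, beam_idx) for state in layer_past) for layer_past in past_key_values
)
print(reordered[0][0].flatten())  # tensor([2., 2., 0., 1.])
```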
llmeval-env/lib/python3.10/site-packages/transformers/models/cohere/tokenization_cohere_fast.py ADDED
@@ -0,0 +1,701 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Cohere team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # This file is based on the tokenization_llama_fast.py file in transformers
17
+
18
+ import pickle
19
+ from typing import Dict, List, Literal, Union
20
+
21
+ from tokenizers import processors
22
+
23
+ from ...pipelines.conversational import Conversation
24
+ from ...tokenization_utils_base import BatchEncoding
25
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
26
+ from ...utils import logging
27
+ from ...utils.versions import require_version
28
+
29
+
30
+ require_version("tokenizers>=0.13.3")
31
+
32
+ logger = logging.get_logger(__name__)
33
+ VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
34
+
35
+ PRETRAINED_VOCAB_FILES_MAP = {
36
+ "tokenizer_file": {
37
+ "Cohere/Command-nightly": "https://huggingface.co/Cohere/Command-nightly/blob/main/tokenizer.json",
38
+ },
39
+ }
40
+
41
+ # fmt: off
42
+ DEFAULT_SYSTEM_PROMPT = "You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere."
43
+ DEFAULT_RAG_PREAMBLE = """## Task and Context
44
+ You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
45
+
46
+ ## Style Guide
47
+ Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling."""
48
+ # fmt: on
49
+
50
+
51
+ class CohereTokenizerFast(PreTrainedTokenizerFast):
52
+ """
53
+ Construct a Cohere tokenizer. Based on byte-level Byte-Pair-Encoding.
54
+
55
+ This uses notably ByteFallback and NFC normalization.
56
+
57
+ ```python
58
+ >>> from transformers import AutoTokenizer
59
+
60
+ >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
61
+ >>> tokenizer.encode("Hello this is a test")
62
+ [5, 28339, 2075, 1801, 1671, 3282]
63
+ ```
64
+
65
+ If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
66
+ call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
67
+ values of the first token and final token of an encoded sequence will not be correct). For more details, check out
68
+ the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
69
+
70
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
71
+ the model was not pretrained this way, it might yield a decrease in performance.
72
+
73
+ <Tip>
74
+
75
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
76
+
77
+ </Tip>
78
+
79
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
80
+ refer to this superclass for more information regarding those methods.
81
+
82
+ Args:
83
+ vocab_file (`str`, *optional*):
84
+ Path to the vocabulary file.
85
+ merges_file (`str`, *optional*):
86
+ Path to the merges file.
87
+ tokenizer_file (`str`, *optional*):
88
+ [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
89
+ contains everything needed to load the tokenizer.
90
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
91
+ Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
92
+ extra spaces.
93
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<UNK>"`):
94
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
95
+ token instead.
96
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<BOS_TOKEN>"`):
97
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
98
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<|END_OF_TURN_TOKEN|>"`):
99
+ The end of sequence token.
100
+ add_bos_token (`bool`, *optional*, defaults to `True`):
101
+ Whether or not to add a `bos_token` at the start of sequences.
102
+ add_eos_token (`bool`, *optional*, defaults to `False`):
103
+ Whether or not to add an `eos_token` at the end of sequences.
104
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
105
+ Whether or not the default system prompt for Cohere tokenizer should be used.
106
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
107
+ Whether or not the tokenizer should automatically add a prefix space
108
+ """
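As the docstring notes, pretokenized input (`is_split_into_words=True`) requires `add_prefix_space=True` at instantiation time; a short sketch of the two modes, assuming the public `CohereForAI/c4ai-command-r-v01` tokenizer files:

```python
from transformers import AutoTokenizer

# Default settings: plain strings work; pretokenized input would raise.
tok = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
print(tok("Hello this is a test").input_ids)

# Pretokenized input needs add_prefix_space=True when the tokenizer is created.
tok_ws = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01", add_prefix_space=True)
print(tok_ws(["Hello", "this", "is", "a", "test"], is_split_into_words=True).input_ids)
```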
109
+
110
+ vocab_files_names = VOCAB_FILES_NAMES
111
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
112
+ padding_side = "left"
113
+ model_input_names = ["input_ids", "attention_mask"]
114
+ slow_tokenizer_class = None
115
+ # No `max_model_input_sizes`
116
+
117
+ def __init__(
118
+ self,
119
+ vocab_file=None,
120
+ merges_file=None,
121
+ tokenizer_file=None,
122
+ clean_up_tokenization_spaces=False,
123
+ unk_token="<UNK>",
124
+ bos_token="<BOS_TOKEN>",
125
+ eos_token="<|END_OF_TURN_TOKEN|>",
126
+ add_bos_token=True,
127
+ add_eos_token=False,
128
+ use_default_system_prompt=False,
129
+ add_prefix_space=False,
130
+ **kwargs,
131
+ ):
132
+ super().__init__(
133
+ vocab_file=vocab_file,
134
+ merges_file=merges_file,
135
+ tokenizer_file=tokenizer_file,
136
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
137
+ unk_token=unk_token,
138
+ bos_token=bos_token,
139
+ eos_token=eos_token,
140
+ add_bos_token=add_bos_token,
141
+ add_eos_token=add_eos_token,
142
+ use_default_system_prompt=use_default_system_prompt,
143
+ add_prefix_space=add_prefix_space,
144
+ **kwargs,
145
+ )
146
+ self._add_bos_token = add_bos_token
147
+ self._add_eos_token = add_eos_token
148
+ self.update_post_processor()
149
+ self.use_default_system_prompt = use_default_system_prompt
150
+ self.vocab_file = vocab_file
151
+ self.grounded_generation_template = kwargs.pop("grounded_generation_template", None)
152
+ self.tool_use_template = kwargs.pop("tool_use_template", None)
153
+
154
+ # TODO @ArthurZucker this can only work one way for now, to update later-on. Tests should also properly
155
+ # check this as they were green before.
156
+ pre_tok_state = pickle.dumps(self.backend_tokenizer.pre_tokenizer)
157
+ decoder_state = pickle.dumps(self.backend_tokenizer.decoder)
158
+
159
+ if add_prefix_space:
160
+ pre_tok_state = pre_tok_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
161
+ decoder_state = decoder_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
162
+ self.backend_tokenizer.pre_tokenizer = pickle.loads(pre_tok_state)
163
+ self.backend_tokenizer.decoder = pickle.loads(decoder_state)
164
+
165
+ self.add_prefix_space = add_prefix_space
166
+
167
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
168
+ is_split_into_words = kwargs.get("is_split_into_words", False)
169
+ if not (self.add_prefix_space or not is_split_into_words):
170
+ raise Exception(
171
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
172
+ " pretokenized inputs."
173
+ )
174
+
175
+ return super()._batch_encode_plus(*args, **kwargs)
176
+
177
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
178
+ is_split_into_words = kwargs.get("is_split_into_words", False)
179
+
180
+ if not (self.add_prefix_space or not is_split_into_words):
181
+ raise Exception(
182
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
183
+ " pretokenized inputs."
184
+ )
185
+
186
+ return super()._encode_plus(*args, **kwargs)
187
+
188
+ def update_post_processor(self):
189
+ """
190
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
191
+ """
192
+ bos = self.bos_token
193
+ bos_token_id = self.bos_token_id
194
+ if bos is None and self.add_bos_token:
195
+ raise ValueError("add_bos_token = True but bos_token = None")
196
+
197
+ eos = self.eos_token
198
+ eos_token_id = self.eos_token_id
199
+ if eos is None and self.add_eos_token:
200
+ raise ValueError("add_eos_token = True but eos_token = None")
201
+
202
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
203
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
204
+
205
+ special_tokens = []
206
+ if self.add_bos_token:
207
+ special_tokens.append((bos, bos_token_id))
208
+ if self.add_eos_token:
209
+ special_tokens.append((eos, eos_token_id))
210
+ self._tokenizer.post_processor = processors.TemplateProcessing(
211
+ single=single, pair=pair, special_tokens=special_tokens
212
+ )
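The effect of `update_post_processor` is easiest to see by toggling the flags through the setters defined below, which rebuild the template processor; a small sketch (the exact ids depend on the vocabulary, only the BOS/EOS positions matter here):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

ids = tok("hi").input_ids
print(ids[0] == tok.bos_token_id)   # True: add_bos_token defaults to True

tok.add_eos_token = True            # the setter calls update_post_processor()
ids = tok("hi").input_ids
print(ids[-1] == tok.eos_token_id)  # True: EOS is now appended as well
```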
213
+
214
+ @property
215
+ def add_eos_token(self):
216
+ return self._add_eos_token
217
+
218
+ @property
219
+ def add_bos_token(self):
220
+ return self._add_bos_token
221
+
222
+ @add_eos_token.setter
223
+ def add_eos_token(self, value):
224
+ self._add_eos_token = value
225
+ self.update_post_processor()
226
+
227
+ @add_bos_token.setter
228
+ def add_bos_token(self, value):
229
+ self._add_bos_token = value
230
+ self.update_post_processor()
231
+
232
+ @property
233
+ def default_chat_template(self):
234
+ """
235
+ Cohere Tokenizer uses <|START_OF_TURN_TOKEN|> and <|END_OF_TURN_TOKEN|> to indicate each turn in a chat.
236
+ Additionally, to indicate the source of the message, it uses <|USER_TOKEN|>, <|CHATBOT_TOKEN|> and <|SYSTEM_TOKEN|>
237
+ for user, assistant and system messages respectively.
238
+
239
+ The output should look something like:
240
+ <|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ preamble }}<|END_OF_TURN_TOKEN|><BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ How are you? }}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{{ I am doing well! }}<|END_OF_TURN_TOKEN|>
241
+
242
+ Use add_generation_prompt to add a prompt for the model to generate a response:
243
+ >>> from transformers import AutoTokenizer
244
+ >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
245
+ >>> messages = [{"role": "user", "content": "Hello, how are you?"}]
246
+ >>> tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
247
+ '<BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>'
248
+
249
+ """
250
+ logger.warning_once(
251
+ "\nNo chat template is defined for this tokenizer - using the default template "
252
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
253
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
254
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
255
+ )
256
+ default_template = (
257
+ "{{ bos_token }}"
258
+ "{% if messages[0]['role'] == 'system' %}"
259
+ "{% set loop_messages = messages[1:] %}" # Extract system message if it's present
260
+ "{% set system_message = messages[0]['content'] %}"
261
+ "{% elif USE_DEFAULT_PROMPT == true %}"
262
+ "{% set loop_messages = messages %}" # Or use the default system message if the flag is set
263
+ "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
264
+ "{% else %}"
265
+ "{% set loop_messages = messages %}"
266
+ "{% set system_message = false %}"
267
+ "{% endif %}"
268
+ "{% if system_message != false %}" # Start with system message
269
+ "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}"
270
+ "{% endif %}"
271
+ "{% for message in loop_messages %}" # Loop over all non-system messages
272
+ "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
273
+ "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
274
+ "{% endif %}"
275
+ "{% set content = message['content'] %}"
276
+ "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way
277
+ "{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}"
278
+ "{% elif message['role'] == 'assistant' %}"
279
+ "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}"
280
+ "{% endif %}"
281
+ "{% endfor %}"
282
+ "{% if add_generation_prompt %}"
283
+ "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}"
284
+ "{% endif %}"
285
+ )
286
+ default_template = default_template.replace(
287
+ "USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false"
288
+ )
289
+ default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
290
+ default_template = default_template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
291
+
292
+ tool_use_template = (
293
+ "{{ bos_token }}"
294
+ "{% if messages[0]['role'] == 'system' %}"
295
+ "{% set loop_messages = messages[1:] %}" # Extract system message if it's present
296
+ "{% set system_message = messages[0]['content'] %}"
297
+ "{% else %}"
298
+ "{% set loop_messages = messages %}"
299
+ "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
300
+ "{% endif %}"
301
+ "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }}"
302
+ "{{ '# Safety Preamble' }}"
303
+ "{{ '\nThe instructions in this section override those in the task description and style guide sections. Don\\'t answer questions that are harmful or immoral.' }}"
304
+ "{{ '\n\n# System Preamble' }}"
305
+ "{{ '\n## Basic Rules' }}"
306
+ "{{ '\nYou are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user\\'s requests, you cite your sources in your answers, according to those instructions.' }}"
307
+ "{{ '\n\n# User Preamble' }}"
308
+ "{{ '\n' + system_message }}"
309
+ "{{'\n\n## Available Tools\nHere is a list of tools that you have available to you:\n\n'}}"
310
+ "{% for tool in tools %}"
311
+ "{% if loop.index0 != 0 %}"
312
+ "{{ '\n\n'}}"
313
+ "{% endif %}"
314
+ "{{'```python\ndef ' + tool.name + '('}}"
315
+ "{% for param_name, param_fields in tool.parameter_definitions.items() %}"
316
+ "{% if loop.index0 != 0 %}"
317
+ "{{ ', '}}"
318
+ "{% endif %}"
319
+ "{{param_name}}: "
320
+ "{% if not param_fields.required %}"
321
+ "{{'Optional[' + param_fields.type + '] = None'}}"
322
+ "{% else %}"
323
+ "{{ param_fields.type }}"
324
+ "{% endif %}"
325
+ "{% endfor %}"
326
+ '{{ \') -> List[Dict]:\n """\'}}'
327
+ "{{ tool.description }}"
328
+ "{% if tool.parameter_definitions|length != 0 %}"
329
+ "{{ '\n\n Args:\n '}}"
330
+ "{% for param_name, param_fields in tool.parameter_definitions.items() %}"
331
+ "{% if loop.index0 != 0 %}"
332
+ "{{ '\n ' }}"
333
+ "{% endif %}"
334
+ "{{ param_name + ' ('}}"
335
+ "{% if not param_fields.required %}"
336
+ "{{'Optional[' + param_fields.type + ']'}}"
337
+ "{% else %}"
338
+ "{{ param_fields.type }}"
339
+ "{% endif %}"
340
+ "{{ '): ' + param_fields.description }}"
341
+ "{% endfor %}"
342
+ "{% endif %}"
343
+ '{{ \'\n """\n pass\n```\' }}'
344
+ "{% endfor %}"
345
+ "{{ '<|END_OF_TURN_TOKEN|>'}}"
346
+ "{% for message in loop_messages %}"
347
+ "{% set content = message['content'] %}"
348
+ "{% if message['role'] == 'user' %}"
349
+ "{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}"
350
+ "{% elif message['role'] == 'system' %}"
351
+ "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}"
352
+ "{% elif message['role'] == 'assistant' %}"
353
+ "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}"
354
+ "{% endif %}"
355
+ "{% endfor %}"
356
+ "{{'<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write \\'Action:\\' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user\\'s last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example:\n```json\n[\n {\n \"tool_name\": title of the tool in the specification,\n \"parameters\": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters\n }\n]```<|END_OF_TURN_TOKEN|>'}}"
357
+ "{% if add_generation_prompt %}"
358
+ "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}"
359
+ "{% endif %}"
360
+ )
361
+ default_tool_message = DEFAULT_RAG_PREAMBLE.replace("\n", "\\n").replace("'", "\\'")
362
+ tool_use_template = tool_use_template.replace("DEFAULT_SYSTEM_MESSAGE", default_tool_message)
363
+
364
+ rag_template = (
365
+ "{{ bos_token }}"
366
+ "{% if messages[0]['role'] == 'system' %}"
367
+ "{% set loop_messages = messages[1:] %}" # Extract system message if it's present
368
+ "{% set system_message = messages[0]['content'] %}"
369
+ "{% else %}"
370
+ "{% set loop_messages = messages %}"
371
+ "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
372
+ "{% endif %}"
373
+ "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }}"
374
+ "{{ '# Safety Preamble' }}"
375
+ "{{ '\nThe instructions in this section override those in the task description and style guide sections. Don\\'t answer questions that are harmful or immoral.' }}"
376
+ "{{ '\n\n# System Preamble' }}"
377
+ "{{ '\n## Basic Rules' }}"
378
+ "{{ '\nYou are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user\\'s requests, you cite your sources in your answers, according to those instructions.' }}"
379
+ "{{ '\n\n# User Preamble' }}"
380
+ "{{ '\n' + system_message }}"
381
+ "{{ '<|END_OF_TURN_TOKEN|>'}}"
382
+ "{% for message in loop_messages %}" # Loop over all non-system messages
383
+ "{% set content = message['content'] %}"
384
+ "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way
385
+ "{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}"
386
+ "{% elif message['role'] == 'system' %}"
387
+ "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}"
388
+ "{% elif message['role'] == 'assistant' %}"
389
+ "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}"
390
+ "{% endif %}"
391
+ "{% endfor %}"
392
+ "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>'}}"
393
+ "{{ '<results>' }}"
394
+ "{% for document in documents %}" # Loop over all non-system messages
395
+ "{{ '\nDocument: ' }}"
396
+ "{{ loop.index0 }}\n"
397
+ "{% for key, value in document.items() %}"
398
+ "{{ key }}: {{value}}\n"
399
+ "{% endfor %}"
400
+ "{% endfor %}"
401
+ "{{ '</results>'}}"
402
+ "{{ '<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }}"
403
+ "{{ 'Carefully perform the following instructions, in order, starting each with a new line.\n' }}"
404
+ "{{ 'Firstly, Decide which of the retrieved documents are relevant to the user\\'s last input by writing \\'Relevant Documents:\\' followed by comma-separated list of document numbers. If none are relevant, you should instead write \\'None\\'.\n' }}"
405
+ "{{ 'Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user\\'s last input by writing \\'Cited Documents:\\' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write \\'None\\'.\n' }}"
406
+ "{% if citation_mode=='accurate' %}"
407
+ "{{ 'Thirdly, Write \\'Answer:\\' followed by a response to the user\\'s last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup.\n' }}"
408
+ "{% endif %}"
409
+ "{{ 'Finally, Write \\'Grounded answer:\\' followed by a response to the user\\'s last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.' }}"
410
+ "{{ '<|END_OF_TURN_TOKEN|>' }}"
411
+ "{% if add_generation_prompt %}"
412
+ "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}"
413
+ "{% endif %}"
414
+ )
415
+ default_rag_message = DEFAULT_RAG_PREAMBLE.replace("\n", "\\n").replace("'", "\\'")
416
+ rag_template = rag_template.replace("DEFAULT_SYSTEM_MESSAGE", default_rag_message)
417
+
418
+ return {"default": default_template, "tool_use": tool_use_template, "rag": rag_template}
419
+
420
+ def apply_tool_use_template(
421
+ self,
422
+ conversation: Union[List[Dict[str, str]], "Conversation"],
423
+ tools: List[Dict],
424
+ **kwargs,
425
+ ) -> Union[str, List[int]]:
426
+ """Create a Command-R tool-use prompt.
427
+
428
+ Once rendered, the prompt instructs the model to generate a list of actions to perform on a set of user supplied tools
429
+ to help carry out the user's requests.
430
+
431
+ Conceptually, this works in the same way as `apply_chat_template`, but takes an additional `tools` parameter.
432
+
433
+ Converts a Conversation object or a list of dictionaries with `"role"` and `"content"` keys and a list of available
434
+ tools for the model to use into a prompt string, or a list of token ids.
435
+ This method will use the tokenizer's `default_tool_use_template` template specified at the class level.
436
+ You can override the default template using the `tool_use_template` kwarg but the quality of your results may decrease.
437
+
438
+ Args:
439
+ conversation (Union[List[Dict[str, str]], "Conversation"]): A Conversation object or list of dicts
440
+ with "role" and "content" keys, representing the chat history so far.
441
+ tools (List[Dict]): a list of tools to render into the prompt for the model to choose from.
442
+ See an example at the bottom of the docstring.
443
+ The format should be:
444
+ * name (str): The name of the tool to be called. Valid names contain only the characters a-z,
445
+ A-Z, 0-9, _ and must not begin with a digit.
446
+ * description (str): The description of what the tool does, the model uses the description to
447
+ choose when and how to call the function.
448
+ * parameter_definitions (List[Dict]): The input parameters of the tool. Accepts a dictionary
449
+ where the key is the name of the parameter and the value is the parameter spec.
450
+ Valid parameter names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit.
451
+ Parameter specs are as follows:
452
+ * description (str): The description of the parameter.
453
+ * type (str): the type of the parameter - most effective for python builtin data types, such as 'str', 'bool'
454
+ * required (bool): Denotes whether the parameter is always present (required) or not. Defaults to not required.
455
+ add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate
456
+ the start of an assistant message. This is useful when you want to generate a response from the model.
457
+ Note that this argument will be passed to the chat template, and so it must be supported in the
458
+ template for this argument to have any effect.
459
+ tokenize (`bool`, defaults to `True`):
460
+ Whether to tokenize the output. If `False`, the output will be a string.
461
+ padding (`bool`, defaults to `False`):
462
+ Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`.
463
+ truncation (`bool`, defaults to `False`):
464
+ Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
465
+ max_length (`int`, *optional*):
466
+ Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
467
+ not specified, the tokenizer's `max_length` attribute will be used as a default.
468
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
469
+ If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
470
+ values are:
471
+ - `'tf'`: Return TensorFlow `tf.Tensor` objects.
472
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
473
+ - `'np'`: Return NumPy `np.ndarray` objects.
474
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
475
+ return_dict (`bool`, *optional*, defaults to `False`):
476
+ Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
477
+ **tokenizer_kwargs: Additional kwargs to pass to the tokenizer.
478
+
479
+ Returns:
480
+ `str`: A rendered prompt string.
481
+ or if tokenize=True:
482
+ `List[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This
483
+ output is ready to pass to the model, either directly or via methods like `generate()`.
484
+
485
+ Examples:
486
+
487
+ ```python
488
+ >> tokenizer = CohereTokenizerFast.from_pretrained("CohereForAI/c4ai-command-r-v01")
489
+ >> tools = [
490
+ {
491
+ "name": "internet_search",
492
+ "description": "Returns a list of relevant document snippets for a textual query retrieved from the internet",
493
+ "parameter_definitions": {
494
+ "query": {
495
+ "description": "Query to search the internet with",
496
+ "type": "str",
497
+ "required": True
498
+ }
499
+ }
500
+ },
501
+ {
502
+ "name": "directly_answer",
503
+ "description": "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history",
504
+ "parameter_definitions": {}
505
+ }
506
+ ]
507
+ >> conversation = [
508
+ {"role": "user", "content": "Whats the biggest penguin in the world?"}
509
+ ]
510
+ >> # render the prompt, ready for user to inspect, or for input into the model:
511
+ >> prompt = tokenizer.apply_tool_use_template(conversation, tools=tools, tokenize=False, add_generation_prompt=True)
512
+ >> print(prompt)
513
+ <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
514
+ The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
515
+
516
+ # System Preamble
517
+ ## Basic Rules
518
+ You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
519
+
520
+ # User Preamble
521
+ ## Task and Context
522
+ You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
523
+
524
+ ## Style Guide
525
+ Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.
526
+
527
+ ## Available Tools
528
+ Here is a list of tools that you have available to you:
529
+
530
+ \\`\\`\\`python
531
+ def internet_search(query: str) -> List[Dict]:
532
+ \"\"\"Returns a list of relevant document snippets for a textual query retrieved from the internet
533
+
534
+ Args:
535
+ query (str): Query to search the internet with
536
+ \"\"\"
537
+ pass
538
+ \\`\\`\\`
539
+
540
+ \\`\\`\\`python
541
+ def directly_answer() -> List[Dict]:
542
+ \"\"\"Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history
543
+ \"\"\"
544
+ pass
545
+ \\`\\`\\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example:
546
+ \\`\\`\\`json
547
+ [
548
+ {
549
+ "tool_name": title of the tool in the specification,
550
+ "parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters
551
+ }
552
+ ]\\`\\`\\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
553
+ ```
554
+ >> inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors='pt')
555
+ >> outputs = model.generate(inputs, max_new_tokens=128)
556
+ >> print(tokenizer.decode(outputs[0]))
557
+ Action: ```json
558
+ [
559
+ {
560
+ "tool_name": "internet_search",
561
+ "parameters": {
562
+ "query": "biggest penguin in the world"
563
+ }
564
+ }
565
+ ]
566
+ ```
567
+ """
568
+ return self.apply_chat_template(
569
+ conversation,
570
+ chat_template="tool_use",
571
+ tools=tools,
572
+ **kwargs,
573
+ )
574
+
575
+ def apply_grounded_generation_template(
576
+ self,
577
+ conversation: Union[List[Dict[str, str]], "Conversation"],
578
+ documents: List[Dict],
579
+ citation_mode: Literal["fast", "accurate"] = "accurate",
580
+ **kwargs,
581
+ ) -> Union[str, List[int]]:
582
+ """Create a Command-R grounded generation (aka RAG) prompt.
583
+
584
+ Once rendered, the prompt instructs the model to generate a response with inline citations, based on the supplied documents.
585
+
586
+ Conceptually, this works in the same way as `apply_chat_template`, but takes additional `documents`
587
+ and `citation_mode` parameters.
588
+
589
+ Converts a Conversation object or a list of dictionaries with `"role"` and `"content"` keys and a list of
590
+ documents for the model to ground its response on into a prompt string, or a list of token ids.
591
+ This method will use the tokenizer's `grounded_generation_template` template specified at the class level.
592
+ You can override the default template using the `grounded_generation_template` kwarg but the quality of your results may decrease.
593
+
594
+ Args:
595
+ conversation (Union[List[Dict[str, str]], "Conversation"]): A Conversation object or list of dicts
596
+ with "role" and "content" keys, representing the chat history so far.
597
+ documents (List[Dict[str, str]]): A list of dicts, representing documents or tool outputs to ground your
598
+ generation on. A document is a semi-structured dict with a string-to-string mapping. Common fields are
599
+ `url`, `title`, `snippet` etc., but the keys should be descriptive, as they will get rendered into the prompt.
600
+ citation_mode: either "accurate" (prompt the model to generate an answer first, then rewrite it with citation
601
+ spans in) or "fast", where the prompt instructs the model to generate an answer with citations in it directly.
602
+ The former has higher quality citations, the latter requires fewer tokens to be generated.
603
+ add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate
604
+ the start of an assistant message. This is useful when you want to generate a response from the model.
605
+ Note that this argument will be passed to the chat template, and so it must be supported in the
606
+ template for this argument to have any effect.
607
+ tokenize (`bool`, defaults to `True`):
608
+ Whether to tokenize the output. If `False`, the output will be a string.
609
+ padding (`bool`, defaults to `False`):
610
+ Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`.
611
+ truncation (`bool`, defaults to `False`):
612
+ Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
613
+ max_length (`int`, *optional*):
614
+ Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
615
+ not specified, the tokenizer's `max_length` attribute will be used as a default.
616
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
617
+ If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
618
+ values are:
619
+ - `'tf'`: Return TensorFlow `tf.Tensor` objects.
620
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
621
+ - `'np'`: Return NumPy `np.ndarray` objects.
622
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
623
+ return_dict (`bool`, *optional*, defaults to `False`):
624
+ Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
625
+ **tokenizer_kwargs: Additional kwargs to pass to the tokenizer.
626
+
627
+ Returns:
628
+ `str`: A rendered prompt string.
629
+ or if tokenize=True:
630
+ `List[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This
631
+ output is ready to pass to the model, either directly or via methods like `generate()`.
632
+
633
+ Examples:
634
+
635
+ ```python
636
+ >> tokenizer = CohereTokenizerFast.from_pretrained('CohereForAI/c4ai-command-r-v01')
637
+
638
+ >> # define documents:
639
+ >> documents = [
640
+ { "title": "Tall penguins", "text": "Emperor penguins are the tallest." },
641
+ { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."}
642
+ ]
643
+ >> # define a conversation:
644
+ >> conversation = [
645
+ {"role": "user", "content": "Whats the biggest penguin in the world?"}
646
+ ]
647
+ >> # render the prompt, ready for user to inspect, or for input into the model:
648
+ >> grounded_generation_prompt = tokenizer.apply_grounded_generation_template(conversation, documents=documents, tokenize=False, add_generation_prompt=True)
649
+ >> print(grounded_generation_prompt)
650
+ <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
651
+ The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
652
+
653
+ ## Basic Rules
654
+ You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
655
+
656
+ # User Preamble
657
+ ## Task and Context
658
+ You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
659
+
660
+ ## Style Guide
661
+ Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results>
662
+ Document: 0
663
+ title: Tall penguins
664
+ text: Emperor penguins are the tallest.
665
+
666
+ Document: 1
667
+ title: Penguin habitats
668
+ text: Emperor penguins only live in Antarctica.
669
+ </results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line.
670
+ Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'.
671
+ Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'.
672
+ Thirdly, Write 'Answer:' followed by a response to the user's last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup.
673
+ Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>'''
674
+ ```
675
+ >> inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors='pt')
676
+ >> outputs = model.generate(inputs, max_new_tokens=128)
677
+ >> print(tokenizer.decode(outputs[0]))
678
+ Relevant Documents: 0,1
679
+ Cited Documents: 0,1
680
+ Answer: The Emperor Penguin is the tallest or biggest penguin in the world. It is a bird that lives only in Antarctica and grows to a height of around 122 centimetres.
681
+ Grounded answer: The <co: 0>Emperor Penguin</co: 0> is the <co: 0>tallest</co: 0> or biggest penguin in the world. It is a bird that <co: 1>lives only in Antarctica</co: 1> and <co: 0>grows to a height of around 122 centimetres.</co: 0>
682
+ """
683
+ return self.apply_chat_template(
684
+ conversation,
685
+ chat_template="rag",
686
+ documents=documents,
687
+ citation_mode=citation_mode,
688
+ **kwargs,
689
+ )
690
+
691
+ # TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers
692
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
693
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
694
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
695
+
696
+ output = bos_token_id + token_ids_0 + eos_token_id
697
+
698
+ if token_ids_1 is not None:
699
+ output = output + bos_token_id + token_ids_1 + eos_token_id
700
+
701
+ return output
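For reference, the helper above only wraps the token ids with the optional BOS/EOS ids. A minimal standalone sketch of the same behaviour, assuming `add_bos_token=True` and `add_eos_token=False` (an assumption made purely for illustration, with hypothetical token ids):

```python
# Hedged sketch of build_inputs_with_special_tokens; bos_id/eos_id are made-up ids.
def build_inputs(token_ids_0, token_ids_1=None, bos_id=5, eos_id=7):
    bos = [bos_id]  # prepended because add_bos_token is assumed True
    eos = []        # empty because add_eos_token is assumed False
    output = bos + token_ids_0 + eos
    if token_ids_1 is not None:
        # a second sequence gets its own BOS/EOS wrapping, as in the method above
        output = output + bos + token_ids_1 + eos
    return output

print(build_inputs([10, 11, 12]))        # [5, 10, 11, 12]
print(build_inputs([10, 11], [20, 21]))  # [5, 10, 11, 5, 20, 21]
```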
llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__init__.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ # rely on isort to merge the imports
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
18
+
19
+
20
+ _import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
21
+
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_focalnet"] = [
30
+ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
31
+ "FocalNetForImageClassification",
32
+ "FocalNetForMaskedImageModeling",
33
+ "FocalNetBackbone",
34
+ "FocalNetModel",
35
+ "FocalNetPreTrainedModel",
36
+ ]
37
+
38
+ if TYPE_CHECKING:
39
+ from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
40
+
41
+ try:
42
+ if not is_torch_available():
43
+ raise OptionalDependencyNotAvailable()
44
+ except OptionalDependencyNotAvailable:
45
+ pass
46
+ else:
47
+ from .modeling_focalnet import (
48
+ FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
49
+ FocalNetBackbone,
50
+ FocalNetForImageClassification,
51
+ FocalNetForMaskedImageModeling,
52
+ FocalNetModel,
53
+ FocalNetPreTrainedModel,
54
+ )
55
+
56
+ else:
57
+ import sys
58
+
59
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
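The `_LazyModule` indirection above defers the torch-dependent imports until a symbol is actually accessed. A hedged usage sketch (assuming `transformers` and `torch` are installed):

```python
# Accessing the config should not require torch; the modeling symbols are resolved
# lazily the first time they are pulled out of the package namespace.
from transformers import FocalNetConfig

config = FocalNetConfig()
print(config.model_type)  # "focalnet"

# Only at this point does the lazy module import modeling_focalnet (which needs torch).
from transformers import FocalNetModel

model = FocalNetModel(config)
```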
llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/configuration_focalnet.py ADDED
@@ -0,0 +1,164 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ FocalNet model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`FocalNetModel`]. It is used to instantiate a
31
+ FocalNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of the FocalNet
33
+ [microsoft/focalnet-tiny](https://huggingface.co/microsoft/focalnet-tiny) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ image_size (`int`, *optional*, defaults to 224):
40
+ The size (resolution) of each image.
41
+ patch_size (`int`, *optional*, defaults to 4):
42
+ The size (resolution) of each patch in the embeddings layer.
43
+ num_channels (`int`, *optional*, defaults to 3):
44
+ The number of input channels.
45
+ embed_dim (`int`, *optional*, defaults to 96):
46
+ Dimensionality of patch embedding.
47
+ use_conv_embed (`bool`, *optional*, defaults to `False`):
48
+ Whether to use convolutional embedding. The authors noted that using convolutional embedding usually
49
+ improve the performance, but it's not used by default.
50
+ hidden_sizes (`List[int]`, *optional*, defaults to `[192, 384, 768, 768]`):
51
+ Dimensionality (hidden size) at each stage.
52
+ depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
53
+ Depth (number of layers) of each stage in the encoder.
54
+ focal_levels (`list(int)`, *optional*, defaults to `[2, 2, 2, 2]`):
55
+ Number of focal levels in each layer of the respective stages in the encoder.
56
+ focal_windows (`list(int)`, *optional*, defaults to `[3, 3, 3, 3]`):
57
+ Focal window size in each layer of the respective stages in the encoder.
58
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
59
+ The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
60
+ `"selu"` and `"gelu_new"` are supported.
61
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
62
+ Ratio of MLP hidden dimensionality to embedding dimensionality.
63
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
64
+ The dropout probability for all fully connected layers in the embeddings and encoder.
65
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
66
+ Stochastic depth rate.
67
+ use_layerscale (`bool`, *optional*, defaults to `False`):
68
+ Whether to use layer scale in the encoder.
69
+ layerscale_value (`float`, *optional*, defaults to 0.0001):
70
+ The initial value of the layer scale.
71
+ use_post_layernorm (`bool`, *optional*, defaults to `False`):
72
+ Whether to use post layer normalization in the encoder.
73
+ use_post_layernorm_in_modulation (`bool`, *optional*, defaults to `False`):
74
+ Whether to use post layer normalization in the modulation layer.
75
+ normalize_modulator (`bool`, *optional*, defaults to `False`):
76
+ Whether to normalize the modulator.
77
+ initializer_range (`float`, *optional*, defaults to 0.02):
78
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
79
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
80
+ The epsilon used by the layer normalization layers.
81
+ encoder_stride (`int`, *optional*, defaults to 32):
82
+ Factor to increase the spatial resolution by in the decoder head for masked image modeling.
83
+ out_features (`List[str]`, *optional*):
84
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
85
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
86
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
87
+ same order as defined in the `stage_names` attribute.
88
+ out_indices (`List[int]`, *optional*):
89
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
90
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
91
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
92
+ same order as defined in the `stage_names` attribute.
93
+
94
+ Example:
95
+
96
+ ```python
97
+ >>> from transformers import FocalNetConfig, FocalNetModel
98
+
99
+ >>> # Initializing a FocalNet microsoft/focalnet-tiny style configuration
100
+ >>> configuration = FocalNetConfig()
101
+
102
+ >>> # Initializing a model (with random weights) from the microsoft/focalnet-tiny style configuration
103
+ >>> model = FocalNetModel(configuration)
104
+
105
+ >>> # Accessing the model configuration
106
+ >>> configuration = model.config
107
+ ```"""
108
+
109
+ model_type = "focalnet"
110
+
111
+ def __init__(
112
+ self,
113
+ image_size=224,
114
+ patch_size=4,
115
+ num_channels=3,
116
+ embed_dim=96,
117
+ use_conv_embed=False,
118
+ hidden_sizes=[192, 384, 768, 768],
119
+ depths=[2, 2, 6, 2],
120
+ focal_levels=[2, 2, 2, 2],
121
+ focal_windows=[3, 3, 3, 3],
122
+ hidden_act="gelu",
123
+ mlp_ratio=4.0,
124
+ hidden_dropout_prob=0.0,
125
+ drop_path_rate=0.1,
126
+ use_layerscale=False,
127
+ layerscale_value=1e-4,
128
+ use_post_layernorm=False,
129
+ use_post_layernorm_in_modulation=False,
130
+ normalize_modulator=False,
131
+ initializer_range=0.02,
132
+ layer_norm_eps=1e-5,
133
+ encoder_stride=32,
134
+ out_features=None,
135
+ out_indices=None,
136
+ **kwargs,
137
+ ):
138
+ super().__init__(**kwargs)
139
+
140
+ self.image_size = image_size
141
+ self.patch_size = patch_size
142
+ self.num_channels = num_channels
143
+ self.embed_dim = embed_dim
144
+ self.use_conv_embed = use_conv_embed
145
+ self.hidden_sizes = hidden_sizes
146
+ self.depths = depths
147
+ self.focal_levels = focal_levels
148
+ self.focal_windows = focal_windows
149
+ self.hidden_act = hidden_act
150
+ self.mlp_ratio = mlp_ratio
151
+ self.hidden_dropout_prob = hidden_dropout_prob
152
+ self.drop_path_rate = drop_path_rate
153
+ self.use_layerscale = use_layerscale
154
+ self.layerscale_value = layerscale_value
155
+ self.use_post_layernorm = use_post_layernorm
156
+ self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
157
+ self.normalize_modulator = normalize_modulator
158
+ self.initializer_range = initializer_range
159
+ self.layer_norm_eps = layer_norm_eps
160
+ self.encoder_stride = encoder_stride
161
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
162
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
163
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
164
+ )
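Because the config mixes in `BackboneConfigMixin`, `out_features` and `out_indices` are aligned against `stage_names` at construction time. A small sketch of how that plays out with the default 4-stage layout:

```python
from transformers import FocalNetConfig

# With depths=[2, 2, 6, 2] the stage names are ["stem", "stage1", ..., "stage4"].
config = FocalNetConfig(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # indices aligned to the requested stages, i.e. 2 and 4
```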
llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/convert_focalnet_to_hf_format.py ADDED
@@ -0,0 +1,237 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert FocalNet checkpoints from the original repository. URL: https://github.com/microsoft/FocalNet/tree/main"""
16
+
17
+ import argparse
18
+ import json
19
+
20
+ import requests
21
+ import torch
22
+ from huggingface_hub import hf_hub_download
23
+ from PIL import Image
24
+ from torchvision import transforms
25
+
26
+ from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
27
+ from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
28
+
29
+
30
+ def get_focalnet_config(model_name):
31
+ depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
32
+ use_conv_embed = True if "large" in model_name or "huge" in model_name else False
33
+ use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
34
+ use_layerscale = True if "large" in model_name or "huge" in model_name else False
35
+
36
+ if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
37
+ if "fl3" in model_name:
38
+ focal_levels = [3, 3, 3, 3]
39
+ focal_windows = [5, 5, 5, 5]
40
+ elif "fl4" in model_name:
41
+ focal_levels = [4, 4, 4, 4]
42
+ focal_windows = [3, 3, 3, 3]
43
+
44
+ if "tiny" in model_name or "small" in model_name or "base" in model_name:
45
+ focal_windows = [3, 3, 3, 3]
46
+ if "lrf" in model_name:
47
+ focal_levels = [3, 3, 3, 3]
48
+ else:
49
+ focal_levels = [2, 2, 2, 2]
50
+
51
+ if "tiny" in model_name:
52
+ embed_dim = 96
53
+ elif "small" in model_name:
54
+ embed_dim = 96
55
+ elif "base" in model_name:
56
+ embed_dim = 128
57
+ elif "large" in model_name:
58
+ embed_dim = 192
59
+ elif "xlarge" in model_name:
60
+ embed_dim = 256
61
+ elif "huge" in model_name:
62
+ embed_dim = 352
63
+
64
+ # set label information
65
+ repo_id = "huggingface/label-files"
66
+ if "large" in model_name or "huge" in model_name:
67
+ filename = "imagenet-22k-id2label.json"
68
+ else:
69
+ filename = "imagenet-1k-id2label.json"
70
+
71
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
72
+ id2label = {int(k): v for k, v in id2label.items()}
73
+ label2id = {v: k for k, v in id2label.items()}
74
+
75
+ config = FocalNetConfig(
76
+ embed_dim=embed_dim,
77
+ depths=depths,
78
+ focal_levels=focal_levels,
79
+ focal_windows=focal_windows,
80
+ use_conv_embed=use_conv_embed,
81
+ id2label=id2label,
82
+ label2id=label2id,
83
+ use_post_layernorm=use_post_layernorm,
84
+ use_layerscale=use_layerscale,
85
+ )
86
+
87
+ return config
88
+
89
+
90
+ def rename_key(name):
91
+ if "patch_embed.proj" in name:
92
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
93
+ if "patch_embed.norm" in name:
94
+ name = name.replace("patch_embed.norm", "embeddings.norm")
95
+ if "layers" in name:
96
+ name = "encoder." + name
97
+ if "encoder.layers" in name:
98
+ name = name.replace("encoder.layers", "encoder.stages")
99
+ if "downsample.proj" in name:
100
+ name = name.replace("downsample.proj", "downsample.projection")
101
+ if "blocks" in name:
102
+ name = name.replace("blocks", "layers")
103
+ if "modulation.f.weight" in name or "modulation.f.bias" in name:
104
+ name = name.replace("modulation.f", "modulation.projection_in")
105
+ if "modulation.h.weight" in name or "modulation.h.bias" in name:
106
+ name = name.replace("modulation.h", "modulation.projection_context")
107
+ if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
108
+ name = name.replace("modulation.proj", "modulation.projection_out")
109
+
110
+ if name == "norm.weight":
111
+ name = "layernorm.weight"
112
+ if name == "norm.bias":
113
+ name = "layernorm.bias"
114
+
115
+ if "head" in name:
116
+ name = name.replace("head", "classifier")
117
+ else:
118
+ name = "focalnet." + name
119
+
120
+ return name
121
+
122
+
123
+ def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
124
+ # fmt: off
125
+ model_name_to_url = {
126
+ "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
127
+ "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
128
+ "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
129
+ "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
130
+ "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
131
+ "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
132
+ "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
133
+ "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
134
+ "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
135
+ "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
136
+ }
137
+ # fmt: on
138
+
139
+ checkpoint_url = model_name_to_url[model_name]
140
+ print("Checkpoint URL: ", checkpoint_url)
141
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
142
+
143
+ # rename keys
144
+ for key in state_dict.copy().keys():
145
+ val = state_dict.pop(key)
146
+ state_dict[rename_key(key)] = val
147
+
148
+ config = get_focalnet_config(model_name)
149
+ model = FocalNetForImageClassification(config)
150
+ model.eval()
151
+
152
+ # load state dict
153
+ model.load_state_dict(state_dict)
154
+
155
+ # verify conversion
156
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
157
+
158
+ processor = BitImageProcessor(
159
+ do_resize=True,
160
+ size={"shortest_edge": 256},
161
+ resample=PILImageResampling.BILINEAR,
162
+ do_center_crop=True,
163
+ crop_size=224,
164
+ do_normalize=True,
165
+ image_mean=IMAGENET_DEFAULT_MEAN,
166
+ image_std=IMAGENET_DEFAULT_STD,
167
+ )
168
+ image = Image.open(requests.get(url, stream=True).raw)
169
+ inputs = processor(images=image, return_tensors="pt")
170
+
171
+ image_transforms = transforms.Compose(
172
+ [
173
+ transforms.Resize(256),
174
+ transforms.CenterCrop(224),
175
+ transforms.ToTensor(),
176
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
177
+ ]
178
+ )
179
+
180
+ original_pixel_values = image_transforms(image).unsqueeze(0)
181
+
182
+ # verify pixel_values
183
+ assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
184
+
185
+ outputs = model(**inputs)
186
+
187
+ predicted_class_idx = outputs.logits.argmax(-1).item()
188
+ print("Predicted class:", model.config.id2label[predicted_class_idx])
189
+
190
+ print("First values of logits:", outputs.logits[0, :3])
191
+
192
+ if model_name == "focalnet-tiny":
193
+ expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
194
+ elif model_name == "focalnet-tiny-lrf":
195
+ expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
196
+ elif model_name == "focalnet-small":
197
+ expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
198
+ elif model_name == "focalnet-small-lrf":
199
+ expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
200
+ elif model_name == "focalnet-base":
201
+ expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
202
+ elif model_name == "focalnet-base-lrf":
203
+ expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
204
+ assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
205
+ print("Looks ok!")
206
+
207
+ if pytorch_dump_folder_path is not None:
208
+ print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
209
+ model.save_pretrained(pytorch_dump_folder_path)
210
+ processor.save_pretrained(pytorch_dump_folder_path)
211
+
212
+ if push_to_hub:
213
+ print(f"Pushing model and processor of {model_name} to the hub...")
214
+ model.push_to_hub(f"{model_name}")
215
+ processor.push_to_hub(f"{model_name}")
216
+
217
+
218
+ if __name__ == "__main__":
219
+ parser = argparse.ArgumentParser()
220
+ # Required parameters
221
+ parser.add_argument(
222
+ "--model_name",
223
+ default="focalnet-tiny",
224
+ type=str,
225
+ help="Name of the FocalNet model you'd like to convert.",
226
+ )
227
+ parser.add_argument(
228
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
229
+ )
230
+ parser.add_argument(
231
+ "--push_to_hub",
232
+ action="store_true",
233
+ help="Whether to push the model and processor to the hub.",
234
+ )
235
+
236
+ args = parser.parse_args()
237
+ convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
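To make the key remapping above concrete, here is an illustrative run of `rename_key` on a few original FocalNet state-dict keys (importing the conversion script requires its optional dependencies such as torchvision to be installed; the expected outputs below simply follow the replacement rules in the function):

```python
from transformers.models.focalnet.convert_focalnet_to_hf_format import rename_key

# Stem patch embedding: "patch_embed.proj" -> "embeddings.patch_embeddings.projection"
print(rename_key("patch_embed.proj.weight"))
# focalnet.embeddings.patch_embeddings.projection.weight

# Stage/block/modulation keys get the encoder prefix plus the renamed sub-modules.
print(rename_key("layers.0.blocks.1.modulation.f.weight"))
# focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight

# The classification head is the only key that is not prefixed with "focalnet."
print(rename_key("head.weight"))
# classifier.weight
```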
llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/modeling_focalnet.py ADDED
@@ -0,0 +1,1032 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch FocalNet model."""
16
+
17
+
18
+ import collections.abc
19
+ import math
20
+ from dataclasses import dataclass
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from ...activations import ACT2FN
29
+ from ...modeling_outputs import BackboneOutput
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...utils import (
32
+ ModelOutput,
33
+ add_code_sample_docstrings,
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ logging,
37
+ replace_return_docstrings,
38
+ )
39
+ from ...utils.backbone_utils import BackboneMixin
40
+ from .configuration_focalnet import FocalNetConfig
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ # General docstring
46
+ _CONFIG_FOR_DOC = "FocalNetConfig"
47
+
48
+ # Base docstring
49
+ _CHECKPOINT_FOR_DOC = "microsoft/focalnet-tiny"
50
+ _EXPECTED_OUTPUT_SHAPE = [1, 49, 768]
51
+
52
+ # Image classification docstring
53
+ _IMAGE_CLASS_CHECKPOINT = "microsoft/focalnet-tiny"
54
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
55
+
56
+
57
+ from ..deprecated._archive_maps import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ @dataclass
61
+ class FocalNetEncoderOutput(ModelOutput):
62
+ """
63
+ FocalNet encoder's outputs, with potential hidden states.
64
+
65
+ Args:
66
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
67
+ Sequence of hidden-states at the output of the last layer of the model.
68
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
69
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
70
+ shape `(batch_size, sequence_length, hidden_size)`.
71
+
72
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
73
+
74
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
75
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
76
+ shape `(batch_size, hidden_size, height, width)`.
77
+
78
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
79
+ include the spatial dimensions.
80
+ """
81
+
82
+ last_hidden_state: torch.FloatTensor = None
83
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
84
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
85
+
86
+
87
+ @dataclass
88
+ class FocalNetModelOutput(ModelOutput):
89
+ """
90
+ FocalNet model's outputs that also contains a pooling of the last hidden states.
91
+
92
+ Args:
93
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
94
+ Sequence of hidden-states at the output of the last layer of the model.
95
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
96
+ Average pooling of the last layer hidden-state.
97
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
98
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
99
+ shape `(batch_size, sequence_length, hidden_size)`.
100
+
101
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
102
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
103
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
104
+ shape `(batch_size, hidden_size, height, width)`.
105
+
106
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
107
+ include the spatial dimensions.
108
+ """
109
+
110
+ last_hidden_state: torch.FloatTensor = None
111
+ pooler_output: Optional[torch.FloatTensor] = None
112
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
113
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
114
+
115
+
116
+ @dataclass
117
+ class FocalNetMaskedImageModelingOutput(ModelOutput):
118
+ """
119
+ FocalNet masked image model outputs.
120
+
121
+ Args:
122
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
123
+ Masked image modeling (MLM) loss.
124
+ reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
125
+ Reconstructed pixel values.
126
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
127
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
128
+ shape `(batch_size, sequence_length, hidden_size)`.
129
+
130
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
131
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
132
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
133
+ shape `(batch_size, hidden_size, height, width)`.
134
+
135
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
136
+ include the spatial dimensions.
137
+ """
138
+
139
+ loss: Optional[torch.FloatTensor] = None
140
+ reconstruction: torch.FloatTensor = None
141
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
142
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
143
+
144
+
145
+ @dataclass
146
+ class FocalNetImageClassifierOutput(ModelOutput):
147
+ """
148
+ FocalNet outputs for image classification.
149
+
150
+ Args:
151
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
152
+ Classification (or regression if config.num_labels==1) loss.
153
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
154
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
155
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
156
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
157
+ shape `(batch_size, sequence_length, hidden_size)`.
158
+
159
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
160
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
161
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
162
+ shape `(batch_size, hidden_size, height, width)`.
163
+
164
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
165
+ include the spatial dimensions.
166
+ """
167
+
168
+ loss: Optional[torch.FloatTensor] = None
169
+ logits: torch.FloatTensor = None
170
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
171
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
172
+
173
+
174
+ class FocalNetEmbeddings(nn.Module):
175
+ """
176
+ Construct the patch embeddings and layernorm. Optionally, also the mask token.
177
+ """
178
+
179
+ def __init__(self, config, use_mask_token=False):
180
+ super().__init__()
181
+
182
+ self.patch_embeddings = FocalNetPatchEmbeddings(
183
+ config=config,
184
+ image_size=config.image_size,
185
+ patch_size=config.patch_size,
186
+ num_channels=config.num_channels,
187
+ embed_dim=config.embed_dim,
188
+ use_conv_embed=config.use_conv_embed,
189
+ is_stem=True,
190
+ )
191
+ self.patch_grid = self.patch_embeddings.grid_size
192
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
193
+
194
+ self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
195
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
196
+
197
+ def forward(
198
+ self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
199
+ ) -> Tuple[torch.Tensor]:
200
+ embeddings, output_dimensions = self.patch_embeddings(pixel_values)
201
+ embeddings = self.norm(embeddings)
202
+ batch_size, seq_len, _ = embeddings.size()
203
+
204
+ if bool_masked_pos is not None:
205
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
206
+ # replace the masked visual tokens by mask_tokens
207
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
208
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
209
+
210
+ embeddings = self.dropout(embeddings)
211
+ return embeddings, output_dimensions
212
+
213
+
214
+ class FocalNetPatchEmbeddings(nn.Module):
215
+ def __init__(
216
+ self,
217
+ config,
218
+ image_size,
219
+ patch_size,
220
+ num_channels,
221
+ embed_dim,
222
+ add_norm=False,
223
+ use_conv_embed=False,
224
+ is_stem=False,
225
+ ):
226
+ super().__init__()
227
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
228
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
229
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
230
+ self.image_size = image_size
231
+ self.patch_size = patch_size
232
+ self.num_channels = num_channels
233
+ self.num_patches = num_patches
234
+ self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
235
+
236
+ if use_conv_embed:
237
+ # if we choose to use conv embedding, then we treat the stem and non-stem differently
238
+ if is_stem:
239
+ kernel_size = 7
240
+ padding = 2
241
+ stride = 4
242
+ else:
243
+ kernel_size = 3
244
+ padding = 1
245
+ stride = 2
246
+ self.projection = nn.Conv2d(
247
+ num_channels, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
248
+ )
249
+ else:
250
+ self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
251
+
252
+ if add_norm:
253
+ self.norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
254
+ else:
255
+ self.norm = None
256
+
257
+ def maybe_pad(self, pixel_values, height, width):
258
+ if width % self.patch_size[1] != 0:
259
+ pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
260
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
261
+ if height % self.patch_size[0] != 0:
262
+ pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
263
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
264
+ return pixel_values
265
+
266
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
267
+ _, num_channels, height, width = pixel_values.shape
268
+ if num_channels != self.num_channels:
269
+ raise ValueError(
270
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
271
+ )
272
+ # pad the input to be divisible by self.patch_size, if needed
273
+ pixel_values = self.maybe_pad(pixel_values, height, width)
274
+ embeddings = self.projection(pixel_values)
275
+ _, _, height, width = embeddings.shape
276
+ output_dimensions = (height, width)
277
+ embeddings = embeddings.flatten(2).transpose(1, 2)
278
+
279
+ if self.norm is not None:
280
+ embeddings = self.norm(embeddings)
281
+
282
+ return embeddings, output_dimensions
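The non-convolutional branch of the projection above is just a strided `Conv2d`, so the token count is easy to sanity-check. A quick standalone sketch using the default `image_size=224`, `patch_size=4`, `embed_dim=96`:

```python
import torch
from torch import nn

# Same shape arithmetic as FocalNetPatchEmbeddings with use_conv_embed=False:
# a 4x4, stride-4 convolution turns 224x224 pixels into a 56x56 grid of 96-d tokens.
projection = nn.Conv2d(3, 96, kernel_size=4, stride=4)
pixel_values = torch.randn(1, 3, 224, 224)
embeddings = projection(pixel_values).flatten(2).transpose(1, 2)
print(embeddings.shape)  # torch.Size([1, 3136, 96])  (3136 = 56 * 56)
```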
283
+
284
+
285
+ # Copied from transformers.models.beit.modeling_beit.drop_path
286
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
287
+ """
288
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
289
+
290
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
291
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
292
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
293
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
294
+ argument.
295
+ """
296
+ if drop_prob == 0.0 or not training:
297
+ return input
298
+ keep_prob = 1 - drop_prob
299
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
300
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
301
+ random_tensor.floor_() # binarize
302
+ output = input.div(keep_prob) * random_tensor
303
+ return output
304
+
305
+
306
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->FocalNet
307
+ class FocalNetDropPath(nn.Module):
308
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
309
+
310
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
311
+ super().__init__()
312
+ self.drop_prob = drop_prob
313
+
314
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
315
+ return drop_path(hidden_states, self.drop_prob, self.training)
316
+
317
+ def extra_repr(self) -> str:
318
+ return "p={}".format(self.drop_prob)
319
+
320
+
321
+ class FocalNetModulation(nn.Module):
322
+ def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0):
323
+ super().__init__()
324
+
325
+ self.dim = dim
326
+ self.focal_window = config.focal_windows[index]
327
+ self.focal_level = config.focal_levels[index]
328
+ self.focal_factor = focal_factor
329
+ self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation
330
+ self.normalize_modulator = config.normalize_modulator
331
+
332
+ self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias)
333
+ self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias)
334
+
335
+ self.activation = nn.GELU()
336
+ self.projection_out = nn.Linear(dim, dim)
337
+ self.projection_dropout = nn.Dropout(projection_dropout)
338
+ self.focal_layers = nn.ModuleList()
339
+
340
+ self.kernel_sizes = []
341
+ for k in range(self.focal_level):
342
+ kernel_size = self.focal_factor * k + self.focal_window
343
+ self.focal_layers.append(
344
+ nn.Sequential(
345
+ nn.Conv2d(
346
+ dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False
347
+ ),
348
+ nn.GELU(),
349
+ )
350
+ )
351
+ self.kernel_sizes.append(kernel_size)
352
+ if self.use_post_layernorm_in_modulation:
353
+ self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps)
354
+
355
+ def forward(self, hidden_state):
356
+ """
357
+ Args:
358
+ hidden_state:
359
+ Input features with shape of (batch_size, height, width, num_channels)
360
+ """
361
+ num_channels = hidden_state.shape[-1]
362
+
363
+ # pre linear projection
364
+ x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
365
+ q, ctx, self.gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)
366
+
367
+ # context aggregation
368
+ ctx_all = 0
369
+ for level in range(self.focal_level):
370
+ ctx = self.focal_layers[level](ctx)
371
+ ctx_all = ctx_all + ctx * self.gates[:, level : level + 1]
372
+ ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
373
+ ctx_all = ctx_all + ctx_global * self.gates[:, self.focal_level :]
374
+
375
+ # normalize context
376
+ if self.normalize_modulator:
377
+ ctx_all = ctx_all / (self.focal_level + 1)
378
+
379
+ # focal modulation
380
+ self.modulator = self.projection_context(ctx_all)
381
+ x_out = q * self.modulator
382
+ x_out = x_out.permute(0, 2, 3, 1).contiguous()
383
+ if self.use_post_layernorm_in_modulation:
384
+ x_out = self.layernorm(x_out)
385
+
386
+ # post linear projection
387
+ x_out = self.projection_out(x_out)
388
+ x_out = self.projection_dropout(x_out)
389
+ return x_out
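A hedged shape check for the modulation block above, instantiating it directly from the internal module (the import path is an implementation detail and may change between releases):

```python
import torch
from transformers import FocalNetConfig
from transformers.models.focalnet.modeling_focalnet import FocalNetModulation

config = FocalNetConfig()  # defaults: focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3]
modulation = FocalNetModulation(config, index=0, dim=96)

# Input is channels-last: (batch, height, width, channels); the output keeps that shape.
hidden_state = torch.randn(2, 56, 56, 96)
print(modulation(hidden_state).shape)  # torch.Size([2, 56, 56, 96])
```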
390
+
391
+
392
+ class FocalNetMlp(nn.Module):
393
+ def __init__(self, config, in_features, hidden_features=None, out_features=None, drop=0.0):
394
+ super().__init__()
395
+ out_features = out_features or in_features
396
+ hidden_features = hidden_features or in_features
397
+ self.fc1 = nn.Linear(in_features, hidden_features)
398
+ self.activation = ACT2FN[config.hidden_act]
399
+ self.fc2 = nn.Linear(hidden_features, out_features)
400
+ self.drop = nn.Dropout(drop)
401
+
402
+ def forward(self, hidden_state):
403
+ hidden_state = self.fc1(hidden_state)
404
+ hidden_state = self.activation(hidden_state)
405
+ hidden_state = self.drop(hidden_state)
406
+ hidden_state = self.fc2(hidden_state)
407
+ hidden_state = self.drop(hidden_state)
408
+ return hidden_state
409
+
410
+
411
+ class FocalNetLayer(nn.Module):
412
+ r"""Focal Modulation Network layer (block).
413
+
414
+ Args:
415
+ config (`FocalNetConfig`):
416
+ Model config.
417
+ index (`int`):
418
+ Layer index.
419
+ dim (`int`):
420
+ Number of input channels.
421
+ input_resolution (`Tuple[int]`):
422
+ Input resulotion.
423
+ drop_path (`float`, *optional*, defaults to 0.0):
424
+ Stochastic depth rate.
425
+ """
426
+
427
+ def __init__(self, config, index, dim, input_resolution, drop_path=0.0):
428
+ super().__init__()
429
+
430
+ self.config = config
431
+
432
+ # layer-specific attributes
433
+ self.dim = dim
434
+ self.input_resolution = input_resolution
435
+
436
+ # general attributes
437
+ self.drop = config.hidden_dropout_prob
438
+ self.use_post_layernorm = config.use_post_layernorm
439
+
440
+ self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
441
+ self.modulation = FocalNetModulation(
442
+ config=config,
443
+ index=index,
444
+ dim=dim,
445
+ projection_dropout=self.drop,
446
+ )
447
+
448
+ self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
449
+ self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
450
+ mlp_hidden_dim = int(dim * config.mlp_ratio)
451
+ self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop)
452
+
453
+ self.gamma_1 = 1.0
454
+ self.gamma_2 = 1.0
455
+ if config.use_layerscale:
456
+ self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True)
457
+ self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True)
458
+
459
+ def forward(self, hidden_state, input_dimensions):
460
+ height, width = input_dimensions
461
+ batch_size, _, num_channels = hidden_state.shape
462
+ shortcut = hidden_state
463
+
464
+ # Focal Modulation
465
+ hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state)
466
+ hidden_state = hidden_state.view(batch_size, height, width, num_channels)
467
+ hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels)
468
+ hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state)
469
+
470
+ # FFN
471
+ hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state)
472
+ hidden_state = hidden_state + self.drop_path(
473
+ self.gamma_2
474
+ * (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state)))
475
+ )
476
+
477
+ return hidden_state
478
+
479
+
480
+ class FocalNetStage(nn.Module):
481
+ def __init__(self, config, index, input_resolution):
482
+ super().__init__()
483
+
484
+ self.config = config
485
+ self.num_stages = len(config.depths)
486
+
487
+ embed_dim = [config.embed_dim * (2**i) for i in range(self.num_stages)]
488
+ dim = embed_dim[index]
489
+ out_dim = embed_dim[index + 1] if (index < self.num_stages - 1) else None
490
+ downsample = FocalNetPatchEmbeddings if (index < self.num_stages - 1) else None
491
+
492
+ # stochastic depth decay rule
493
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
494
+ drop_path = dpr[sum(config.depths[:index]) : sum(config.depths[: index + 1])]
495
+
496
+ self.layers = nn.ModuleList(
497
+ [
498
+ FocalNetLayer(
499
+ config=config,
500
+ index=index,
501
+ dim=dim,
502
+ input_resolution=input_resolution,
503
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
504
+ )
505
+ for i in range(config.depths[index])
506
+ ]
507
+ )
508
+
509
+ if downsample is not None:
510
+ self.downsample = downsample(
511
+ config=config,
512
+ image_size=input_resolution,
513
+ patch_size=2,
514
+ num_channels=dim,
515
+ embed_dim=out_dim,
516
+ add_norm=True,
517
+ use_conv_embed=config.use_conv_embed,
518
+ is_stem=False,
519
+ )
520
+ else:
521
+ self.downsample = None
522
+
523
+ self.pointing = False
524
+
525
+ def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int]) -> Tuple[torch.Tensor]:
526
+ height, width = input_dimensions
527
+ for layer_module in self.layers:
528
+ hidden_states = layer_module(hidden_states, input_dimensions)
529
+
530
+ hidden_states_before_downsampling = hidden_states
531
+ if self.downsample is not None:
532
+ height, width = input_dimensions
533
+ hidden_states = hidden_states.transpose(1, 2).reshape(
534
+ hidden_states_before_downsampling.shape[0], -1, height, width
535
+ )
536
+ hidden_states, output_dimensions = self.downsample(hidden_states)
537
+
538
+ else:
539
+ output_dimensions = (height, width, height, width)
540
+
541
+ stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
542
+
543
+ return stage_outputs
544
+
545
+
546
+ class FocalNetEncoder(nn.Module):
547
+ def __init__(self, config, grid_size):
548
+ super().__init__()
549
+ self.num_stages = len(config.depths)
550
+ self.config = config
551
+
552
+ self.stages = nn.ModuleList(
553
+ [
554
+ FocalNetStage(
555
+ config=config,
556
+ index=i_layer,
557
+ input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
558
+ )
559
+ for i_layer in range(self.num_stages)
560
+ ]
561
+ )
562
+
563
+ self.gradient_checkpointing = False
564
+
565
+ def forward(
566
+ self,
567
+ hidden_states: torch.Tensor,
568
+ input_dimensions: Tuple[int, int],
569
+ output_hidden_states: Optional[bool] = False,
570
+ output_hidden_states_before_downsampling: Optional[bool] = False,
571
+ return_dict: Optional[bool] = True,
572
+ ) -> Union[Tuple, FocalNetEncoderOutput]:
573
+ all_hidden_states = () if output_hidden_states else None
574
+ all_reshaped_hidden_states = () if output_hidden_states else None
575
+
576
+ if output_hidden_states:
577
+ batch_size, _, hidden_size = hidden_states.shape
578
+ # rearrange b (h w) c -> b c h w
579
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
580
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
581
+ all_hidden_states += (hidden_states,)
582
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
583
+
584
+ for i, stage_module in enumerate(self.stages):
585
+ if self.gradient_checkpointing and self.training:
586
+ stage_outputs = self._gradient_checkpointing_func(
587
+ stage_module.__call__,
588
+ hidden_states,
589
+ input_dimensions,
590
+ )
591
+ else:
592
+ stage_outputs = stage_module(hidden_states, input_dimensions)
593
+
594
+ hidden_states = stage_outputs[0]
595
+ hidden_states_before_downsampling = stage_outputs[1]
596
+ output_dimensions = stage_outputs[2]
597
+
598
+ input_dimensions = (output_dimensions[-2], output_dimensions[-1])
599
+
600
+ if output_hidden_states and output_hidden_states_before_downsampling:
601
+ batch_size, _, hidden_size = hidden_states_before_downsampling.shape
602
+ # rearrange b (h w) c -> b c h w
603
+ # here we use the original (not downsampled) height and width
604
+ reshaped_hidden_state = hidden_states_before_downsampling.view(
605
+ batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
606
+ )
607
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
608
+ all_hidden_states += (hidden_states_before_downsampling,)
609
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
610
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
611
+ batch_size, _, hidden_size = hidden_states.shape
612
+ # rearrange b (h w) c -> b c h w
613
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
614
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
615
+ all_hidden_states += (hidden_states,)
616
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
617
+
618
+ if not return_dict:
619
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
620
+
621
+ return FocalNetEncoderOutput(
622
+ last_hidden_state=hidden_states,
623
+ hidden_states=all_hidden_states,
624
+ reshaped_hidden_states=all_reshaped_hidden_states,
625
+ )
626
+
627
+
628
+ # Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->FocalNet,swin->focalnet
629
+ class FocalNetPreTrainedModel(PreTrainedModel):
630
+ """
631
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
632
+ models.
633
+ """
634
+
635
+ config_class = FocalNetConfig
636
+ base_model_prefix = "focalnet"
637
+ main_input_name = "pixel_values"
638
+ supports_gradient_checkpointing = True
639
+
640
+ def _init_weights(self, module):
641
+ """Initialize the weights"""
642
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
643
+ # Slightly different from the TF version which uses truncated_normal for initialization
644
+ # cf https://github.com/pytorch/pytorch/pull/5617
645
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
646
+ if module.bias is not None:
647
+ module.bias.data.zero_()
648
+ elif isinstance(module, nn.LayerNorm):
649
+ module.bias.data.zero_()
650
+ module.weight.data.fill_(1.0)
651
+
652
+
653
+ FOCALNET_START_DOCSTRING = r"""
654
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
655
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
656
+ behavior.
657
+
658
+ Parameters:
659
+ config ([`FocalNetConfig`]): Model configuration class with all the parameters of the model.
660
+ Initializing with a config file does not load the weights associated with the model, only the
661
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
662
+ """
663
+
664
+ FOCALNET_INPUTS_DOCSTRING = r"""
665
+ Args:
666
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
667
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
668
+ [`AutoImageProcessor.__call__`] for details.
669
+
670
+ output_hidden_states (`bool`, *optional*):
671
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
672
+ more detail.
673
+ return_dict (`bool`, *optional*):
674
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
675
+ """
676
+
677
+
678
+ @add_start_docstrings(
679
+ "The bare FocalNet Model outputting raw hidden-states without any specific head on top.",
680
+ FOCALNET_START_DOCSTRING,
681
+ )
682
+ class FocalNetModel(FocalNetPreTrainedModel):
683
+ def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
684
+ super().__init__(config)
685
+ self.config = config
686
+ self.num_stages = len(config.depths)
687
+ self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
688
+
689
+ self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token)
690
+ self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid)
691
+
692
+ self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
693
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
694
+
695
+ # Initialize weights and apply final processing
696
+ self.post_init()
697
+
698
+ def get_input_embeddings(self):
699
+ return self.embeddings.patch_embeddings
700
+
701
+ @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
702
+ @add_code_sample_docstrings(
703
+ checkpoint=_CHECKPOINT_FOR_DOC,
704
+ output_type=FocalNetModelOutput,
705
+ config_class=_CONFIG_FOR_DOC,
706
+ modality="vision",
707
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
708
+ )
709
+ def forward(
710
+ self,
711
+ pixel_values: Optional[torch.FloatTensor] = None,
712
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
713
+ output_hidden_states: Optional[bool] = None,
714
+ return_dict: Optional[bool] = None,
715
+ ) -> Union[Tuple, FocalNetModelOutput]:
716
+ r"""
717
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
718
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
719
+ """
720
+ output_hidden_states = (
721
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
722
+ )
723
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
724
+
725
+ if pixel_values is None:
726
+ raise ValueError("You have to specify pixel_values")
727
+
728
+ embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
729
+
730
+ encoder_outputs = self.encoder(
731
+ embedding_output,
732
+ input_dimensions,
733
+ output_hidden_states=output_hidden_states,
734
+ return_dict=return_dict,
735
+ )
736
+
737
+ sequence_output = encoder_outputs[0]
738
+ sequence_output = self.layernorm(sequence_output)
739
+
740
+ pooled_output = None
741
+ if self.pooler is not None:
742
+ pooled_output = self.pooler(sequence_output.transpose(1, 2))
743
+ pooled_output = torch.flatten(pooled_output, 1)
744
+
745
+ if not return_dict:
746
+ output = (sequence_output, pooled_output) + encoder_outputs[1:]
747
+
748
+ return output
749
+
750
+ return FocalNetModelOutput(
751
+ last_hidden_state=sequence_output,
752
+ pooler_output=pooled_output,
753
+ hidden_states=encoder_outputs.hidden_states,
754
+ reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
755
+ )
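A minimal end-to-end sketch of the bare model with randomly initialized weights (no checkpoint download), just to confirm the `[1, 49, 768]` output shape documented above:

```python
import torch
from transformers import FocalNetConfig, FocalNetModel

model = FocalNetModel(FocalNetConfig())  # defaults: embed_dim=96, depths=[2, 2, 6, 2]
model.eval()

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)

# 224/4 = 56 tokens per side, halved three times -> 7x7 = 49; 96 * 2**3 = 768 channels.
print(outputs.last_hidden_state.shape)  # torch.Size([1, 49, 768])
print(outputs.pooler_output.shape)      # torch.Size([1, 768])
```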
756
+
757
+
758
+ @add_start_docstrings(
759
+ """FocalNet Model with a decoder on top for masked image modeling.
760
+
761
+ This follows the same implementation as in [SimMIM](https://arxiv.org/abs/2111.09886).
762
+
763
+ <Tip>
764
+
765
+ Note that we provide a script to pre-train this model on custom data in our [examples
766
+ directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
767
+
768
+ </Tip>
769
+ """,
770
+ FOCALNET_START_DOCSTRING,
771
+ )
772
+ class FocalNetForMaskedImageModeling(FocalNetPreTrainedModel):
773
+ def __init__(self, config):
774
+ super().__init__(config)
775
+
776
+ self.focalnet = FocalNetModel(config, add_pooling_layer=False, use_mask_token=True)
777
+
778
+ self.num_stages = len(config.depths)
779
+ num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
780
+ self.decoder = nn.Sequential(
781
+ nn.Conv2d(
782
+ in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1
783
+ ),
784
+ nn.PixelShuffle(config.encoder_stride),
785
+ )
786
+
787
+ # Initialize weights and apply final processing
788
+ self.post_init()
789
+
790
+ @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
791
+ @replace_return_docstrings(output_type=FocalNetMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
792
+ def forward(
793
+ self,
794
+ pixel_values: Optional[torch.FloatTensor] = None,
795
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
796
+ output_hidden_states: Optional[bool] = None,
797
+ return_dict: Optional[bool] = None,
798
+ ) -> Union[Tuple, FocalNetMaskedImageModelingOutput]:
799
+ r"""
800
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
801
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
802
+
803
+ Returns:
804
+
805
+ Examples:
806
+ ```python
807
+ >>> from transformers import AutoImageProcessor, FocalNetConfig, FocalNetForMaskedImageModeling
808
+ >>> import torch
809
+ >>> from PIL import Image
810
+ >>> import requests
811
+
812
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
813
+ >>> image = Image.open(requests.get(url, stream=True).raw)
814
+
815
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-base-simmim-window6-192")
816
+ >>> config = FocalNetConfig()
817
+ >>> model = FocalNetForMaskedImageModeling(config)
818
+
819
+ >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
820
+ >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
821
+ >>> # create random boolean mask of shape (batch_size, num_patches)
822
+ >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
823
+
824
+ >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
825
+ >>> loss, reconstructed_pixel_values = outputs.loss, outputs.logits
826
+ >>> list(reconstructed_pixel_values.shape)
827
+ [1, 3, 192, 192]
828
+ ```"""
829
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
830
+
831
+ outputs = self.focalnet(
832
+ pixel_values,
833
+ bool_masked_pos=bool_masked_pos,
834
+ output_hidden_states=output_hidden_states,
835
+ return_dict=return_dict,
836
+ )
837
+
838
+ sequence_output = outputs[0]
839
+ # Reshape to (batch_size, num_channels, height, width)
840
+ sequence_output = sequence_output.transpose(1, 2)
841
+ batch_size, num_channels, sequence_length = sequence_output.shape
842
+ height = width = math.floor(sequence_length**0.5)
843
+ sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
844
+
845
+ # Reconstruct pixel values
846
+ reconstructed_pixel_values = self.decoder(sequence_output)
847
+
848
+ masked_im_loss = None
849
+ if bool_masked_pos is not None:
850
+ size = self.config.image_size // self.config.patch_size
851
+ bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
852
+ mask = (
853
+ bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
854
+ .repeat_interleave(self.config.patch_size, 2)
855
+ .unsqueeze(1)
856
+ .contiguous()
857
+ )
858
+ reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
859
+ masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
860
+
861
+ if not return_dict:
862
+ output = (reconstructed_pixel_values,) + outputs[2:]
863
+ return ((masked_im_loss,) + output) if masked_im_loss is not None else output
864
+
865
+ return FocalNetMaskedImageModelingOutput(
866
+ loss=masked_im_loss,
867
+ reconstruction=reconstructed_pixel_values,
868
+ hidden_states=outputs.hidden_states,
869
+ reshaped_hidden_states=outputs.reshaped_hidden_states,
870
+ )
871
+
872
+
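To make the masked-image-modeling loss above easier to follow, here is a small self-contained sketch of how the patch-level `bool_masked_pos` is upsampled to a pixel-level mask with `repeat_interleave` before the L1 reconstruction loss is averaged over masked pixels. The toy 2x2 patch grid and patch size 2 are arbitrary assumptions, not library values.

```python
import torch

# Toy example: a 2x2 grid of patches, each patch covering a 2x2 block of pixels.
patch_size, grid_size = 2, 2
bool_masked_pos = torch.tensor([[1, 0, 0, 1]])  # (batch_size, num_patches)

# Same upsampling as in FocalNetForMaskedImageModeling.forward above.
mask = (
    bool_masked_pos.reshape(-1, grid_size, grid_size)
    .repeat_interleave(patch_size, 1)
    .repeat_interleave(patch_size, 2)
    .unsqueeze(1)  # add a channel axis -> (batch_size, 1, height, width)
)

print(mask.shape)  # torch.Size([1, 1, 4, 4])
print(mask[0, 0])  # each masked patch becomes a 2x2 block of ones
```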
873
+ @add_start_docstrings(
874
+ """
875
+ FocalNet Model with an image classification head on top (a linear layer on top of the pooled output) e.g. for
876
+ ImageNet.
877
+ """,
878
+ FOCALNET_START_DOCSTRING,
879
+ )
880
+ class FocalNetForImageClassification(FocalNetPreTrainedModel):
881
+ # Copied from transformers.models.swin.modeling_swin.SwinForImageClassification.__init__ with Swin->FocalNet, swin->focalnet
882
+ def __init__(self, config):
883
+ super().__init__(config)
884
+
885
+ self.num_labels = config.num_labels
886
+ self.focalnet = FocalNetModel(config)
887
+
888
+ # Classifier head
889
+ self.classifier = (
890
+ nn.Linear(self.focalnet.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
891
+ )
892
+
893
+ # Initialize weights and apply final processing
894
+ self.post_init()
895
+
896
+ @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
897
+ @add_code_sample_docstrings(
898
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
899
+ output_type=FocalNetImageClassifierOutput,
900
+ config_class=_CONFIG_FOR_DOC,
901
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
902
+ )
903
+ def forward(
904
+ self,
905
+ pixel_values: Optional[torch.FloatTensor] = None,
906
+ labels: Optional[torch.LongTensor] = None,
907
+ output_hidden_states: Optional[bool] = None,
908
+ return_dict: Optional[bool] = None,
909
+ ) -> Union[Tuple, FocalNetImageClassifierOutput]:
910
+ r"""
911
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
912
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
913
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
914
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
915
+ """
916
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
917
+
918
+ outputs = self.focalnet(
919
+ pixel_values,
920
+ output_hidden_states=output_hidden_states,
921
+ return_dict=return_dict,
922
+ )
923
+
924
+ pooled_output = outputs[1]
925
+
926
+ logits = self.classifier(pooled_output)
927
+
928
+ loss = None
929
+ if labels is not None:
930
+ if self.config.problem_type is None:
931
+ if self.num_labels == 1:
932
+ self.config.problem_type = "regression"
933
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
934
+ self.config.problem_type = "single_label_classification"
935
+ else:
936
+ self.config.problem_type = "multi_label_classification"
937
+
938
+ if self.config.problem_type == "regression":
939
+ loss_fct = MSELoss()
940
+ if self.num_labels == 1:
941
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
942
+ else:
943
+ loss = loss_fct(logits, labels)
944
+ elif self.config.problem_type == "single_label_classification":
945
+ loss_fct = CrossEntropyLoss()
946
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
947
+ elif self.config.problem_type == "multi_label_classification":
948
+ loss_fct = BCEWithLogitsLoss()
949
+ loss = loss_fct(logits, labels)
950
+
951
+ if not return_dict:
952
+ output = (logits,) + outputs[2:]
953
+ return ((loss,) + output) if loss is not None else output
954
+
955
+ return FocalNetImageClassifierOutput(
956
+ loss=loss,
957
+ logits=logits,
958
+ hidden_states=outputs.hidden_states,
959
+ reshaped_hidden_states=outputs.reshaped_hidden_states,
960
+ )
961
+
962
+
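A short, hedged inference sketch for the classification head above. The checkpoint name is an assumption; any ImageNet-finetuned FocalNet checkpoint should work in its place.

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Checkpoint name is an assumption for illustration.
processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```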
963
+ @add_start_docstrings(
964
+ """
965
+ FocalNet backbone, to be used with frameworks like X-Decoder.
966
+ """,
967
+ FOCALNET_START_DOCSTRING,
968
+ )
969
+ class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin):
970
+ def __init__(self, config: FocalNetConfig):
971
+ super().__init__(config)
972
+ super()._init_backbone(config)
973
+
974
+ self.num_features = [config.embed_dim] + config.hidden_sizes
975
+ self.focalnet = FocalNetModel(config)
976
+
977
+ # initialize weights and apply final processing
978
+ self.post_init()
979
+
980
+ @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
981
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
982
+ def forward(
983
+ self,
984
+ pixel_values: torch.Tensor,
985
+ output_hidden_states: Optional[bool] = None,
986
+ return_dict: Optional[bool] = None,
987
+ ) -> BackboneOutput:
988
+ """
989
+ Returns:
990
+
991
+ Examples:
992
+
993
+ ```python
994
+ >>> from transformers import AutoImageProcessor, AutoBackbone
995
+ >>> import torch
996
+ >>> from PIL import Image
997
+ >>> import requests
998
+
999
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1000
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1001
+
1002
+ >>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf")
1003
+ >>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf")
1004
+
1005
+ >>> inputs = processor(image, return_tensors="pt")
1006
+ >>> outputs = model(**inputs)
1007
+ ```"""
1008
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1009
+ output_hidden_states = (
1010
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1011
+ )
1012
+
1013
+ outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True)
1014
+
1015
+ hidden_states = outputs.reshaped_hidden_states
1016
+
1017
+ feature_maps = ()
1018
+ for idx, stage in enumerate(self.stage_names):
1019
+ if stage in self.out_features:
1020
+ feature_maps += (hidden_states[idx],)
1021
+
1022
+ if not return_dict:
1023
+ output = (feature_maps,)
1024
+ if output_hidden_states:
1025
+ output += (outputs.hidden_states,)
1026
+ return output
1027
+
1028
+ return BackboneOutput(
1029
+ feature_maps=feature_maps,
1030
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
1031
+ attentions=None,
1032
+ )
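Complementing the docstring example just above, here is a hedged sketch of inspecting the returned feature maps. The `out_features` stage names follow the usual FocalNet naming (`stem`, `stage1`, ...) and are an assumption here; by default only the last stage is returned.

```python
import requests
import torch
from PIL import Image
from transformers import AutoBackbone, AutoImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf")
# out_features selects which stages are returned as feature maps.
model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf", out_features=["stage1", "stage2"])

with torch.no_grad():
    outputs = model(**processor(image, return_tensors="pt"))

for name, feature_map in zip(model.out_features, outputs.feature_maps):
    print(name, tuple(feature_map.shape))  # (batch_size, num_channels, height, width) per requested stage
```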
llmeval-env/lib/python3.10/site-packages/transformers/models/led/__init__.py ADDED
@@ -0,0 +1,101 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tf_available,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig"],
27
+ "tokenization_led": ["LEDTokenizer"],
28
+ }
29
+
30
+ try:
31
+ if not is_tokenizers_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_led_fast"] = ["LEDTokenizerFast"]
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_led"] = [
45
+ "LED_PRETRAINED_MODEL_ARCHIVE_LIST",
46
+ "LEDForConditionalGeneration",
47
+ "LEDForQuestionAnswering",
48
+ "LEDForSequenceClassification",
49
+ "LEDModel",
50
+ "LEDPreTrainedModel",
51
+ ]
52
+
53
+
54
+ try:
55
+ if not is_tf_available():
56
+ raise OptionalDependencyNotAvailable()
57
+ except OptionalDependencyNotAvailable:
58
+ pass
59
+ else:
60
+ _import_structure["modeling_tf_led"] = ["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"]
61
+
62
+
63
+ if TYPE_CHECKING:
64
+ from .configuration_led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig
65
+ from .tokenization_led import LEDTokenizer
66
+
67
+ try:
68
+ if not is_tokenizers_available():
69
+ raise OptionalDependencyNotAvailable()
70
+ except OptionalDependencyNotAvailable:
71
+ pass
72
+ else:
73
+ from .tokenization_led_fast import LEDTokenizerFast
74
+
75
+ try:
76
+ if not is_torch_available():
77
+ raise OptionalDependencyNotAvailable()
78
+ except OptionalDependencyNotAvailable:
79
+ pass
80
+ else:
81
+ from .modeling_led import (
82
+ LED_PRETRAINED_MODEL_ARCHIVE_LIST,
83
+ LEDForConditionalGeneration,
84
+ LEDForQuestionAnswering,
85
+ LEDForSequenceClassification,
86
+ LEDModel,
87
+ LEDPreTrainedModel,
88
+ )
89
+
90
+ try:
91
+ if not is_tf_available():
92
+ raise OptionalDependencyNotAvailable()
93
+ except OptionalDependencyNotAvailable:
94
+ pass
95
+ else:
96
+ from .modeling_tf_led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel
97
+
98
+ else:
99
+ import sys
100
+
101
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
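The `_LazyModule` registration above keeps top-level imports cheap: submodules such as `modeling_led` are only imported when one of their attributes is first accessed. A small sketch of what that enables, assuming `torch` is installed so the PyTorch symbols resolve:

```python
# Importing the subpackage does not eagerly import modeling_led (and therefore torch).
from transformers.models import led

config_cls = led.LEDConfig                   # triggers loading of configuration_led
model_cls = led.LEDForConditionalGeneration  # triggers loading of modeling_led (needs torch)

print(config_cls.model_type)  # "led"
print(model_cls.__module__)   # "transformers.models.led.modeling_led"
```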
llmeval-env/lib/python3.10/site-packages/transformers/models/led/configuration_led.py ADDED
@@ -0,0 +1,165 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LED model configuration"""
16
+
17
+ from typing import List, Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import LED_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class LEDConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`LEDModel`]. It is used to instantiate an LED
32
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
33
+ defaults will yield a similar configuration to that of the LED
34
+ [allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 50265):
42
+ Vocabulary size of the LED model. Defines the number of different tokens that can be represented by the
43
+ `input_ids` passed when calling [`LEDModel`] or [`TFLEDModel`].
44
+ d_model (`int`, *optional*, defaults to 1024):
45
+ Dimensionality of the layers and the pooler layer.
46
+ encoder_layers (`int`, *optional*, defaults to 12):
47
+ Number of encoder layers.
48
+ decoder_layers (`int`, *optional*, defaults to 12):
49
+ Number of decoder layers.
50
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
53
+ Number of attention heads for each attention layer in the Transformer decoder.
54
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
55
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
56
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
57
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
58
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
59
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
60
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
61
+ dropout (`float`, *optional*, defaults to 0.1):
62
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
63
+ attention_dropout (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio for the attention probabilities.
65
+ activation_dropout (`float`, *optional*, defaults to 0.0):
66
+ The dropout ratio for activations inside the fully connected layer.
67
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
68
+ The dropout ratio for classifier.
69
+ max_encoder_position_embeddings (`int`, *optional*, defaults to 16384):
70
+ The maximum sequence length that the encoder might ever be used with.
71
+ max_decoder_position_embeddings (`int`, *optional*, defaults to 1024):
72
+ The maximum sequence length that the decoder might ever be used with.
73
+ init_std (`float`, *optional*, defaults to 0.02):
74
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
75
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
76
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
77
+ for more details.
78
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
79
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
80
+ for more details.
81
+ use_cache (`bool`, *optional*, defaults to `True`):
82
+ Whether or not the model should return the last key/values attentions (not used by all models).
83
+
84
+ Example:
85
+
86
+ ```python
87
+ >>> from transformers import LEDModel, LEDConfig
88
+
89
+ >>> # Initializing a LED allenai/led-base-16384 style configuration
90
+ >>> configuration = LEDConfig()
91
+
92
+ >>> # Initializing a model from the allenai/led-base-16384 style configuration
93
+ >>> model = LEDModel(configuration)
94
+
95
+ >>> # Accessing the model configuration
96
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "led"
100
+ attribute_map = {
101
+ "num_attention_heads": "encoder_attention_heads",
102
+ "hidden_size": "d_model",
103
+ "attention_probs_dropout_prob": "attention_dropout",
104
+ "initializer_range": "init_std",
105
+ }
106
+
107
+ def __init__(
108
+ self,
109
+ vocab_size=50265,
110
+ max_encoder_position_embeddings=16384,
111
+ max_decoder_position_embeddings=1024,
112
+ encoder_layers=12,
113
+ encoder_ffn_dim=4096,
114
+ encoder_attention_heads=16,
115
+ decoder_layers=12,
116
+ decoder_ffn_dim=4096,
117
+ decoder_attention_heads=16,
118
+ encoder_layerdrop=0.0,
119
+ decoder_layerdrop=0.0,
120
+ use_cache=True,
121
+ is_encoder_decoder=True,
122
+ activation_function="gelu",
123
+ d_model=1024,
124
+ dropout=0.1,
125
+ attention_dropout=0.0,
126
+ activation_dropout=0.0,
127
+ init_std=0.02,
128
+ decoder_start_token_id=2,
129
+ classifier_dropout=0.0,
130
+ pad_token_id=1,
131
+ bos_token_id=0,
132
+ eos_token_id=2,
133
+ attention_window: Union[List[int], int] = 512,
134
+ **kwargs,
135
+ ):
136
+ self.vocab_size = vocab_size
137
+ self.max_encoder_position_embeddings = max_encoder_position_embeddings
138
+ self.max_decoder_position_embeddings = max_decoder_position_embeddings
139
+ self.d_model = d_model
140
+ self.encoder_ffn_dim = encoder_ffn_dim
141
+ self.encoder_layers = encoder_layers
142
+ self.encoder_attention_heads = encoder_attention_heads
143
+ self.decoder_ffn_dim = decoder_ffn_dim
144
+ self.decoder_layers = decoder_layers
145
+ self.decoder_attention_heads = decoder_attention_heads
146
+ self.dropout = dropout
147
+ self.attention_dropout = attention_dropout
148
+ self.activation_dropout = activation_dropout
149
+ self.activation_function = activation_function
150
+ self.init_std = init_std
151
+ self.encoder_layerdrop = encoder_layerdrop
152
+ self.decoder_layerdrop = decoder_layerdrop
153
+ self.classifier_dropout = classifier_dropout
154
+ self.use_cache = use_cache
155
+ self.num_hidden_layers = encoder_layers
156
+ self.attention_window = attention_window
157
+
158
+ super().__init__(
159
+ pad_token_id=pad_token_id,
160
+ bos_token_id=bos_token_id,
161
+ eos_token_id=eos_token_id,
162
+ is_encoder_decoder=is_encoder_decoder,
163
+ decoder_start_token_id=decoder_start_token_id,
164
+ **kwargs,
165
+ )
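Beyond the docstring example, `attention_window` accepts either a single integer shared by all layers or a per-layer list, and common BERT-style attribute names resolve through `attribute_map`. A brief sketch (the window values are arbitrary):

```python
from transformers import LEDConfig

# One attention window per encoder layer (or a single int shared by all layers).
config = LEDConfig(
    encoder_layers=6,
    decoder_layers=6,
    attention_window=[256, 256, 512, 512, 1024, 1024],
)

print(config.attention_window)     # [256, 256, 512, 512, 1024, 1024]
print(config.num_attention_heads)  # 16, resolved to encoder_attention_heads via attribute_map
print(config.hidden_size)          # 1024, resolved to d_model
```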
llmeval-env/lib/python3.10/site-packages/transformers/models/led/modeling_led.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/led/modeling_tf_led.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/led/tokenization_led.py ADDED
@@ -0,0 +1,449 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for LED."""
16
+
17
+ import json
18
+ import os
19
+ from functools import lru_cache
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import regex as re
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...tokenization_utils_base import BatchEncoding, EncodedInput
26
+ from ...utils import PaddingStrategy, logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+
32
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
33
+
34
+ # See all LED models at https://huggingface.co/models?filter=LED
35
+
36
+
37
+ @lru_cache()
38
+ # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
39
+ def bytes_to_unicode():
40
+ """
41
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
42
+ characters the bpe code barfs on.
43
+
44
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
45
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
46
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
47
+ tables between utf-8 bytes and unicode strings.
48
+ """
49
+ bs = (
50
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
51
+ )
52
+ cs = bs[:]
53
+ n = 0
54
+ for b in range(2**8):
55
+ if b not in bs:
56
+ bs.append(b)
57
+ cs.append(2**8 + n)
58
+ n += 1
59
+ cs = [chr(n) for n in cs]
60
+ return dict(zip(bs, cs))
61
+
62
+
63
+ # Copied from transformers.models.bart.tokenization_bart.get_pairs
64
+ def get_pairs(word):
65
+ """
66
+ Return set of symbol pairs in a word.
67
+
68
+ Word is represented as tuple of symbols (symbols being variable-length strings).
69
+ """
70
+ pairs = set()
71
+ prev_char = word[0]
72
+ for char in word[1:]:
73
+ pairs.add((prev_char, char))
74
+ prev_char = char
75
+ return pairs
76
+
77
+
78
+ class LEDTokenizer(PreTrainedTokenizer):
79
+ """
80
+ Constructs a LED tokenizer, which is similar to the RoBERTa tokenizer, using byte-level Byte-Pair-Encoding.
81
+
82
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
83
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
84
+
85
+ ```python
86
+ >>> from transformers import LEDTokenizer
87
+
88
+ >>> tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
89
+ >>> tokenizer("Hello world")["input_ids"]
90
+ [0, 31414, 232, 2]
91
+
92
+ >>> tokenizer(" Hello world")["input_ids"]
93
+ [0, 20920, 232, 2]
94
+ ```
95
+
96
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
97
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
98
+
99
+ <Tip>
100
+
101
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
102
+
103
+ </Tip>
104
+
105
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
106
+ this superclass for more information regarding those methods.
107
+
108
+ Args:
109
+ vocab_file (`str`):
110
+ Path to the vocabulary file.
111
+ merges_file (`str`):
112
+ Path to the merges file.
113
+ errors (`str`, *optional*, defaults to `"replace"`):
114
+ Paradigm to follow when decoding bytes to UTF-8. See
115
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
116
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
117
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
118
+
119
+ <Tip>
120
+
121
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
122
+ sequence. The token used is the `cls_token`.
123
+
124
+ </Tip>
125
+
126
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
127
+ The end of sequence token.
128
+
129
+ <Tip>
130
+
131
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
132
+ The token used is the `sep_token`.
133
+
134
+ </Tip>
135
+
136
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
137
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
138
+ sequence classification or for a text and a question for question answering. It is also used as the last
139
+ token of a sequence built with special tokens.
140
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
141
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
142
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
143
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
144
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
145
+ token instead.
146
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
147
+ The token used for padding, for example when batching sequences of different lengths.
148
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
149
+ The token used for masking values. This is the token used when training this model with masked language
150
+ modeling. This is the token which the model will try to predict.
151
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
152
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
153
+ other word (the BART tokenizer detects the beginning of a word by the preceding space).
154
+ """
155
+
156
+ vocab_files_names = VOCAB_FILES_NAMES
157
+ model_input_names = ["input_ids", "attention_mask"]
158
+
159
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.__init__
160
+ def __init__(
161
+ self,
162
+ vocab_file,
163
+ merges_file,
164
+ errors="replace",
165
+ bos_token="<s>",
166
+ eos_token="</s>",
167
+ sep_token="</s>",
168
+ cls_token="<s>",
169
+ unk_token="<unk>",
170
+ pad_token="<pad>",
171
+ mask_token="<mask>",
172
+ add_prefix_space=False,
173
+ **kwargs,
174
+ ):
175
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
176
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
177
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
178
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
179
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
180
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
181
+
182
+ # Mask token behaves like a normal word, i.e. it includes the space before it
183
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
184
+
185
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
186
+ self.encoder = json.load(vocab_handle)
187
+ self.decoder = {v: k for k, v in self.encoder.items()}
188
+ self.errors = errors # how to handle errors in decoding
189
+ self.byte_encoder = bytes_to_unicode()
190
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
191
+ with open(merges_file, encoding="utf-8") as merges_handle:
192
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
193
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
194
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
195
+ self.cache = {}
196
+ self.add_prefix_space = add_prefix_space
197
+
198
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
199
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
200
+
201
+ super().__init__(
202
+ errors=errors,
203
+ bos_token=bos_token,
204
+ eos_token=eos_token,
205
+ unk_token=unk_token,
206
+ sep_token=sep_token,
207
+ cls_token=cls_token,
208
+ pad_token=pad_token,
209
+ mask_token=mask_token,
210
+ add_prefix_space=add_prefix_space,
211
+ **kwargs,
212
+ )
213
+
214
+ @property
215
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
216
+ def vocab_size(self):
217
+ return len(self.encoder)
218
+
219
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.get_vocab
220
+ def get_vocab(self):
221
+ return dict(self.encoder, **self.added_tokens_encoder)
222
+
223
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.bpe
224
+ def bpe(self, token):
225
+ if token in self.cache:
226
+ return self.cache[token]
227
+ word = tuple(token)
228
+ pairs = get_pairs(word)
229
+
230
+ if not pairs:
231
+ return token
232
+
233
+ while True:
234
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
235
+ if bigram not in self.bpe_ranks:
236
+ break
237
+ first, second = bigram
238
+ new_word = []
239
+ i = 0
240
+ while i < len(word):
241
+ try:
242
+ j = word.index(first, i)
243
+ except ValueError:
244
+ new_word.extend(word[i:])
245
+ break
246
+ else:
247
+ new_word.extend(word[i:j])
248
+ i = j
249
+
250
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
251
+ new_word.append(first + second)
252
+ i += 2
253
+ else:
254
+ new_word.append(word[i])
255
+ i += 1
256
+ new_word = tuple(new_word)
257
+ word = new_word
258
+ if len(word) == 1:
259
+ break
260
+ else:
261
+ pairs = get_pairs(word)
262
+ word = " ".join(word)
263
+ self.cache[token] = word
264
+ return word
265
+
266
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._tokenize
267
+ def _tokenize(self, text):
268
+ """Tokenize a string."""
269
+ bpe_tokens = []
270
+ for token in re.findall(self.pat, text):
271
+ token = "".join(
272
+ self.byte_encoder[b] for b in token.encode("utf-8")
273
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
274
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
275
+ return bpe_tokens
276
+
277
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._convert_token_to_id
278
+ def _convert_token_to_id(self, token):
279
+ """Converts a token (str) in an id using the vocab."""
280
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
281
+
282
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._convert_id_to_token
283
+ def _convert_id_to_token(self, index):
284
+ """Converts an index (integer) in a token (str) using the vocab."""
285
+ return self.decoder.get(index)
286
+
287
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.convert_tokens_to_string
288
+ def convert_tokens_to_string(self, tokens):
289
+ """Converts a sequence of tokens (string) in a single string."""
290
+ text = "".join(tokens)
291
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
292
+ return text
293
+
294
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.save_vocabulary
295
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
296
+ if not os.path.isdir(save_directory):
297
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
298
+ return
299
+ vocab_file = os.path.join(
300
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
301
+ )
302
+ merge_file = os.path.join(
303
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
304
+ )
305
+
306
+ with open(vocab_file, "w", encoding="utf-8") as f:
307
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
308
+
309
+ index = 0
310
+ with open(merge_file, "w", encoding="utf-8") as writer:
311
+ writer.write("#version: 0.2\n")
312
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
313
+ if index != token_index:
314
+ logger.warning(
315
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
316
+ " Please check that the tokenizer is not corrupted!"
317
+ )
318
+ index = token_index
319
+ writer.write(" ".join(bpe_tokens) + "\n")
320
+ index += 1
321
+
322
+ return vocab_file, merge_file
323
+
324
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.build_inputs_with_special_tokens with BART->LED
325
+ def build_inputs_with_special_tokens(
326
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
327
+ ) -> List[int]:
328
+ """
329
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
330
+ adding special tokens. A LED sequence has the following format:
331
+
332
+ - single sequence: `<s> X </s>`
333
+ - pair of sequences: `<s> A </s></s> B </s>`
334
+
335
+ Args:
336
+ token_ids_0 (`List[int]`):
337
+ List of IDs to which the special tokens will be added.
338
+ token_ids_1 (`List[int]`, *optional*):
339
+ Optional second list of IDs for sequence pairs.
340
+
341
+ Returns:
342
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
343
+ """
344
+ if token_ids_1 is None:
345
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
346
+ cls = [self.cls_token_id]
347
+ sep = [self.sep_token_id]
348
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
349
+
350
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.get_special_tokens_mask
351
+ def get_special_tokens_mask(
352
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
353
+ ) -> List[int]:
354
+ """
355
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
356
+ special tokens using the tokenizer `prepare_for_model` method.
357
+
358
+ Args:
359
+ token_ids_0 (`List[int]`):
360
+ List of IDs.
361
+ token_ids_1 (`List[int]`, *optional*):
362
+ Optional second list of IDs for sequence pairs.
363
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
364
+ Whether or not the token list is already formatted with special tokens for the model.
365
+
366
+ Returns:
367
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
368
+ """
369
+ if already_has_special_tokens:
370
+ return super().get_special_tokens_mask(
371
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
372
+ )
373
+
374
+ if token_ids_1 is None:
375
+ return [1] + ([0] * len(token_ids_0)) + [1]
376
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
377
+
378
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.create_token_type_ids_from_sequences with BART->LED
379
+ def create_token_type_ids_from_sequences(
380
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
381
+ ) -> List[int]:
382
+ """
383
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not
384
+ make use of token type ids, therefore a list of zeros is returned.
385
+
386
+ Args:
387
+ token_ids_0 (`List[int]`):
388
+ List of IDs.
389
+ token_ids_1 (`List[int]`, *optional*):
390
+ Optional second list of IDs for sequence pairs.
391
+
392
+ Returns:
393
+ `List[int]`: List of zeros.
394
+ """
395
+ sep = [self.sep_token_id]
396
+ cls = [self.cls_token_id]
397
+
398
+ if token_ids_1 is None:
399
+ return len(cls + token_ids_0 + sep) * [0]
400
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
401
+
402
+ # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.prepare_for_tokenization
403
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
404
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
405
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
406
+ text = " " + text
407
+ return (text, kwargs)
408
+
409
+ def _pad(
410
+ self,
411
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
412
+ max_length: Optional[int] = None,
413
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
414
+ pad_to_multiple_of: Optional[int] = None,
415
+ return_attention_mask: Optional[bool] = None,
416
+ ) -> dict:
417
+ encoded_inputs = super()._pad(
418
+ encoded_inputs=encoded_inputs,
419
+ max_length=max_length,
420
+ padding_strategy=padding_strategy,
421
+ pad_to_multiple_of=pad_to_multiple_of,
422
+ return_attention_mask=return_attention_mask,
423
+ )
424
+
425
+ # Load from model defaults
426
+ if return_attention_mask is None:
427
+ return_attention_mask = "attention_mask" in self.model_input_names
428
+
429
+ if return_attention_mask and "global_attention_mask" in encoded_inputs:
430
+ required_input = encoded_inputs[self.model_input_names[0]]
431
+ # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
432
+ needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
433
+
434
+ if needs_to_be_padded:
435
+ difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
436
+
437
+ if self.padding_side == "right":
438
+ # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
439
+ encoded_inputs["global_attention_mask"] = (
440
+ encoded_inputs["global_attention_mask"] + [-1] * difference
441
+ )
442
+ elif self.padding_side == "left":
443
+ encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
444
+ "global_attention_mask"
445
+ ]
446
+ else:
447
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
448
+
449
+ return encoded_inputs
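A hedged sketch of what the `_pad` override above does in practice when a user-supplied `global_attention_mask` is padded alongside the other inputs. The checkpoint name matches the class docstring; the exact token counts depend on the input texts.

```python
from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")

encoded = tokenizer(["short text", "a noticeably longer piece of text"])
# Give global attention to the first token of each sequence, as LED usually expects.
encoded["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in encoded["input_ids"]]

batch = tokenizer.pad(encoded, padding=True)
# Padded positions are filled with -1: neither local (0) nor global (1) attention.
print(batch["global_attention_mask"])
```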
llmeval-env/lib/python3.10/site-packages/transformers/models/led/tokenization_led_fast.py ADDED
@@ -0,0 +1,325 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for LED."""
16
+
17
+ import json
18
+ from typing import Dict, List, Optional, Tuple, Union
19
+
20
+ from tokenizers import pre_tokenizers, processors
21
+
22
+ from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import PaddingStrategy, logging
25
+ from .tokenization_led import LEDTokenizer
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
32
+
33
+
34
+ class LEDTokenizerFast(PreTrainedTokenizerFast):
35
+ r"""
36
+ Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer,
37
+ using byte-level Byte-Pair-Encoding.
38
+
39
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
40
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
41
+
42
+ ```python
43
+ >>> from transformers import LEDTokenizerFast
44
+
45
+ >>> tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
46
+ >>> tokenizer("Hello world")["input_ids"]
47
+ [0, 31414, 232, 2]
48
+
49
+ >>> tokenizer(" Hello world")["input_ids"]
50
+ [0, 20920, 232, 2]
51
+ ```
52
+
53
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
54
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
55
+
56
+ <Tip>
57
+
58
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
59
+
60
+ </Tip>
61
+
62
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
63
+ refer to this superclass for more information regarding those methods.
64
+
65
+ Args:
66
+ vocab_file (`str`):
67
+ Path to the vocabulary file.
68
+ merges_file (`str`):
69
+ Path to the merges file.
70
+ errors (`str`, *optional*, defaults to `"replace"`):
71
+ Paradigm to follow when decoding bytes to UTF-8. See
72
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
73
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
74
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
75
+
76
+ <Tip>
77
+
78
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
79
+ sequence. The token used is the `cls_token`.
80
+
81
+ </Tip>
82
+
83
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
84
+ The end of sequence token.
85
+
86
+ <Tip>
87
+
88
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
89
+ The token used is the `sep_token`.
90
+
91
+ </Tip>
92
+
93
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
94
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
95
+ sequence classification or for a text and a question for question answering. It is also used as the last
96
+ token of a sequence built with special tokens.
97
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
98
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
99
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
100
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
101
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
102
+ token instead.
103
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
104
+ The token used for padding, for example when batching sequences of different lengths.
105
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
106
+ The token used for masking values. This is the token used when training this model with masked language
107
+ modeling. This is the token which the model will try to predict.
108
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
109
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
110
+ other word (the LED tokenizer detects the beginning of a word by the preceding space).
111
+ trim_offsets (`bool`, *optional*, defaults to `True`):
112
+ Whether the post processing step should trim offsets to avoid including whitespaces.
113
+ """
114
+
115
+ vocab_files_names = VOCAB_FILES_NAMES
116
+ slow_tokenizer_class = LEDTokenizer
117
+ model_input_names = ["input_ids", "attention_mask"]
118
+
119
+ # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.__init__
120
+ def __init__(
121
+ self,
122
+ vocab_file=None,
123
+ merges_file=None,
124
+ tokenizer_file=None,
125
+ errors="replace",
126
+ bos_token="<s>",
127
+ eos_token="</s>",
128
+ sep_token="</s>",
129
+ cls_token="<s>",
130
+ unk_token="<unk>",
131
+ pad_token="<pad>",
132
+ mask_token="<mask>",
133
+ add_prefix_space=False,
134
+ trim_offsets=True,
135
+ **kwargs,
136
+ ):
137
+ # we have to specify that this token is special, otherwise adding it will reset the normalized flag to `False` in `add_special_tokens`
138
+ mask_token = (
139
+ AddedToken(mask_token, lstrip=True, normalized=True, special=True)
140
+ if isinstance(mask_token, str)
141
+ else mask_token
142
+ )
143
+ super().__init__(
144
+ vocab_file,
145
+ merges_file,
146
+ tokenizer_file=tokenizer_file,
147
+ errors=errors,
148
+ bos_token=bos_token,
149
+ eos_token=eos_token,
150
+ sep_token=sep_token,
151
+ cls_token=cls_token,
152
+ unk_token=unk_token,
153
+ pad_token=pad_token,
154
+ mask_token=mask_token,
155
+ add_prefix_space=add_prefix_space,
156
+ trim_offsets=trim_offsets,
157
+ **kwargs,
158
+ )
159
+
160
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
161
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
162
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
163
+ pre_tok_state["add_prefix_space"] = add_prefix_space
164
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
165
+
166
+ self.add_prefix_space = add_prefix_space
167
+
168
+ # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
169
+ tokenizer_component = "post_processor"
170
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
171
+ if tokenizer_component_instance:
172
+ state = json.loads(tokenizer_component_instance.__getstate__())
173
+
174
+ # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
175
+ if "sep" in state:
176
+ state["sep"] = tuple(state["sep"])
177
+ if "cls" in state:
178
+ state["cls"] = tuple(state["cls"])
179
+
180
+ changes_to_apply = False
181
+
182
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
183
+ state["add_prefix_space"] = add_prefix_space
184
+ changes_to_apply = True
185
+
186
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
187
+ state["trim_offsets"] = trim_offsets
188
+ changes_to_apply = True
189
+
190
+ if changes_to_apply:
191
+ component_class = getattr(processors, state.pop("type"))
192
+ new_value = component_class(**state)
193
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
194
+
195
+ @property
196
+ # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
197
+ def mask_token(self) -> str:
198
+ """
199
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
200
+ having been set.
201
+
202
+ The LED tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
203
+ comprise the space before the *<mask>*.
204
+ """
205
+ if self._mask_token is None:
206
+ if self.verbose:
207
+ logger.error("Using mask_token, but it is not set yet.")
208
+ return None
209
+ return str(self._mask_token)
210
+
211
+ @mask_token.setter
212
+ def mask_token(self, value):
213
+ """
214
+ Overriding the default behavior of the mask token to have it eat the space before it.
215
+
216
+ This is needed to preserve backward compatibility with all the previously used models based on LED.
217
+ """
218
+ # Mask token behaves like a normal word, i.e. it includes the space before it
219
+ # So we set lstrip to True
220
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
221
+ self._mask_token = value
222
+
223
+ # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast._batch_encode_plus
224
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
225
+ is_split_into_words = kwargs.get("is_split_into_words", False)
226
+
227
+ if is_split_into_words and not self.add_prefix_space:
228
+ raise ValueError(
229
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
230
+ "to use it with pretokenized inputs."
231
+ )
232
+
233
+ return super()._batch_encode_plus(*args, **kwargs)
234
+
235
+ # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast._encode_plus
236
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
237
+ is_split_into_words = kwargs.get("is_split_into_words", False)
238
+
239
+ if is_split_into_words and not self.add_prefix_space:
240
+ raise ValueError(
241
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
242
+ "to use it with pretokenized inputs."
243
+ )
244
+
245
+ return super()._encode_plus(*args, **kwargs)
246
+
247
+ # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.save_vocabulary
248
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
249
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
250
+ return tuple(files)
251
+
252
+ # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.build_inputs_with_special_tokens
253
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
254
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
255
+ if token_ids_1 is None:
256
+ return output
257
+
258
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
259
+
260
+ # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.create_token_type_ids_from_sequences with BART->LED
261
+ def create_token_type_ids_from_sequences(
262
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
263
+ ) -> List[int]:
264
+ """
265
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not
266
+ make use of token type ids, therefore a list of zeros is returned.
267
+
268
+ Args:
269
+ token_ids_0 (`List[int]`):
270
+ List of IDs.
271
+ token_ids_1 (`List[int]`, *optional*):
272
+ Optional second list of IDs for sequence pairs.
273
+
274
+ Returns:
275
+ `List[int]`: List of zeros.
276
+ """
277
+ sep = [self.sep_token_id]
278
+ cls = [self.cls_token_id]
279
+
280
+ if token_ids_1 is None:
281
+ return len(cls + token_ids_0 + sep) * [0]
282
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
283
+
284
+ # Copied from transformers.models.led.tokenization_led.LEDTokenizer._pad
285
+ def _pad(
286
+ self,
287
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
288
+ max_length: Optional[int] = None,
289
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
290
+ pad_to_multiple_of: Optional[int] = None,
291
+ return_attention_mask: Optional[bool] = None,
292
+ ) -> dict:
293
+ encoded_inputs = super()._pad(
294
+ encoded_inputs=encoded_inputs,
295
+ max_length=max_length,
296
+ padding_strategy=padding_strategy,
297
+ pad_to_multiple_of=pad_to_multiple_of,
298
+ return_attention_mask=return_attention_mask,
299
+ )
300
+
301
+ # Load from model defaults
302
+ if return_attention_mask is None:
303
+ return_attention_mask = "attention_mask" in self.model_input_names
304
+
305
+ if return_attention_mask and "global_attention_mask" in encoded_inputs:
306
+ required_input = encoded_inputs[self.model_input_names[0]]
307
+ # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
308
+ needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
309
+
310
+ if needs_to_be_padded:
311
+ difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
312
+
313
+ if self.padding_side == "right":
314
+ # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
315
+ encoded_inputs["global_attention_mask"] = (
316
+ encoded_inputs["global_attention_mask"] + [-1] * difference
317
+ )
318
+ elif self.padding_side == "left":
319
+ encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
320
+ "global_attention_mask"
321
+ ]
322
+ else:
323
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
324
+
325
+ return encoded_inputs
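As the `_encode_plus`/`_batch_encode_plus` guards above enforce, pretokenized input requires instantiating the fast tokenizer with `add_prefix_space=True`. A minimal sketch, assuming the `allenai/led-base-16384` checkpoint from the class docstring:

```python
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384", add_prefix_space=True)

# A single pre-tokenized sequence: each word is encoded as if preceded by a space.
encoded = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoded.tokens())
```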
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__init__.py ADDED
@@ -0,0 +1,145 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_mobilebert": [
28
+ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
29
+ "MobileBertConfig",
30
+ "MobileBertOnnxConfig",
31
+ ],
32
+ "tokenization_mobilebert": ["MobileBertTokenizer"],
33
+ }
34
+
35
+ try:
36
+ if not is_tokenizers_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
42
+
43
+ try:
44
+ if not is_torch_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ _import_structure["modeling_mobilebert"] = [
50
+ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
51
+ "MobileBertForMaskedLM",
52
+ "MobileBertForMultipleChoice",
53
+ "MobileBertForNextSentencePrediction",
54
+ "MobileBertForPreTraining",
55
+ "MobileBertForQuestionAnswering",
56
+ "MobileBertForSequenceClassification",
57
+ "MobileBertForTokenClassification",
58
+ "MobileBertLayer",
59
+ "MobileBertModel",
60
+ "MobileBertPreTrainedModel",
61
+ "load_tf_weights_in_mobilebert",
62
+ ]
63
+
64
+ try:
65
+ if not is_tf_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ _import_structure["modeling_tf_mobilebert"] = [
71
+ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
72
+ "TFMobileBertForMaskedLM",
73
+ "TFMobileBertForMultipleChoice",
74
+ "TFMobileBertForNextSentencePrediction",
75
+ "TFMobileBertForPreTraining",
76
+ "TFMobileBertForQuestionAnswering",
77
+ "TFMobileBertForSequenceClassification",
78
+ "TFMobileBertForTokenClassification",
79
+ "TFMobileBertMainLayer",
80
+ "TFMobileBertModel",
81
+ "TFMobileBertPreTrainedModel",
82
+ ]
83
+
84
+
85
+ if TYPE_CHECKING:
86
+ from .configuration_mobilebert import (
87
+ MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
88
+ MobileBertConfig,
89
+ MobileBertOnnxConfig,
90
+ )
91
+ from .tokenization_mobilebert import MobileBertTokenizer
92
+
93
+ try:
94
+ if not is_tokenizers_available():
95
+ raise OptionalDependencyNotAvailable()
96
+ except OptionalDependencyNotAvailable:
97
+ pass
98
+ else:
99
+ from .tokenization_mobilebert_fast import MobileBertTokenizerFast
100
+
101
+ try:
102
+ if not is_torch_available():
103
+ raise OptionalDependencyNotAvailable()
104
+ except OptionalDependencyNotAvailable:
105
+ pass
106
+ else:
107
+ from .modeling_mobilebert import (
108
+ MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
109
+ MobileBertForMaskedLM,
110
+ MobileBertForMultipleChoice,
111
+ MobileBertForNextSentencePrediction,
112
+ MobileBertForPreTraining,
113
+ MobileBertForQuestionAnswering,
114
+ MobileBertForSequenceClassification,
115
+ MobileBertForTokenClassification,
116
+ MobileBertLayer,
117
+ MobileBertModel,
118
+ MobileBertPreTrainedModel,
119
+ load_tf_weights_in_mobilebert,
120
+ )
121
+
122
+ try:
123
+ if not is_tf_available():
124
+ raise OptionalDependencyNotAvailable()
125
+ except OptionalDependencyNotAvailable:
126
+ pass
127
+ else:
128
+ from .modeling_tf_mobilebert import (
129
+ TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
130
+ TFMobileBertForMaskedLM,
131
+ TFMobileBertForMultipleChoice,
132
+ TFMobileBertForNextSentencePrediction,
133
+ TFMobileBertForPreTraining,
134
+ TFMobileBertForQuestionAnswering,
135
+ TFMobileBertForSequenceClassification,
136
+ TFMobileBertForTokenClassification,
137
+ TFMobileBertMainLayer,
138
+ TFMobileBertModel,
139
+ TFMobileBertPreTrainedModel,
140
+ )
141
+
142
+ else:
143
+ import sys
144
+
145
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
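The `_LazyModule` registration at the end of this `__init__.py` means none of the heavy submodules are imported until one of the exported names is first accessed, and the `try/except OptionalDependencyNotAvailable` blocks simply drop the tokenizers, PyTorch, or TensorFlow entries when those backends are missing. A small sketch of the user-facing effect, assuming the `tokenizers` backend is installed and using the standard `google/mobilebert-uncased` checkpoint named elsewhere in this package:

```python
# Sketch: these names resolve lazily through _LazyModule; configuration_mobilebert and
# tokenization_mobilebert_fast are only imported when the attributes are first used.
from transformers import MobileBertConfig, MobileBertTokenizerFast

config = MobileBertConfig()  # default MobileBERT configuration
tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")  # downloads the vocab
print(type(config).__name__, tokenizer.vocab_size)
```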
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.23 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/configuration_mobilebert.cpython-310.pyc ADDED
Binary file (7.13 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/convert_mobilebert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.44 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_mobilebert.cpython-310.pyc ADDED
Binary file (48.4 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_tf_mobilebert.cpython-310.pyc ADDED
Binary file (56.4 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert.cpython-310.pyc ADDED
Binary file (17.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert_fast.cpython-310.pyc ADDED
Binary file (6.85 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,58 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+
17
+ import torch
18
+
19
+ from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
20
+ from transformers.utils import logging
21
+
22
+
23
+ logging.set_verbosity_info()
24
+
25
+
26
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
27
+ # Initialise PyTorch model
28
+ config = MobileBertConfig.from_json_file(mobilebert_config_file)
29
+ print(f"Building PyTorch model from configuration: {config}")
30
+ model = MobileBertForPreTraining(config)
31
+ # Load weights from tf checkpoint
32
+ model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
33
+ # Save pytorch-model
34
+ print(f"Save PyTorch model to {pytorch_dump_path}")
35
+ torch.save(model.state_dict(), pytorch_dump_path)
36
+
37
+
38
+ if __name__ == "__main__":
39
+ parser = argparse.ArgumentParser()
40
+ # Required parameters
41
+ parser.add_argument(
42
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
43
+ )
44
+ parser.add_argument(
45
+ "--mobilebert_config_file",
46
+ default=None,
47
+ type=str,
48
+ required=True,
49
+ help=(
50
+ "The config json file corresponding to the pre-trained MobileBERT model. \n"
51
+ "This specifies the model architecture."
52
+ ),
53
+ )
54
+ parser.add_argument(
55
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
56
+ )
57
+ args = parser.parse_args()
58
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
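For reference, the converter above can be driven either through its argparse CLI (`--tf_checkpoint_path`, `--mobilebert_config_file`, `--pytorch_dump_path`) or by calling the function directly. A hedged sketch of the direct call; the three paths are placeholders, not files that ship with this commit:

```python
# Hypothetical invocation of the converter above; all three paths are placeholders.
from transformers.models.mobilebert.convert_mobilebert_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./mobilebert_tf/mobilebert_variables.ckpt",  # placeholder
    mobilebert_config_file="./mobilebert_tf/config.json",            # placeholder
    pytorch_dump_path="./mobilebert_pytorch/pytorch_model.bin",      # placeholder
)
```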
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_mobilebert.py ADDED
@@ -0,0 +1,1617 @@
1
+ # MIT License
2
+ #
3
+ # Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
4
+ #
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ import math
24
+ import os
25
+ import warnings
26
+ from dataclasses import dataclass
27
+ from typing import Optional, Tuple, Union
28
+
29
+ import torch
30
+ from torch import nn
31
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
32
+
33
+ from ...activations import ACT2FN
34
+ from ...modeling_outputs import (
35
+ BaseModelOutput,
36
+ BaseModelOutputWithPooling,
37
+ MaskedLMOutput,
38
+ MultipleChoiceModelOutput,
39
+ NextSentencePredictorOutput,
40
+ QuestionAnsweringModelOutput,
41
+ SequenceClassifierOutput,
42
+ TokenClassifierOutput,
43
+ )
44
+ from ...modeling_utils import PreTrainedModel
45
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
46
+ from ...utils import (
47
+ ModelOutput,
48
+ add_code_sample_docstrings,
49
+ add_start_docstrings,
50
+ add_start_docstrings_to_model_forward,
51
+ logging,
52
+ replace_return_docstrings,
53
+ )
54
+ from .configuration_mobilebert import MobileBertConfig
55
+
56
+
57
+ logger = logging.get_logger(__name__)
58
+
59
+ _CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
60
+ _CONFIG_FOR_DOC = "MobileBertConfig"
61
+
62
+ # TokenClassification docstring
63
+ _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "mrm8488/mobilebert-finetuned-ner"
64
+ _TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']"
65
+ _TOKEN_CLASS_EXPECTED_LOSS = 0.03
66
+
67
+ # QuestionAnswering docstring
68
+ _CHECKPOINT_FOR_QA = "csarron/mobilebert-uncased-squad-v2"
69
+ _QA_EXPECTED_OUTPUT = "'a nice puppet'"
70
+ _QA_EXPECTED_LOSS = 3.98
71
+ _QA_TARGET_START_INDEX = 12
72
+ _QA_TARGET_END_INDEX = 13
73
+
74
+ # SequenceClassification docstring
75
+ _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "lordtt13/emo-mobilebert"
76
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'others'"
77
+ _SEQ_CLASS_EXPECTED_LOSS = "4.72"
78
+
79
+
80
+ from ..deprecated._archive_maps import MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
81
+
82
+
83
+ def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
84
+ """Load tf checkpoints in a pytorch model."""
85
+ try:
86
+ import re
87
+
88
+ import numpy as np
89
+ import tensorflow as tf
90
+ except ImportError:
91
+ logger.error(
92
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
93
+ "https://www.tensorflow.org/install/ for installation instructions."
94
+ )
95
+ raise
96
+ tf_path = os.path.abspath(tf_checkpoint_path)
97
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
98
+ # Load weights from TF model
99
+ init_vars = tf.train.list_variables(tf_path)
100
+ names = []
101
+ arrays = []
102
+ for name, shape in init_vars:
103
+ logger.info(f"Loading TF weight {name} with shape {shape}")
104
+ array = tf.train.load_variable(tf_path, name)
105
+ names.append(name)
106
+ arrays.append(array)
107
+
108
+ for name, array in zip(names, arrays):
109
+ name = name.replace("ffn_layer", "ffn")
110
+ name = name.replace("FakeLayerNorm", "LayerNorm")
111
+ name = name.replace("extra_output_weights", "dense/kernel")
112
+ name = name.replace("bert", "mobilebert")
113
+ name = name.split("/")
114
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
115
+ # which are not required for using pretrained model
116
+ if any(
117
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
118
+ for n in name
119
+ ):
120
+ logger.info(f"Skipping {'/'.join(name)}")
121
+ continue
122
+ pointer = model
123
+ for m_name in name:
124
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
125
+ scope_names = re.split(r"_(\d+)", m_name)
126
+ else:
127
+ scope_names = [m_name]
128
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
129
+ pointer = getattr(pointer, "weight")
130
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
131
+ pointer = getattr(pointer, "bias")
132
+ elif scope_names[0] == "output_weights":
133
+ pointer = getattr(pointer, "weight")
134
+ elif scope_names[0] == "squad":
135
+ pointer = getattr(pointer, "classifier")
136
+ else:
137
+ try:
138
+ pointer = getattr(pointer, scope_names[0])
139
+ except AttributeError:
140
+ logger.info(f"Skipping {'/'.join(name)}")
141
+ continue
142
+ if len(scope_names) >= 2:
143
+ num = int(scope_names[1])
144
+ pointer = pointer[num]
145
+ if m_name[-11:] == "_embeddings":
146
+ pointer = getattr(pointer, "weight")
147
+ elif m_name == "kernel":
148
+ array = np.transpose(array)
149
+ try:
150
+ assert (
151
+ pointer.shape == array.shape
152
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
153
+ except AssertionError as e:
154
+ e.args += (pointer.shape, array.shape)
155
+ raise
156
+ logger.info(f"Initialize PyTorch weight {name}")
157
+ pointer.data = torch.from_numpy(array)
158
+ return model
159
+
160
+
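A quick aside on the loading loop above: before any weights are copied, each TensorFlow variable name is rewritten with a handful of `str.replace` calls and then split on `/` so the loop can walk the matching PyTorch module tree. The string-level step in isolation, on an invented variable name:

```python
# Isolated sketch of the name remapping done at the top of load_tf_weights_in_mobilebert;
# the example variable name is invented.
tf_name = "bert/encoder/layer_0/attention/self/query/kernel"
name = (
    tf_name.replace("ffn_layer", "ffn")
    .replace("FakeLayerNorm", "LayerNorm")
    .replace("extra_output_weights", "dense/kernel")
    .replace("bert", "mobilebert")
)
print(name.split("/"))
# ['mobilebert', 'encoder', 'layer_0', 'attention', 'self', 'query', 'kernel']
```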
161
+ class NoNorm(nn.Module):
162
+ def __init__(self, feat_size, eps=None):
163
+ super().__init__()
164
+ self.bias = nn.Parameter(torch.zeros(feat_size))
165
+ self.weight = nn.Parameter(torch.ones(feat_size))
166
+
167
+ def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
168
+ return input_tensor * self.weight + self.bias
169
+
170
+
171
+ NORM2FN = {"layer_norm": nn.LayerNorm, "no_norm": NoNorm}
172
+
173
+
174
+ class MobileBertEmbeddings(nn.Module):
175
+ """Construct the embeddings from word, position and token_type embeddings."""
176
+
177
+ def __init__(self, config):
178
+ super().__init__()
179
+ self.trigram_input = config.trigram_input
180
+ self.embedding_size = config.embedding_size
181
+ self.hidden_size = config.hidden_size
182
+
183
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
184
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
185
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
186
+
187
+ embed_dim_multiplier = 3 if self.trigram_input else 1
188
+ embedded_input_size = self.embedding_size * embed_dim_multiplier
189
+ self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)
190
+
191
+ self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
192
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
193
+
194
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
195
+ self.register_buffer(
196
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
197
+ )
198
+
199
+ def forward(
200
+ self,
201
+ input_ids: Optional[torch.LongTensor] = None,
202
+ token_type_ids: Optional[torch.LongTensor] = None,
203
+ position_ids: Optional[torch.LongTensor] = None,
204
+ inputs_embeds: Optional[torch.FloatTensor] = None,
205
+ ) -> torch.Tensor:
206
+ if input_ids is not None:
207
+ input_shape = input_ids.size()
208
+ else:
209
+ input_shape = inputs_embeds.size()[:-1]
210
+
211
+ seq_length = input_shape[1]
212
+
213
+ if position_ids is None:
214
+ position_ids = self.position_ids[:, :seq_length]
215
+
216
+ if token_type_ids is None:
217
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
218
+ if inputs_embeds is None:
219
+ inputs_embeds = self.word_embeddings(input_ids)
220
+
221
+ if self.trigram_input:
222
+ # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
223
+ # Devices (https://arxiv.org/abs/2004.02984)
224
+ #
225
+ # The embedding table in BERT models accounts for a substantial proportion of model size. To compress
226
+ # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
227
+ # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
228
+ # dimensional output.
229
+ inputs_embeds = torch.cat(
230
+ [
231
+ nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0.0),
232
+ inputs_embeds,
233
+ nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0.0),
234
+ ],
235
+ dim=2,
236
+ )
237
+ if self.trigram_input or self.embedding_size != self.hidden_size:
238
+ inputs_embeds = self.embedding_transformation(inputs_embeds)
239
+
240
+ # Add positional embeddings and token type embeddings, then layer
241
+ # normalize and perform dropout.
242
+ position_embeddings = self.position_embeddings(position_ids)
243
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
244
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
245
+ embeddings = self.LayerNorm(embeddings)
246
+ embeddings = self.dropout(embeddings)
247
+ return embeddings
248
+
249
+
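The trigram branch above concatenates the next-token, current-token, and previous-token embeddings along the feature axis, so the projection sees `3 * embedding_size` features before being mapped to `hidden_size` (128 -> 384 -> 512 with the sizes quoted in the comment). A shape-only sketch of that path, outside the module:

```python
# Shape-only sketch of the trigram embedding path above, using the sizes quoted in the
# comment (embedding_size=128 -> 3*128=384 -> hidden_size=512).
import torch
from torch import nn

batch, seq_len, embedding_size, hidden_size = 2, 8, 128, 512
inputs_embeds = torch.randn(batch, seq_len, embedding_size)

trigram = torch.cat(
    [
        nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0.0),   # next token
        inputs_embeds,                                                             # current token
        nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0.0),  # previous token
    ],
    dim=2,
)
print(trigram.shape)  # torch.Size([2, 8, 384])
print(nn.Linear(3 * embedding_size, hidden_size)(trigram).shape)  # torch.Size([2, 8, 512])
```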
250
+ class MobileBertSelfAttention(nn.Module):
251
+ def __init__(self, config):
252
+ super().__init__()
253
+ self.num_attention_heads = config.num_attention_heads
254
+ self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
255
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
256
+
257
+ self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
258
+ self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
259
+ self.value = nn.Linear(
260
+ config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size
261
+ )
262
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
263
+
264
+ def transpose_for_scores(self, x):
265
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
266
+ x = x.view(new_x_shape)
267
+ return x.permute(0, 2, 1, 3)
268
+
269
+ def forward(
270
+ self,
271
+ query_tensor: torch.Tensor,
272
+ key_tensor: torch.Tensor,
273
+ value_tensor: torch.Tensor,
274
+ attention_mask: Optional[torch.FloatTensor] = None,
275
+ head_mask: Optional[torch.FloatTensor] = None,
276
+ output_attentions: Optional[bool] = None,
277
+ ) -> Tuple[torch.Tensor]:
278
+ mixed_query_layer = self.query(query_tensor)
279
+ mixed_key_layer = self.key(key_tensor)
280
+ mixed_value_layer = self.value(value_tensor)
281
+
282
+ query_layer = self.transpose_for_scores(mixed_query_layer)
283
+ key_layer = self.transpose_for_scores(mixed_key_layer)
284
+ value_layer = self.transpose_for_scores(mixed_value_layer)
285
+
286
+ # Take the dot product between "query" and "key" to get the raw attention scores.
287
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
288
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
289
+ if attention_mask is not None:
290
+ # Apply the attention mask (precomputed for all layers in the MobileBertModel forward() function)
291
+ attention_scores = attention_scores + attention_mask
292
+ # Normalize the attention scores to probabilities.
293
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
294
+ # This is actually dropping out entire tokens to attend to, which might
295
+ # seem a bit unusual, but is taken from the original Transformer paper.
296
+ attention_probs = self.dropout(attention_probs)
297
+ # Mask heads if we want to
298
+ if head_mask is not None:
299
+ attention_probs = attention_probs * head_mask
300
+ context_layer = torch.matmul(attention_probs, value_layer)
301
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
302
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
303
+ context_layer = context_layer.view(new_context_layer_shape)
304
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
305
+ return outputs
306
+
307
+
308
+ class MobileBertSelfOutput(nn.Module):
309
+ def __init__(self, config):
310
+ super().__init__()
311
+ self.use_bottleneck = config.use_bottleneck
312
+ self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
313
+ self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
314
+ if not self.use_bottleneck:
315
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
316
+
317
+ def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
318
+ layer_outputs = self.dense(hidden_states)
319
+ if not self.use_bottleneck:
320
+ layer_outputs = self.dropout(layer_outputs)
321
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
322
+ return layer_outputs
323
+
324
+
325
+ class MobileBertAttention(nn.Module):
326
+ def __init__(self, config):
327
+ super().__init__()
328
+ self.self = MobileBertSelfAttention(config)
329
+ self.output = MobileBertSelfOutput(config)
330
+ self.pruned_heads = set()
331
+
332
+ def prune_heads(self, heads):
333
+ if len(heads) == 0:
334
+ return
335
+ heads, index = find_pruneable_heads_and_indices(
336
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
337
+ )
338
+
339
+ # Prune linear layers
340
+ self.self.query = prune_linear_layer(self.self.query, index)
341
+ self.self.key = prune_linear_layer(self.self.key, index)
342
+ self.self.value = prune_linear_layer(self.self.value, index)
343
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
344
+
345
+ # Update hyper params and store pruned heads
346
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
347
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
348
+ self.pruned_heads = self.pruned_heads.union(heads)
349
+
350
+ def forward(
351
+ self,
352
+ query_tensor: torch.Tensor,
353
+ key_tensor: torch.Tensor,
354
+ value_tensor: torch.Tensor,
355
+ layer_input: torch.Tensor,
356
+ attention_mask: Optional[torch.FloatTensor] = None,
357
+ head_mask: Optional[torch.FloatTensor] = None,
358
+ output_attentions: Optional[bool] = None,
359
+ ) -> Tuple[torch.Tensor]:
360
+ self_outputs = self.self(
361
+ query_tensor,
362
+ key_tensor,
363
+ value_tensor,
364
+ attention_mask,
365
+ head_mask,
366
+ output_attentions,
367
+ )
368
+ # Run a linear projection of `hidden_size` then add a residual
369
+ # with `layer_input`.
370
+ attention_output = self.output(self_outputs[0], layer_input)
371
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
372
+ return outputs
373
+
374
+
375
+ class MobileBertIntermediate(nn.Module):
376
+ def __init__(self, config):
377
+ super().__init__()
378
+ self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
379
+ if isinstance(config.hidden_act, str):
380
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
381
+ else:
382
+ self.intermediate_act_fn = config.hidden_act
383
+
384
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
385
+ hidden_states = self.dense(hidden_states)
386
+ hidden_states = self.intermediate_act_fn(hidden_states)
387
+ return hidden_states
388
+
389
+
390
+ class OutputBottleneck(nn.Module):
391
+ def __init__(self, config):
392
+ super().__init__()
393
+ self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
394
+ self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
395
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
396
+
397
+ def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
398
+ layer_outputs = self.dense(hidden_states)
399
+ layer_outputs = self.dropout(layer_outputs)
400
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
401
+ return layer_outputs
402
+
403
+
404
+ class MobileBertOutput(nn.Module):
405
+ def __init__(self, config):
406
+ super().__init__()
407
+ self.use_bottleneck = config.use_bottleneck
408
+ self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
409
+ self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
410
+ if not self.use_bottleneck:
411
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
412
+ else:
413
+ self.bottleneck = OutputBottleneck(config)
414
+
415
+ def forward(
416
+ self, intermediate_states: torch.Tensor, residual_tensor_1: torch.Tensor, residual_tensor_2: torch.Tensor
417
+ ) -> torch.Tensor:
418
+ layer_output = self.dense(intermediate_states)
419
+ if not self.use_bottleneck:
420
+ layer_output = self.dropout(layer_output)
421
+ layer_output = self.LayerNorm(layer_output + residual_tensor_1)
422
+ else:
423
+ layer_output = self.LayerNorm(layer_output + residual_tensor_1)
424
+ layer_output = self.bottleneck(layer_output, residual_tensor_2)
425
+ return layer_output
426
+
427
+
428
+ class BottleneckLayer(nn.Module):
429
+ def __init__(self, config):
430
+ super().__init__()
431
+ self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
432
+ self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)
433
+
434
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
435
+ layer_input = self.dense(hidden_states)
436
+ layer_input = self.LayerNorm(layer_input)
437
+ return layer_input
438
+
439
+
440
+ class Bottleneck(nn.Module):
441
+ def __init__(self, config):
442
+ super().__init__()
443
+ self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
444
+ self.use_bottleneck_attention = config.use_bottleneck_attention
445
+ self.input = BottleneckLayer(config)
446
+ if self.key_query_shared_bottleneck:
447
+ self.attention = BottleneckLayer(config)
448
+
449
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
450
+ # This method can return three different tuples of values. These different values make use of bottlenecks,
451
+ # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
452
+ # usage. These linear layers have weights that are learned during training.
453
+ #
454
+ # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
455
+ # key, query, value, and "layer input" to be used by the attention layer.
456
+ # This bottleneck is used to project the hidden states. This last layer input will be used as a residual tensor
457
+ # in the attention self output, after the attention scores have been computed.
458
+ #
459
+ # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
460
+ # four values, three of which have been passed through a bottleneck: the query and key, passed through the same
461
+ # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.
462
+ #
463
+ # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,
464
+ # and the residual layer will be this value passed through a bottleneck.
465
+
466
+ bottlenecked_hidden_states = self.input(hidden_states)
467
+ if self.use_bottleneck_attention:
468
+ return (bottlenecked_hidden_states,) * 4
469
+ elif self.key_query_shared_bottleneck:
470
+ shared_attention_input = self.attention(hidden_states)
471
+ return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
472
+ else:
473
+ return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
474
+
475
+
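To make the three cases in the comment above concrete, the sketch below instantiates `Bottleneck` directly and prints the shapes it hands back for each flag combination. This is a sketch under the assumption that the shipped `MobileBertConfig` defaults to `hidden_size=512` and `intra_bottleneck_size=128` and accepts the two flags the code reads as constructor arguments:

```python
# Sketch of the three Bottleneck return modes described in the comment above.
import torch
from transformers import MobileBertConfig
from transformers.models.mobilebert.modeling_mobilebert import Bottleneck

hidden_states = torch.randn(1, 4, 512)  # (batch, seq, hidden_size)

for use_bottleneck_attention, shared in [(True, True), (False, True), (False, False)]:
    config = MobileBertConfig(
        use_bottleneck_attention=use_bottleneck_attention,
        key_query_shared_bottleneck=shared,
    )
    query, key, value, layer_input = Bottleneck(config)(hidden_states)
    # query/key/value keep hidden_size (512) or shrink to intra_bottleneck_size (128)
    # depending on the flags; layer_input is always the bottlenecked residual.
    print(use_bottleneck_attention, shared, query.shape, value.shape, layer_input.shape)
```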
476
+ class FFNOutput(nn.Module):
477
+ def __init__(self, config):
478
+ super().__init__()
479
+ self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
480
+ self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
481
+
482
+ def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
483
+ layer_outputs = self.dense(hidden_states)
484
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
485
+ return layer_outputs
486
+
487
+
488
+ class FFNLayer(nn.Module):
489
+ def __init__(self, config):
490
+ super().__init__()
491
+ self.intermediate = MobileBertIntermediate(config)
492
+ self.output = FFNOutput(config)
493
+
494
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
495
+ intermediate_output = self.intermediate(hidden_states)
496
+ layer_outputs = self.output(intermediate_output, hidden_states)
497
+ return layer_outputs
498
+
499
+
500
+ class MobileBertLayer(nn.Module):
501
+ def __init__(self, config):
502
+ super().__init__()
503
+ self.use_bottleneck = config.use_bottleneck
504
+ self.num_feedforward_networks = config.num_feedforward_networks
505
+
506
+ self.attention = MobileBertAttention(config)
507
+ self.intermediate = MobileBertIntermediate(config)
508
+ self.output = MobileBertOutput(config)
509
+ if self.use_bottleneck:
510
+ self.bottleneck = Bottleneck(config)
511
+ if config.num_feedforward_networks > 1:
512
+ self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])
513
+
514
+ def forward(
515
+ self,
516
+ hidden_states: torch.Tensor,
517
+ attention_mask: Optional[torch.FloatTensor] = None,
518
+ head_mask: Optional[torch.FloatTensor] = None,
519
+ output_attentions: Optional[bool] = None,
520
+ ) -> Tuple[torch.Tensor]:
521
+ if self.use_bottleneck:
522
+ query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
523
+ else:
524
+ query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
525
+
526
+ self_attention_outputs = self.attention(
527
+ query_tensor,
528
+ key_tensor,
529
+ value_tensor,
530
+ layer_input,
531
+ attention_mask,
532
+ head_mask,
533
+ output_attentions=output_attentions,
534
+ )
535
+ attention_output = self_attention_outputs[0]
536
+ s = (attention_output,)
537
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
538
+
539
+ if self.num_feedforward_networks != 1:
540
+ for i, ffn_module in enumerate(self.ffn):
541
+ attention_output = ffn_module(attention_output)
542
+ s += (attention_output,)
543
+
544
+ intermediate_output = self.intermediate(attention_output)
545
+ layer_output = self.output(intermediate_output, attention_output, hidden_states)
546
+ outputs = (
547
+ (layer_output,)
548
+ + outputs
549
+ + (
550
+ torch.tensor(1000),
551
+ query_tensor,
552
+ key_tensor,
553
+ value_tensor,
554
+ layer_input,
555
+ attention_output,
556
+ intermediate_output,
557
+ )
558
+ + s
559
+ )
560
+ return outputs
561
+
562
+
563
+ class MobileBertEncoder(nn.Module):
564
+ def __init__(self, config):
565
+ super().__init__()
566
+ self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])
567
+
568
+ def forward(
569
+ self,
570
+ hidden_states: torch.Tensor,
571
+ attention_mask: Optional[torch.FloatTensor] = None,
572
+ head_mask: Optional[torch.FloatTensor] = None,
573
+ output_attentions: Optional[bool] = False,
574
+ output_hidden_states: Optional[bool] = False,
575
+ return_dict: Optional[bool] = True,
576
+ ) -> Union[Tuple, BaseModelOutput]:
577
+ all_hidden_states = () if output_hidden_states else None
578
+ all_attentions = () if output_attentions else None
579
+ for i, layer_module in enumerate(self.layer):
580
+ if output_hidden_states:
581
+ all_hidden_states = all_hidden_states + (hidden_states,)
582
+
583
+ layer_outputs = layer_module(
584
+ hidden_states,
585
+ attention_mask,
586
+ head_mask[i],
587
+ output_attentions,
588
+ )
589
+ hidden_states = layer_outputs[0]
590
+
591
+ if output_attentions:
592
+ all_attentions = all_attentions + (layer_outputs[1],)
593
+
594
+ # Add last layer
595
+ if output_hidden_states:
596
+ all_hidden_states = all_hidden_states + (hidden_states,)
597
+
598
+ if not return_dict:
599
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
600
+ return BaseModelOutput(
601
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
602
+ )
603
+
604
+
605
+ class MobileBertPooler(nn.Module):
606
+ def __init__(self, config):
607
+ super().__init__()
608
+ self.do_activate = config.classifier_activation
609
+ if self.do_activate:
610
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
611
+
612
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
613
+ # We "pool" the model by simply taking the hidden state corresponding
614
+ # to the first token.
615
+ first_token_tensor = hidden_states[:, 0]
616
+ if not self.do_activate:
617
+ return first_token_tensor
618
+ else:
619
+ pooled_output = self.dense(first_token_tensor)
620
+ pooled_output = torch.tanh(pooled_output)
621
+ return pooled_output
622
+
623
+
624
+ class MobileBertPredictionHeadTransform(nn.Module):
625
+ def __init__(self, config):
626
+ super().__init__()
627
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
628
+ if isinstance(config.hidden_act, str):
629
+ self.transform_act_fn = ACT2FN[config.hidden_act]
630
+ else:
631
+ self.transform_act_fn = config.hidden_act
632
+ self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps)
633
+
634
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
635
+ hidden_states = self.dense(hidden_states)
636
+ hidden_states = self.transform_act_fn(hidden_states)
637
+ hidden_states = self.LayerNorm(hidden_states)
638
+ return hidden_states
639
+
640
+
641
+ class MobileBertLMPredictionHead(nn.Module):
642
+ def __init__(self, config):
643
+ super().__init__()
644
+ self.transform = MobileBertPredictionHeadTransform(config)
645
+ # The output weights are the same as the input embeddings, but there is
646
+ # an output-only bias for each token.
647
+ self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)
648
+ self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
649
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
650
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
651
+ self.decoder.bias = self.bias
652
+
653
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
654
+ hidden_states = self.transform(hidden_states)
655
+ hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))
656
+ hidden_states += self.decoder.bias
657
+ return hidden_states
658
+
659
+
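The prediction head above rebuilds a full `hidden_size x vocab_size` projection by stacking the transposed (tied) decoder weight on top of the extra, untied `dense` rows, which is why `dense` is declared with `hidden_size - embedding_size` output features. A standalone shape check, assuming the usual MobileBERT sizes (hidden_size 512, embedding_size 128, vocab_size 30522):

```python
# Shape check for the concatenated projection in MobileBertLMPredictionHead.forward,
# assuming the usual MobileBERT sizes.
import torch
from torch import nn

hidden_size, embedding_size, vocab_size = 512, 128, 30522
decoder = nn.Linear(embedding_size, vocab_size, bias=False)               # tied to the input embeddings
dense = nn.Linear(vocab_size, hidden_size - embedding_size, bias=False)   # extra, untied rows

weight = torch.cat([decoder.weight.t(), dense.weight], dim=0)  # (hidden_size, vocab_size)
hidden_states = torch.randn(2, 6, hidden_size)
print(weight.shape, hidden_states.matmul(weight).shape)
# torch.Size([512, 30522]) torch.Size([2, 6, 30522])
```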
660
+ class MobileBertOnlyMLMHead(nn.Module):
661
+ def __init__(self, config):
662
+ super().__init__()
663
+ self.predictions = MobileBertLMPredictionHead(config)
664
+
665
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
666
+ prediction_scores = self.predictions(sequence_output)
667
+ return prediction_scores
668
+
669
+
670
+ class MobileBertPreTrainingHeads(nn.Module):
671
+ def __init__(self, config):
672
+ super().__init__()
673
+ self.predictions = MobileBertLMPredictionHead(config)
674
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
675
+
676
+ def forward(self, sequence_output: torch.Tensor, pooled_output: torch.Tensor) -> Tuple[torch.Tensor]:
677
+ prediction_scores = self.predictions(sequence_output)
678
+ seq_relationship_score = self.seq_relationship(pooled_output)
679
+ return prediction_scores, seq_relationship_score
680
+
681
+
682
+ class MobileBertPreTrainedModel(PreTrainedModel):
683
+ """
684
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
685
+ models.
686
+ """
687
+
688
+ config_class = MobileBertConfig
689
+ load_tf_weights = load_tf_weights_in_mobilebert
690
+ base_model_prefix = "mobilebert"
691
+
692
+ def _init_weights(self, module):
693
+ """Initialize the weights"""
694
+ if isinstance(module, nn.Linear):
695
+ # Slightly different from the TF version which uses truncated_normal for initialization
696
+ # cf https://github.com/pytorch/pytorch/pull/5617
697
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
698
+ if module.bias is not None:
699
+ module.bias.data.zero_()
700
+ elif isinstance(module, nn.Embedding):
701
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
702
+ if module.padding_idx is not None:
703
+ module.weight.data[module.padding_idx].zero_()
704
+ elif isinstance(module, (nn.LayerNorm, NoNorm)):
705
+ module.bias.data.zero_()
706
+ module.weight.data.fill_(1.0)
707
+
708
+
709
+ @dataclass
710
+ class MobileBertForPreTrainingOutput(ModelOutput):
711
+ """
712
+ Output type of [`MobileBertForPreTraining`].
713
+
714
+ Args:
715
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
716
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
717
+ (classification) loss.
718
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
719
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
720
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
721
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
722
+ before SoftMax).
723
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
724
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
725
+ shape `(batch_size, sequence_length, hidden_size)`.
726
+
727
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
728
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
729
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
730
+ sequence_length)`.
731
+
732
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
733
+ heads.
734
+ """
735
+
736
+ loss: Optional[torch.FloatTensor] = None
737
+ prediction_logits: torch.FloatTensor = None
738
+ seq_relationship_logits: torch.FloatTensor = None
739
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
740
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
741
+
742
+
743
+ MOBILEBERT_START_DOCSTRING = r"""
744
+
745
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
746
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
747
+ etc.)
748
+
749
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
750
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
751
+ and behavior.
752
+
753
+ Parameters:
754
+ config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model.
755
+ Initializing with a config file does not load the weights associated with the model, only the
756
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
757
+ """
758
+
759
+ MOBILEBERT_INPUTS_DOCSTRING = r"""
760
+ Args:
761
+ input_ids (`torch.LongTensor` of shape `({0})`):
762
+ Indices of input sequence tokens in the vocabulary.
763
+
764
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
765
+ [`PreTrainedTokenizer.__call__`] for details.
766
+
767
+ [What are input IDs?](../glossary#input-ids)
768
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
769
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
770
+
771
+ - 1 for tokens that are **not masked**,
772
+ - 0 for tokens that are **masked**.
773
+
774
+ [What are attention masks?](../glossary#attention-mask)
775
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
776
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
777
+ 1]`:
778
+
779
+ - 0 corresponds to a *sentence A* token,
780
+ - 1 corresponds to a *sentence B* token.
781
+
782
+ [What are token type IDs?](../glossary#token-type-ids)
783
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
784
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
785
+ config.max_position_embeddings - 1]`.
786
+
787
+ [What are position IDs?](../glossary#position-ids)
788
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
789
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
790
+
791
+ - 1 indicates the head is **not masked**,
792
+ - 0 indicates the head is **masked**.
793
+
794
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
795
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
796
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
797
+ model's internal embedding lookup matrix.
798
+ output_attentions (`bool`, *optional*):
799
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
800
+ tensors for more detail.
801
+ output_hidden_states (`bool`, *optional*):
802
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
803
+ more detail.
804
+ return_dict (`bool`, *optional*):
805
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
806
+ """
807
+
808
+
809
+ @add_start_docstrings(
810
+ "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
811
+ MOBILEBERT_START_DOCSTRING,
812
+ )
813
+ class MobileBertModel(MobileBertPreTrainedModel):
814
+ """
815
+ https://arxiv.org/pdf/2004.02984.pdf
816
+ """
817
+
818
+ def __init__(self, config, add_pooling_layer=True):
819
+ super().__init__(config)
820
+ self.config = config
821
+ self.embeddings = MobileBertEmbeddings(config)
822
+ self.encoder = MobileBertEncoder(config)
823
+
824
+ self.pooler = MobileBertPooler(config) if add_pooling_layer else None
825
+
826
+ # Initialize weights and apply final processing
827
+ self.post_init()
828
+
829
+ def get_input_embeddings(self):
830
+ return self.embeddings.word_embeddings
831
+
832
+ def set_input_embeddings(self, value):
833
+ self.embeddings.word_embeddings = value
834
+
835
+ def _prune_heads(self, heads_to_prune):
836
+ """
837
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
838
+ class PreTrainedModel
839
+ """
840
+ for layer, heads in heads_to_prune.items():
841
+ self.encoder.layer[layer].attention.prune_heads(heads)
842
+
843
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
844
+ @add_code_sample_docstrings(
845
+ checkpoint=_CHECKPOINT_FOR_DOC,
846
+ output_type=BaseModelOutputWithPooling,
847
+ config_class=_CONFIG_FOR_DOC,
848
+ )
849
+ def forward(
850
+ self,
851
+ input_ids: Optional[torch.LongTensor] = None,
852
+ attention_mask: Optional[torch.FloatTensor] = None,
853
+ token_type_ids: Optional[torch.LongTensor] = None,
854
+ position_ids: Optional[torch.LongTensor] = None,
855
+ head_mask: Optional[torch.FloatTensor] = None,
856
+ inputs_embeds: Optional[torch.FloatTensor] = None,
857
+ output_hidden_states: Optional[bool] = None,
858
+ output_attentions: Optional[bool] = None,
859
+ return_dict: Optional[bool] = None,
860
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
861
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
862
+ output_hidden_states = (
863
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
864
+ )
865
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
866
+
867
+ if input_ids is not None and inputs_embeds is not None:
868
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
869
+ elif input_ids is not None:
870
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
871
+ input_shape = input_ids.size()
872
+ elif inputs_embeds is not None:
873
+ input_shape = inputs_embeds.size()[:-1]
874
+ else:
875
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
876
+
877
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
878
+
879
+ if attention_mask is None:
880
+ attention_mask = torch.ones(input_shape, device=device)
881
+ if token_type_ids is None:
882
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
883
+
884
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
885
+ # ourselves in which case we just need to make it broadcastable to all heads.
886
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
887
+
888
+ # Prepare head mask if needed
889
+ # 1.0 in head_mask indicate we keep the head
890
+ # attention_probs has shape bsz x n_heads x N x N
891
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
892
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
893
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
894
+
895
+ embedding_output = self.embeddings(
896
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
897
+ )
898
+ encoder_outputs = self.encoder(
899
+ embedding_output,
900
+ attention_mask=extended_attention_mask,
901
+ head_mask=head_mask,
902
+ output_attentions=output_attentions,
903
+ output_hidden_states=output_hidden_states,
904
+ return_dict=return_dict,
905
+ )
906
+ sequence_output = encoder_outputs[0]
907
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
908
+
909
+ if not return_dict:
910
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
911
+
912
+ return BaseModelOutputWithPooling(
913
+ last_hidden_state=sequence_output,
914
+ pooler_output=pooled_output,
915
+ hidden_states=encoder_outputs.hidden_states,
916
+ attentions=encoder_outputs.attentions,
917
+ )
918
+
919
+
920
+ @add_start_docstrings(
921
+ """
922
+ MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
923
+ `next sentence prediction (classification)` head.
924
+ """,
925
+ MOBILEBERT_START_DOCSTRING,
926
+ )
927
+ class MobileBertForPreTraining(MobileBertPreTrainedModel):
928
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
929
+
930
+ def __init__(self, config):
931
+ super().__init__(config)
932
+ self.mobilebert = MobileBertModel(config)
933
+ self.cls = MobileBertPreTrainingHeads(config)
934
+
935
+ # Initialize weights and apply final processing
936
+ self.post_init()
937
+
938
+ def get_output_embeddings(self):
939
+ return self.cls.predictions.decoder
940
+
941
+ def set_output_embeddings(self, new_embeddings):
942
+ self.cls.predictions.decoder = new_embeddings
943
+
944
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
945
+ # resize the dense output embeddings first
946
+ self.cls.predictions.dense = self._get_resized_lm_head(
947
+ self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
948
+ )
949
+
950
+ return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
951
+
952
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
953
+ @replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
954
+ def forward(
955
+ self,
956
+ input_ids: Optional[torch.LongTensor] = None,
957
+ attention_mask: Optional[torch.FloatTensor] = None,
958
+ token_type_ids: Optional[torch.LongTensor] = None,
959
+ position_ids: Optional[torch.LongTensor] = None,
960
+ head_mask: Optional[torch.FloatTensor] = None,
961
+ inputs_embeds: Optional[torch.FloatTensor] = None,
962
+ labels: Optional[torch.LongTensor] = None,
963
+ next_sentence_label: Optional[torch.LongTensor] = None,
964
+ output_attentions: Optional[torch.FloatTensor] = None,
965
+ output_hidden_states: Optional[torch.FloatTensor] = None,
966
+ return_dict: Optional[torch.FloatTensor] = None,
967
+ ) -> Union[Tuple, MobileBertForPreTrainingOutput]:
968
+ r"""
969
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
970
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
971
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
972
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
973
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
974
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
975
+ (see `input_ids` docstring) Indices should be in `[0, 1]`:
976
+
977
+ - 0 indicates sequence B is a continuation of sequence A,
978
+ - 1 indicates sequence B is a random sequence.
979
+
980
+ Returns:
981
+
982
+ Examples:
983
+
984
+ ```python
985
+ >>> from transformers import AutoTokenizer, MobileBertForPreTraining
986
+ >>> import torch
987
+
988
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
989
+ >>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
990
+
991
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
992
+ >>> # Batch size 1
993
+ >>> outputs = model(input_ids)
994
+
995
+ >>> prediction_logits = outputs.prediction_logits
996
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
997
+ ```"""
998
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
999
+
1000
+ outputs = self.mobilebert(
1001
+ input_ids,
1002
+ attention_mask=attention_mask,
1003
+ token_type_ids=token_type_ids,
1004
+ position_ids=position_ids,
1005
+ head_mask=head_mask,
1006
+ inputs_embeds=inputs_embeds,
1007
+ output_attentions=output_attentions,
1008
+ output_hidden_states=output_hidden_states,
1009
+ return_dict=return_dict,
1010
+ )
1011
+ sequence_output, pooled_output = outputs[:2]
1012
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
1013
+
1014
+ total_loss = None
1015
+ if labels is not None and next_sentence_label is not None:
1016
+ loss_fct = CrossEntropyLoss()
1017
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1018
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1019
+ total_loss = masked_lm_loss + next_sentence_loss
1020
+
1021
+ if not return_dict:
1022
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1023
+ return ((total_loss,) + output) if total_loss is not None else output
1024
+
1025
+ return MobileBertForPreTrainingOutput(
1026
+ loss=total_loss,
1027
+ prediction_logits=prediction_scores,
1028
+ seq_relationship_logits=seq_relationship_score,
1029
+ hidden_states=outputs.hidden_states,
1030
+ attentions=outputs.attentions,
1031
+ )
1032
+
1033
+
1034
+ @add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING)
1035
+ class MobileBertForMaskedLM(MobileBertPreTrainedModel):
1036
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
1037
+
1038
+ def __init__(self, config):
1039
+ super().__init__(config)
1040
+ self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
1041
+ self.cls = MobileBertOnlyMLMHead(config)
1042
+ self.config = config
1043
+
1044
+ # Initialize weights and apply final processing
1045
+ self.post_init()
1046
+
1047
+ def get_output_embeddings(self):
1048
+ return self.cls.predictions.decoder
1049
+
1050
+ def set_output_embeddings(self, new_embeddings):
1051
+ self.cls.predictions.decoder = new_embeddings
1052
+
1053
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
1054
+ # resize the dense output embeddings first
1055
+ self.cls.predictions.dense = self._get_resized_lm_head(
1056
+ self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
1057
+ )
1058
+ return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
1059
+
1060
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1061
+ @add_code_sample_docstrings(
1062
+ checkpoint=_CHECKPOINT_FOR_DOC,
1063
+ output_type=MaskedLMOutput,
1064
+ config_class=_CONFIG_FOR_DOC,
1065
+ expected_output="'paris'",
1066
+ expected_loss=0.57,
1067
+ )
1068
+ def forward(
1069
+ self,
1070
+ input_ids: Optional[torch.LongTensor] = None,
1071
+ attention_mask: Optional[torch.FloatTensor] = None,
1072
+ token_type_ids: Optional[torch.LongTensor] = None,
1073
+ position_ids: Optional[torch.LongTensor] = None,
1074
+ head_mask: Optional[torch.FloatTensor] = None,
1075
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1076
+ labels: Optional[torch.LongTensor] = None,
1077
+ output_attentions: Optional[bool] = None,
1078
+ output_hidden_states: Optional[bool] = None,
1079
+ return_dict: Optional[bool] = None,
1080
+ ) -> Union[Tuple, MaskedLMOutput]:
1081
+ r"""
1082
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1083
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1084
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1085
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1086
+ """
1087
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1088
+
1089
+ outputs = self.mobilebert(
1090
+ input_ids,
1091
+ attention_mask=attention_mask,
1092
+ token_type_ids=token_type_ids,
1093
+ position_ids=position_ids,
1094
+ head_mask=head_mask,
1095
+ inputs_embeds=inputs_embeds,
1096
+ output_attentions=output_attentions,
1097
+ output_hidden_states=output_hidden_states,
1098
+ return_dict=return_dict,
1099
+ )
1100
+
1101
+ sequence_output = outputs[0]
1102
+ prediction_scores = self.cls(sequence_output)
1103
+
1104
+ masked_lm_loss = None
1105
+ if labels is not None:
1106
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1107
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1108
+
1109
+ if not return_dict:
1110
+ output = (prediction_scores,) + outputs[2:]
1111
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1112
+
1113
+ return MaskedLMOutput(
1114
+ loss=masked_lm_loss,
1115
+ logits=prediction_scores,
1116
+ hidden_states=outputs.hidden_states,
1117
+ attentions=outputs.attentions,
1118
+ )
1119
+
1120
+
1121
+ class MobileBertOnlyNSPHead(nn.Module):
1122
+ def __init__(self, config):
1123
+ super().__init__()
1124
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
1125
+
1126
+ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
1127
+ seq_relationship_score = self.seq_relationship(pooled_output)
1128
+ return seq_relationship_score
1129
+
1130
+
1131
+ @add_start_docstrings(
1132
+ """MobileBert Model with a `next sentence prediction (classification)` head on top.""",
1133
+ MOBILEBERT_START_DOCSTRING,
1134
+ )
1135
+ class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
1136
+ def __init__(self, config):
1137
+ super().__init__(config)
1138
+
1139
+ self.mobilebert = MobileBertModel(config)
1140
+ self.cls = MobileBertOnlyNSPHead(config)
1141
+
1142
+ # Initialize weights and apply final processing
1143
+ self.post_init()
1144
+
1145
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1146
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1147
+ def forward(
1148
+ self,
1149
+ input_ids: Optional[torch.LongTensor] = None,
1150
+ attention_mask: Optional[torch.FloatTensor] = None,
1151
+ token_type_ids: Optional[torch.LongTensor] = None,
1152
+ position_ids: Optional[torch.LongTensor] = None,
1153
+ head_mask: Optional[torch.FloatTensor] = None,
1154
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1155
+ labels: Optional[torch.LongTensor] = None,
1156
+ output_attentions: Optional[bool] = None,
1157
+ output_hidden_states: Optional[bool] = None,
1158
+ return_dict: Optional[bool] = None,
1159
+ **kwargs,
1160
+ ) -> Union[Tuple, NextSentencePredictorOutput]:
1161
+ r"""
1162
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1163
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1164
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1165
+
1166
+ - 0 indicates sequence B is a continuation of sequence A,
1167
+ - 1 indicates sequence B is a random sequence.
1168
+
1169
+ Returns:
1170
+
1171
+ Examples:
1172
+
1173
+ ```python
1174
+ >>> from transformers import AutoTokenizer, MobileBertForNextSentencePrediction
1175
+ >>> import torch
1176
+
1177
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
1178
+ >>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
1179
+
1180
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1181
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1182
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1183
+
1184
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1185
+ >>> loss = outputs.loss
1186
+ >>> logits = outputs.logits
1187
+ ```"""
1188
+
1189
+ if "next_sentence_label" in kwargs:
1190
+ warnings.warn(
1191
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1192
+ " `labels` instead.",
1193
+ FutureWarning,
1194
+ )
1195
+ labels = kwargs.pop("next_sentence_label")
1196
+
1197
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1198
+
1199
+ outputs = self.mobilebert(
1200
+ input_ids,
1201
+ attention_mask=attention_mask,
1202
+ token_type_ids=token_type_ids,
1203
+ position_ids=position_ids,
1204
+ head_mask=head_mask,
1205
+ inputs_embeds=inputs_embeds,
1206
+ output_attentions=output_attentions,
1207
+ output_hidden_states=output_hidden_states,
1208
+ return_dict=return_dict,
1209
+ )
1210
+
1211
+ pooled_output = outputs[1]
1212
+ seq_relationship_score = self.cls(pooled_output)
1213
+
1214
+ next_sentence_loss = None
1215
+ if labels is not None:
1216
+ loss_fct = CrossEntropyLoss()
1217
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1))
1218
+
1219
+ if not return_dict:
1220
+ output = (seq_relationship_score,) + outputs[2:]
1221
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1222
+
1223
+ return NextSentencePredictorOutput(
1224
+ loss=next_sentence_loss,
1225
+ logits=seq_relationship_score,
1226
+ hidden_states=outputs.hidden_states,
1227
+ attentions=outputs.attentions,
1228
+ )
1229
+
1230
+
1231
+ @add_start_docstrings(
1232
+ """
1233
+ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1234
+ pooled output) e.g. for GLUE tasks.
1235
+ """,
1236
+ MOBILEBERT_START_DOCSTRING,
1237
+ )
1238
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
1239
+ class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
1240
+ def __init__(self, config):
1241
+ super().__init__(config)
1242
+ self.num_labels = config.num_labels
1243
+ self.config = config
1244
+
1245
+ self.mobilebert = MobileBertModel(config)
1246
+ classifier_dropout = (
1247
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1248
+ )
1249
+ self.dropout = nn.Dropout(classifier_dropout)
1250
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1251
+
1252
+ # Initialize weights and apply final processing
1253
+ self.post_init()
1254
+
1255
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1256
+ @add_code_sample_docstrings(
1257
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
1258
+ output_type=SequenceClassifierOutput,
1259
+ config_class=_CONFIG_FOR_DOC,
1260
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1261
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1262
+ )
1263
+ def forward(
1264
+ self,
1265
+ input_ids: Optional[torch.Tensor] = None,
1266
+ attention_mask: Optional[torch.Tensor] = None,
1267
+ token_type_ids: Optional[torch.Tensor] = None,
1268
+ position_ids: Optional[torch.Tensor] = None,
1269
+ head_mask: Optional[torch.Tensor] = None,
1270
+ inputs_embeds: Optional[torch.Tensor] = None,
1271
+ labels: Optional[torch.Tensor] = None,
1272
+ output_attentions: Optional[bool] = None,
1273
+ output_hidden_states: Optional[bool] = None,
1274
+ return_dict: Optional[bool] = None,
1275
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1276
+ r"""
1277
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1278
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1279
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1280
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1281
+ """
1282
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1283
+
1284
+ outputs = self.mobilebert(
1285
+ input_ids,
1286
+ attention_mask=attention_mask,
1287
+ token_type_ids=token_type_ids,
1288
+ position_ids=position_ids,
1289
+ head_mask=head_mask,
1290
+ inputs_embeds=inputs_embeds,
1291
+ output_attentions=output_attentions,
1292
+ output_hidden_states=output_hidden_states,
1293
+ return_dict=return_dict,
1294
+ )
1295
+
1296
+ pooled_output = outputs[1]
1297
+
1298
+ pooled_output = self.dropout(pooled_output)
1299
+ logits = self.classifier(pooled_output)
1300
+
1301
+ loss = None
1302
+ if labels is not None:
1303
+ if self.config.problem_type is None:
1304
+ if self.num_labels == 1:
1305
+ self.config.problem_type = "regression"
1306
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1307
+ self.config.problem_type = "single_label_classification"
1308
+ else:
1309
+ self.config.problem_type = "multi_label_classification"
1310
+
1311
+ if self.config.problem_type == "regression":
1312
+ loss_fct = MSELoss()
1313
+ if self.num_labels == 1:
1314
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1315
+ else:
1316
+ loss = loss_fct(logits, labels)
1317
+ elif self.config.problem_type == "single_label_classification":
1318
+ loss_fct = CrossEntropyLoss()
1319
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1320
+ elif self.config.problem_type == "multi_label_classification":
1321
+ loss_fct = BCEWithLogitsLoss()
1322
+ loss = loss_fct(logits, labels)
1323
+ if not return_dict:
1324
+ output = (logits,) + outputs[2:]
1325
+ return ((loss,) + output) if loss is not None else output
1326
+
1327
+ return SequenceClassifierOutput(
1328
+ loss=loss,
1329
+ logits=logits,
1330
+ hidden_states=outputs.hidden_states,
1331
+ attentions=outputs.attentions,
1332
+ )
1333
+
1334
+
1335
+ @add_start_docstrings(
1336
+ """
1337
+ MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1338
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1339
+ """,
1340
+ MOBILEBERT_START_DOCSTRING,
1341
+ )
1342
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing
1343
+ class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
1344
+ def __init__(self, config):
1345
+ super().__init__(config)
1346
+ self.num_labels = config.num_labels
1347
+
1348
+ self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
1349
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1350
+
1351
+ # Initialize weights and apply final processing
1352
+ self.post_init()
1353
+
1354
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1355
+ @add_code_sample_docstrings(
1356
+ checkpoint=_CHECKPOINT_FOR_QA,
1357
+ output_type=QuestionAnsweringModelOutput,
1358
+ config_class=_CONFIG_FOR_DOC,
1359
+ qa_target_start_index=_QA_TARGET_START_INDEX,
1360
+ qa_target_end_index=_QA_TARGET_END_INDEX,
1361
+ expected_output=_QA_EXPECTED_OUTPUT,
1362
+ expected_loss=_QA_EXPECTED_LOSS,
1363
+ )
1364
+ def forward(
1365
+ self,
1366
+ input_ids: Optional[torch.Tensor] = None,
1367
+ attention_mask: Optional[torch.Tensor] = None,
1368
+ token_type_ids: Optional[torch.Tensor] = None,
1369
+ position_ids: Optional[torch.Tensor] = None,
1370
+ head_mask: Optional[torch.Tensor] = None,
1371
+ inputs_embeds: Optional[torch.Tensor] = None,
1372
+ start_positions: Optional[torch.Tensor] = None,
1373
+ end_positions: Optional[torch.Tensor] = None,
1374
+ output_attentions: Optional[bool] = None,
1375
+ output_hidden_states: Optional[bool] = None,
1376
+ return_dict: Optional[bool] = None,
1377
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1378
+ r"""
1379
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1380
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1381
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1382
+ are not taken into account for computing the loss.
1383
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1384
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1385
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1386
+ are not taken into account for computing the loss.
1387
+ """
1388
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1389
+
1390
+ outputs = self.mobilebert(
1391
+ input_ids,
1392
+ attention_mask=attention_mask,
1393
+ token_type_ids=token_type_ids,
1394
+ position_ids=position_ids,
1395
+ head_mask=head_mask,
1396
+ inputs_embeds=inputs_embeds,
1397
+ output_attentions=output_attentions,
1398
+ output_hidden_states=output_hidden_states,
1399
+ return_dict=return_dict,
1400
+ )
1401
+
1402
+ sequence_output = outputs[0]
1403
+
1404
+ logits = self.qa_outputs(sequence_output)
1405
+ start_logits, end_logits = logits.split(1, dim=-1)
1406
+ start_logits = start_logits.squeeze(-1).contiguous()
1407
+ end_logits = end_logits.squeeze(-1).contiguous()
1408
+
1409
+ total_loss = None
1410
+ if start_positions is not None and end_positions is not None:
1411
+ # on multi-GPU, the start/end positions may carry an extra trailing dimension; squeeze it away
1412
+ if len(start_positions.size()) > 1:
1413
+ start_positions = start_positions.squeeze(-1)
1414
+ if len(end_positions.size()) > 1:
1415
+ end_positions = end_positions.squeeze(-1)
1416
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1417
+ ignored_index = start_logits.size(1)
1418
+ start_positions = start_positions.clamp(0, ignored_index)
1419
+ end_positions = end_positions.clamp(0, ignored_index)
1420
+
1421
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1422
+ start_loss = loss_fct(start_logits, start_positions)
1423
+ end_loss = loss_fct(end_logits, end_positions)
1424
+ total_loss = (start_loss + end_loss) / 2
1425
+
1426
+ if not return_dict:
1427
+ output = (start_logits, end_logits) + outputs[2:]
1428
+ return ((total_loss,) + output) if total_loss is not None else output
1429
+
1430
+ return QuestionAnsweringModelOutput(
1431
+ loss=total_loss,
1432
+ start_logits=start_logits,
1433
+ end_logits=end_logits,
1434
+ hidden_states=outputs.hidden_states,
1435
+ attentions=outputs.attentions,
1436
+ )
1437
+
1438
+
1439
+ @add_start_docstrings(
1440
+ """
1441
+ MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
1442
+ a softmax) e.g. for RocStories/SWAG tasks.
1443
+ """,
1444
+ MOBILEBERT_START_DOCSTRING,
1445
+ )
1446
+ # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice with Bert->MobileBert all-casing
1447
+ class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
1448
+ def __init__(self, config):
1449
+ super().__init__(config)
1450
+
1451
+ self.mobilebert = MobileBertModel(config)
1452
+ classifier_dropout = (
1453
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1454
+ )
1455
+ self.dropout = nn.Dropout(classifier_dropout)
1456
+ self.classifier = nn.Linear(config.hidden_size, 1)
1457
+
1458
+ # Initialize weights and apply final processing
1459
+ self.post_init()
1460
+
1461
+ @add_start_docstrings_to_model_forward(
1462
+ MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1463
+ )
1464
+ @add_code_sample_docstrings(
1465
+ checkpoint=_CHECKPOINT_FOR_DOC,
1466
+ output_type=MultipleChoiceModelOutput,
1467
+ config_class=_CONFIG_FOR_DOC,
1468
+ )
1469
+ def forward(
1470
+ self,
1471
+ input_ids: Optional[torch.Tensor] = None,
1472
+ attention_mask: Optional[torch.Tensor] = None,
1473
+ token_type_ids: Optional[torch.Tensor] = None,
1474
+ position_ids: Optional[torch.Tensor] = None,
1475
+ head_mask: Optional[torch.Tensor] = None,
1476
+ inputs_embeds: Optional[torch.Tensor] = None,
1477
+ labels: Optional[torch.Tensor] = None,
1478
+ output_attentions: Optional[bool] = None,
1479
+ output_hidden_states: Optional[bool] = None,
1480
+ return_dict: Optional[bool] = None,
1481
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1482
+ r"""
1483
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1484
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1485
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1486
+ `input_ids` above)
1487
+ """
1488
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1489
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1490
+
1491
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1492
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1493
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1494
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1495
+ inputs_embeds = (
1496
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1497
+ if inputs_embeds is not None
1498
+ else None
1499
+ )
1500
+
1501
+ outputs = self.mobilebert(
1502
+ input_ids,
1503
+ attention_mask=attention_mask,
1504
+ token_type_ids=token_type_ids,
1505
+ position_ids=position_ids,
1506
+ head_mask=head_mask,
1507
+ inputs_embeds=inputs_embeds,
1508
+ output_attentions=output_attentions,
1509
+ output_hidden_states=output_hidden_states,
1510
+ return_dict=return_dict,
1511
+ )
1512
+
1513
+ pooled_output = outputs[1]
1514
+
1515
+ pooled_output = self.dropout(pooled_output)
1516
+ logits = self.classifier(pooled_output)
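+ # the classifier produces one score per (example, choice) pair; regroup them into (batch_size, num_choices) class logits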
1517
+ reshaped_logits = logits.view(-1, num_choices)
1518
+
1519
+ loss = None
1520
+ if labels is not None:
1521
+ loss_fct = CrossEntropyLoss()
1522
+ loss = loss_fct(reshaped_logits, labels)
1523
+
1524
+ if not return_dict:
1525
+ output = (reshaped_logits,) + outputs[2:]
1526
+ return ((loss,) + output) if loss is not None else output
1527
+
1528
+ return MultipleChoiceModelOutput(
1529
+ loss=loss,
1530
+ logits=reshaped_logits,
1531
+ hidden_states=outputs.hidden_states,
1532
+ attentions=outputs.attentions,
1533
+ )
1534
+
1535
+
1536
+ @add_start_docstrings(
1537
+ """
1538
+ MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1539
+ for Named-Entity-Recognition (NER) tasks.
1540
+ """,
1541
+ MOBILEBERT_START_DOCSTRING,
1542
+ )
1543
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing
1544
+ class MobileBertForTokenClassification(MobileBertPreTrainedModel):
1545
+ def __init__(self, config):
1546
+ super().__init__(config)
1547
+ self.num_labels = config.num_labels
1548
+
1549
+ self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
1550
+ classifier_dropout = (
1551
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1552
+ )
1553
+ self.dropout = nn.Dropout(classifier_dropout)
1554
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1555
+
1556
+ # Initialize weights and apply final processing
1557
+ self.post_init()
1558
+
1559
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1560
+ @add_code_sample_docstrings(
1561
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
1562
+ output_type=TokenClassifierOutput,
1563
+ config_class=_CONFIG_FOR_DOC,
1564
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
1565
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
1566
+ )
1567
+ def forward(
1568
+ self,
1569
+ input_ids: Optional[torch.Tensor] = None,
1570
+ attention_mask: Optional[torch.Tensor] = None,
1571
+ token_type_ids: Optional[torch.Tensor] = None,
1572
+ position_ids: Optional[torch.Tensor] = None,
1573
+ head_mask: Optional[torch.Tensor] = None,
1574
+ inputs_embeds: Optional[torch.Tensor] = None,
1575
+ labels: Optional[torch.Tensor] = None,
1576
+ output_attentions: Optional[bool] = None,
1577
+ output_hidden_states: Optional[bool] = None,
1578
+ return_dict: Optional[bool] = None,
1579
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1580
+ r"""
1581
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1582
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1583
+ """
1584
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1585
+
1586
+ outputs = self.mobilebert(
1587
+ input_ids,
1588
+ attention_mask=attention_mask,
1589
+ token_type_ids=token_type_ids,
1590
+ position_ids=position_ids,
1591
+ head_mask=head_mask,
1592
+ inputs_embeds=inputs_embeds,
1593
+ output_attentions=output_attentions,
1594
+ output_hidden_states=output_hidden_states,
1595
+ return_dict=return_dict,
1596
+ )
1597
+
1598
+ sequence_output = outputs[0]
1599
+
1600
+ sequence_output = self.dropout(sequence_output)
1601
+ logits = self.classifier(sequence_output)
1602
+
1603
+ loss = None
1604
+ if labels is not None:
1605
+ loss_fct = CrossEntropyLoss()
1606
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1607
+
1608
+ if not return_dict:
1609
+ output = (logits,) + outputs[2:]
1610
+ return ((loss,) + output) if loss is not None else output
1611
+
1612
+ return TokenClassifierOutput(
1613
+ loss=loss,
1614
+ logits=logits,
1615
+ hidden_states=outputs.hidden_states,
1616
+ attentions=outputs.attentions,
1617
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_tf_mobilebert.py ADDED
@@ -0,0 +1,1970 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 MobileBERT model."""
17
+
18
+
19
+ from __future__ import annotations
20
+
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFBaseModelOutputWithPooling,
32
+ TFMaskedLMOutput,
33
+ TFMultipleChoiceModelOutput,
34
+ TFNextSentencePredictorOutput,
35
+ TFQuestionAnsweringModelOutput,
36
+ TFSequenceClassifierOutput,
37
+ TFTokenClassifierOutput,
38
+ )
39
+ from ...modeling_tf_utils import (
40
+ TFMaskedLanguageModelingLoss,
41
+ TFModelInputType,
42
+ TFMultipleChoiceLoss,
43
+ TFNextSentencePredictionLoss,
44
+ TFPreTrainedModel,
45
+ TFQuestionAnsweringLoss,
46
+ TFSequenceClassificationLoss,
47
+ TFTokenClassificationLoss,
48
+ get_initializer,
49
+ keras,
50
+ keras_serializable,
51
+ unpack_inputs,
52
+ )
53
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
54
+ from ...utils import (
55
+ ModelOutput,
56
+ add_code_sample_docstrings,
57
+ add_start_docstrings,
58
+ add_start_docstrings_to_model_forward,
59
+ logging,
60
+ replace_return_docstrings,
61
+ )
62
+ from .configuration_mobilebert import MobileBertConfig
63
+
64
+
65
+ logger = logging.get_logger(__name__)
66
+
67
+ _CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
68
+ _CONFIG_FOR_DOC = "MobileBertConfig"
69
+
70
+ # TokenClassification docstring
71
+ _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "vumichien/mobilebert-finetuned-ner"
72
+ _TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']"
73
+ _TOKEN_CLASS_EXPECTED_LOSS = 0.03
74
+
75
+ # QuestionAnswering docstring
76
+ _CHECKPOINT_FOR_QA = "vumichien/mobilebert-uncased-squad-v2"
77
+ _QA_EXPECTED_OUTPUT = "'a nice puppet'"
78
+ _QA_EXPECTED_LOSS = 3.98
79
+ _QA_TARGET_START_INDEX = 12
80
+ _QA_TARGET_END_INDEX = 13
81
+
82
+ # SequenceClassification docstring
83
+ _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "vumichien/emo-mobilebert"
84
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'others'"
85
+ _SEQ_CLASS_EXPECTED_LOSS = "4.72"
86
+
87
+
88
+ from ..deprecated._archive_maps import TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
89
+
90
+
91
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainingLoss
92
+ class TFMobileBertPreTrainingLoss:
93
+ """
94
+ Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining
95
+ NSP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss
96
+ computation.
97
+ """
98
+
99
+ def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
100
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE)
101
+
102
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
103
+ unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0])
104
+ # make sure only labels that are not equal to -100
105
+ # are taken into account for the loss computation
106
+ lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype)
107
+ masked_lm_losses = unmasked_lm_losses * lm_loss_mask
108
+ reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask)
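+ # dividing by the mask sum averages the loss only over positions whose label is not -100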
109
+
110
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
111
+ unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels["next_sentence_label"]), y_pred=logits[1])
112
+ ns_loss_mask = tf.cast(labels["next_sentence_label"] != -100, dtype=unmasked_ns_loss.dtype)
113
+ masked_ns_loss = unmasked_ns_loss * ns_loss_mask
114
+
115
+ reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask)
116
+
117
+ return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,))
118
+
119
+
120
+ class TFMobileBertIntermediate(keras.layers.Layer):
121
+ def __init__(self, config, **kwargs):
122
+ super().__init__(**kwargs)
123
+
124
+ self.dense = keras.layers.Dense(config.intermediate_size, name="dense")
125
+
126
+ if isinstance(config.hidden_act, str):
127
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
128
+ else:
129
+ self.intermediate_act_fn = config.hidden_act
130
+ self.config = config
131
+
132
+ def call(self, hidden_states):
133
+ hidden_states = self.dense(hidden_states)
134
+ hidden_states = self.intermediate_act_fn(hidden_states)
135
+
136
+ return hidden_states
137
+
138
+ def build(self, input_shape=None):
139
+ if self.built:
140
+ return
141
+ self.built = True
142
+ if getattr(self, "dense", None) is not None:
143
+ with tf.name_scope(self.dense.name):
144
+ self.dense.build([None, None, self.config.true_hidden_size])
145
+
146
+
147
+ class TFLayerNorm(keras.layers.LayerNormalization):
148
+ def __init__(self, feat_size, *args, **kwargs):
149
+ self.feat_size = feat_size
150
+ super().__init__(*args, **kwargs)
151
+
152
+ def build(self, input_shape=None):
153
+ super().build([None, None, self.feat_size])
154
+
155
+
156
+ class TFNoNorm(keras.layers.Layer):
157
+ def __init__(self, feat_size, epsilon=None, **kwargs):
158
+ super().__init__(**kwargs)
159
+ self.feat_size = feat_size
160
+
161
+ def build(self, input_shape):
162
+ self.bias = self.add_weight("bias", shape=[self.feat_size], initializer="zeros")
163
+ self.weight = self.add_weight("weight", shape=[self.feat_size], initializer="ones")
164
+ super().build(input_shape)
165
+
166
+ def call(self, inputs: tf.Tensor):
167
+ return inputs * self.weight + self.bias
168
+
169
+
170
+ NORM2FN = {"layer_norm": TFLayerNorm, "no_norm": TFNoNorm}
171
+
172
+
173
+ class TFMobileBertEmbeddings(keras.layers.Layer):
174
+ """Construct the embeddings from word, position and token_type embeddings."""
175
+
176
+ def __init__(self, config, **kwargs):
177
+ super().__init__(**kwargs)
178
+
179
+ self.trigram_input = config.trigram_input
180
+ self.embedding_size = config.embedding_size
181
+ self.config = config
182
+ self.hidden_size = config.hidden_size
183
+ self.max_position_embeddings = config.max_position_embeddings
184
+ self.initializer_range = config.initializer_range
185
+ self.embedding_transformation = keras.layers.Dense(config.hidden_size, name="embedding_transformation")
186
+
187
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
188
+ # any TensorFlow checkpoint file
189
+ self.LayerNorm = NORM2FN[config.normalization_type](
190
+ config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
191
+ )
192
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
193
+ self.embedded_input_size = self.embedding_size * (3 if self.trigram_input else 1)
194
+
195
+ def build(self, input_shape=None):
196
+ with tf.name_scope("word_embeddings"):
197
+ self.weight = self.add_weight(
198
+ name="weight",
199
+ shape=[self.config.vocab_size, self.embedding_size],
200
+ initializer=get_initializer(initializer_range=self.initializer_range),
201
+ )
202
+
203
+ with tf.name_scope("token_type_embeddings"):
204
+ self.token_type_embeddings = self.add_weight(
205
+ name="embeddings",
206
+ shape=[self.config.type_vocab_size, self.hidden_size],
207
+ initializer=get_initializer(initializer_range=self.initializer_range),
208
+ )
209
+
210
+ with tf.name_scope("position_embeddings"):
211
+ self.position_embeddings = self.add_weight(
212
+ name="embeddings",
213
+ shape=[self.max_position_embeddings, self.hidden_size],
214
+ initializer=get_initializer(initializer_range=self.initializer_range),
215
+ )
216
+
217
+ if self.built:
218
+ return
219
+ self.built = True
220
+ if getattr(self, "embedding_transformation", None) is not None:
221
+ with tf.name_scope(self.embedding_transformation.name):
222
+ self.embedding_transformation.build([None, None, self.embedded_input_size])
223
+ if getattr(self, "LayerNorm", None) is not None:
224
+ with tf.name_scope(self.LayerNorm.name):
225
+ self.LayerNorm.build(None)
226
+
227
+ def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
228
+ """
229
+ Applies embedding based on inputs tensor.
230
+
231
+ Returns:
232
+ final_embeddings (`tf.Tensor`): output embedding tensor.
233
+ """
234
+ assert not (input_ids is None and inputs_embeds is None)
235
+
236
+ if input_ids is not None:
237
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
238
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
239
+
240
+ input_shape = shape_list(inputs_embeds)[:-1]
241
+
242
+ if token_type_ids is None:
243
+ token_type_ids = tf.fill(dims=input_shape, value=0)
244
+
245
+ if self.trigram_input:
246
+ # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
247
+ # Devices (https://arxiv.org/abs/2004.02984)
248
+ #
249
+ # The embedding table in BERT models accounts for a substantial proportion of model size. To compress
250
+ # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
251
+ # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
252
+ # dimensional output.
253
+ inputs_embeds = tf.concat(
254
+ [
255
+ tf.pad(inputs_embeds[:, 1:], ((0, 0), (0, 1), (0, 0))),
256
+ inputs_embeds,
257
+ tf.pad(inputs_embeds[:, :-1], ((0, 0), (1, 0), (0, 0))),
258
+ ],
259
+ axis=2,
260
+ )
261
+
262
+ if self.trigram_input or self.embedding_size != self.hidden_size:
263
+ inputs_embeds = self.embedding_transformation(inputs_embeds)
264
+
265
+ if position_ids is None:
266
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
267
+
268
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
269
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
270
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
271
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
272
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
273
+
274
+ return final_embeddings
275
+
276
+
277
+ class TFMobileBertSelfAttention(keras.layers.Layer):
278
+ def __init__(self, config, **kwargs):
279
+ super().__init__(**kwargs)
280
+ if config.hidden_size % config.num_attention_heads != 0:
281
+ raise ValueError(
282
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
283
+ f"heads ({config.num_attention_heads}"
284
+ )
285
+
286
+ self.num_attention_heads = config.num_attention_heads
287
+ self.output_attentions = config.output_attentions
288
+ assert config.hidden_size % config.num_attention_heads == 0
289
+ self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
290
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
291
+
292
+ self.query = keras.layers.Dense(
293
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
294
+ )
295
+ self.key = keras.layers.Dense(
296
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
297
+ )
298
+ self.value = keras.layers.Dense(
299
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
300
+ )
301
+
302
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
303
+ self.config = config
304
+
305
+ def transpose_for_scores(self, x, batch_size):
306
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
307
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
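+ # then transpose to [batch_size, num_attention_heads, seq_length, attention_head_size] so attention is computed per head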
308
+ return tf.transpose(x, perm=[0, 2, 1, 3])
309
+
310
+ def call(
311
+ self, query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=False
312
+ ):
313
+ batch_size = shape_list(attention_mask)[0]
314
+ mixed_query_layer = self.query(query_tensor)
315
+ mixed_key_layer = self.key(key_tensor)
316
+ mixed_value_layer = self.value(value_tensor)
317
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
318
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
319
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
320
+
321
+ # Take the dot product between "query" and "key" to get the raw attention scores.
322
+ attention_scores = tf.matmul(
323
+ query_layer, key_layer, transpose_b=True
324
+ ) # (batch size, num_heads, seq_len_q, seq_len_k)
325
+ dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores
326
+ attention_scores = attention_scores / tf.math.sqrt(dk)
327
+
328
+ if attention_mask is not None:
329
+ # Apply the attention mask is (precomputed for all layers in TFMobileBertModel call() function)
330
+ attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
331
+ attention_scores = attention_scores + attention_mask
332
+
333
+ # Normalize the attention scores to probabilities.
334
+ attention_probs = stable_softmax(attention_scores, axis=-1)
335
+
336
+ # This is actually dropping out entire tokens to attend to, which might
337
+ # seem a bit unusual, but is taken from the original Transformer paper.
338
+ attention_probs = self.dropout(attention_probs, training=training)
339
+
340
+ # Mask heads if we want to
341
+ if head_mask is not None:
342
+ attention_probs = attention_probs * head_mask
343
+
344
+ context_layer = tf.matmul(attention_probs, value_layer)
345
+
346
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
347
+ context_layer = tf.reshape(
348
+ context_layer, (batch_size, -1, self.all_head_size)
349
+ ) # (batch_size, seq_len_q, all_head_size)
350
+
351
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
352
+
353
+ return outputs
354
+
355
+ def build(self, input_shape=None):
356
+ if self.built:
357
+ return
358
+ self.built = True
359
+ if getattr(self, "query", None) is not None:
360
+ with tf.name_scope(self.query.name):
361
+ self.query.build([None, None, self.config.true_hidden_size])
362
+ if getattr(self, "key", None) is not None:
363
+ with tf.name_scope(self.key.name):
364
+ self.key.build([None, None, self.config.true_hidden_size])
365
+ if getattr(self, "value", None) is not None:
366
+ with tf.name_scope(self.value.name):
367
+ self.value.build(
368
+ [
369
+ None,
370
+ None,
371
+ self.config.true_hidden_size
372
+ if self.config.use_bottleneck_attention
373
+ else self.config.hidden_size,
374
+ ]
375
+ )
376
+
377
+
378
+ class TFMobileBertSelfOutput(keras.layers.Layer):
379
+ def __init__(self, config, **kwargs):
380
+ super().__init__(**kwargs)
381
+ self.use_bottleneck = config.use_bottleneck
382
+ self.dense = keras.layers.Dense(
383
+ config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
384
+ )
385
+ self.LayerNorm = NORM2FN[config.normalization_type](
386
+ config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
387
+ )
388
+ if not self.use_bottleneck:
389
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
390
+ self.config = config
391
+
392
+ def call(self, hidden_states, residual_tensor, training=False):
393
+ hidden_states = self.dense(hidden_states)
394
+ if not self.use_bottleneck:
395
+ hidden_states = self.dropout(hidden_states, training=training)
396
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor)
397
+ return hidden_states
398
+
399
+ def build(self, input_shape=None):
400
+ if self.built:
401
+ return
402
+ self.built = True
403
+ if getattr(self, "dense", None) is not None:
404
+ with tf.name_scope(self.dense.name):
405
+ self.dense.build([None, None, self.config.true_hidden_size])
406
+ if getattr(self, "LayerNorm", None) is not None:
407
+ with tf.name_scope(self.LayerNorm.name):
408
+ self.LayerNorm.build(None)
409
+
410
+
411
+ class TFMobileBertAttention(keras.layers.Layer):
412
+ def __init__(self, config, **kwargs):
413
+ super().__init__(**kwargs)
414
+ self.self = TFMobileBertSelfAttention(config, name="self")
415
+ self.mobilebert_output = TFMobileBertSelfOutput(config, name="output")
416
+
417
+ def prune_heads(self, heads):
418
+ raise NotImplementedError
419
+
420
+ def call(
421
+ self,
422
+ query_tensor,
423
+ key_tensor,
424
+ value_tensor,
425
+ layer_input,
426
+ attention_mask,
427
+ head_mask,
428
+ output_attentions,
429
+ training=False,
430
+ ):
431
+ self_outputs = self.self(
432
+ query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=training
433
+ )
434
+
435
+ attention_output = self.mobilebert_output(self_outputs[0], layer_input, training=training)
436
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
437
+ return outputs
438
+
439
+ def build(self, input_shape=None):
440
+ if self.built:
441
+ return
442
+ self.built = True
443
+ if getattr(self, "self", None) is not None:
444
+ with tf.name_scope(self.self.name):
445
+ self.self.build(None)
446
+ if getattr(self, "mobilebert_output", None) is not None:
447
+ with tf.name_scope(self.mobilebert_output.name):
448
+ self.mobilebert_output.build(None)
449
+
450
+
451
+ class TFOutputBottleneck(keras.layers.Layer):
452
+ def __init__(self, config, **kwargs):
453
+ super().__init__(**kwargs)
454
+ self.dense = keras.layers.Dense(config.hidden_size, name="dense")
455
+ self.LayerNorm = NORM2FN[config.normalization_type](
456
+ config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
457
+ )
458
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
459
+ self.config = config
460
+
461
+ def call(self, hidden_states, residual_tensor, training=False):
462
+ layer_outputs = self.dense(hidden_states)
463
+ layer_outputs = self.dropout(layer_outputs, training=training)
464
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
465
+ return layer_outputs
466
+
467
+ def build(self, input_shape=None):
468
+ if self.built:
469
+ return
470
+ self.built = True
471
+ if getattr(self, "dense", None) is not None:
472
+ with tf.name_scope(self.dense.name):
473
+ self.dense.build([None, None, self.config.true_hidden_size])
474
+ if getattr(self, "LayerNorm", None) is not None:
475
+ with tf.name_scope(self.LayerNorm.name):
476
+ self.LayerNorm.build(None)
477
+
478
+
479
+ class TFMobileBertOutput(keras.layers.Layer):
480
+ def __init__(self, config, **kwargs):
481
+ super().__init__(**kwargs)
482
+ self.use_bottleneck = config.use_bottleneck
483
+ self.dense = keras.layers.Dense(
484
+ config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
485
+ )
486
+ self.LayerNorm = NORM2FN[config.normalization_type](
487
+ config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
488
+ )
489
+ if not self.use_bottleneck:
490
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
491
+ else:
492
+ self.bottleneck = TFOutputBottleneck(config, name="bottleneck")
493
+ self.config = config
494
+
495
+ def call(self, hidden_states, residual_tensor_1, residual_tensor_2, training=False):
496
+ hidden_states = self.dense(hidden_states)
497
+ if not self.use_bottleneck:
498
+ hidden_states = self.dropout(hidden_states, training=training)
499
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor_1)
500
+ else:
501
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor_1)
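+ # the output bottleneck projects true_hidden_size back up to hidden_size and adds the residual from the block input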
502
+ hidden_states = self.bottleneck(hidden_states, residual_tensor_2)
503
+ return hidden_states
504
+
505
+ def build(self, input_shape=None):
506
+ if self.built:
507
+ return
508
+ self.built = True
509
+ if getattr(self, "dense", None) is not None:
510
+ with tf.name_scope(self.dense.name):
511
+ self.dense.build([None, None, self.config.intermediate_size])
512
+ if getattr(self, "LayerNorm", None) is not None:
513
+ with tf.name_scope(self.LayerNorm.name):
514
+ self.LayerNorm.build(None)
515
+ if getattr(self, "bottleneck", None) is not None:
516
+ with tf.name_scope(self.bottleneck.name):
517
+ self.bottleneck.build(None)
518
+
519
+
520
+ class TFBottleneckLayer(keras.layers.Layer):
521
+ def __init__(self, config, **kwargs):
522
+ super().__init__(**kwargs)
523
+ self.dense = keras.layers.Dense(config.intra_bottleneck_size, name="dense")
524
+ self.LayerNorm = NORM2FN[config.normalization_type](
525
+ config.intra_bottleneck_size, epsilon=config.layer_norm_eps, name="LayerNorm"
526
+ )
527
+ self.config = config
528
+
529
+ def call(self, inputs):
530
+ hidden_states = self.dense(inputs)
531
+ hidden_states = self.LayerNorm(hidden_states)
532
+ return hidden_states
533
+
534
+ def build(self, input_shape=None):
535
+ if self.built:
536
+ return
537
+ self.built = True
538
+ if getattr(self, "dense", None) is not None:
539
+ with tf.name_scope(self.dense.name):
540
+ self.dense.build([None, None, self.config.hidden_size])
541
+ if getattr(self, "LayerNorm", None) is not None:
542
+ with tf.name_scope(self.LayerNorm.name):
543
+ self.LayerNorm.build(None)
544
+
545
+
546
+ class TFBottleneck(keras.layers.Layer):
547
+ def __init__(self, config, **kwargs):
548
+ super().__init__(**kwargs)
549
+ self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
550
+ self.use_bottleneck_attention = config.use_bottleneck_attention
551
+ self.bottleneck_input = TFBottleneckLayer(config, name="input")
552
+ if self.key_query_shared_bottleneck:
553
+ self.attention = TFBottleneckLayer(config, name="attention")
554
+
555
+ def call(self, hidden_states):
556
+ # This method can return three different tuples of values. These different values make use of bottlenecks,
557
+ # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
558
+ # usage. These linear layer have weights that are learned during training.
559
+ #
560
+ # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
561
+ # key, query, value, and "layer input" to be used by the attention layer.
562
+ # This bottleneck is used to project the hidden. This last layer input will be used as a residual tensor
563
+ # in the attention self output, after the attention scores have been computed.
564
+ #
565
+ # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
566
+ # four values, three of which have been passed through a bottleneck: the query and key, passed through the same
567
+ # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.
568
+ #
569
+ # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,
570
+ # and the residual layer will be this value passed through a bottleneck.
571
+
572
+ bottlenecked_hidden_states = self.bottleneck_input(hidden_states)
573
+ if self.use_bottleneck_attention:
574
+ return (bottlenecked_hidden_states,) * 4
575
+ elif self.key_query_shared_bottleneck:
576
+ shared_attention_input = self.attention(hidden_states)
577
+ return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
578
+ else:
579
+ return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
580
+
581
+ def build(self, input_shape=None):
582
+ if self.built:
583
+ return
584
+ self.built = True
585
+ if getattr(self, "bottleneck_input", None) is not None:
586
+ with tf.name_scope(self.bottleneck_input.name):
587
+ self.bottleneck_input.build(None)
588
+ if getattr(self, "attention", None) is not None:
589
+ with tf.name_scope(self.attention.name):
590
+ self.attention.build(None)
591
+
592
+
593
+ class TFFFNOutput(keras.layers.Layer):
594
+ def __init__(self, config, **kwargs):
595
+ super().__init__(**kwargs)
596
+ self.dense = keras.layers.Dense(config.true_hidden_size, name="dense")
597
+ self.LayerNorm = NORM2FN[config.normalization_type](
598
+ config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
599
+ )
600
+ self.config = config
601
+
602
+ def call(self, hidden_states, residual_tensor):
603
+ hidden_states = self.dense(hidden_states)
604
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor)
605
+ return hidden_states
606
+
607
+ def build(self, input_shape=None):
608
+ if self.built:
609
+ return
610
+ self.built = True
611
+ if getattr(self, "dense", None) is not None:
612
+ with tf.name_scope(self.dense.name):
613
+ self.dense.build([None, None, self.config.intermediate_size])
614
+ if getattr(self, "LayerNorm", None) is not None:
615
+ with tf.name_scope(self.LayerNorm.name):
616
+ self.LayerNorm.build(None)
617
+
618
+
619
+ class TFFFNLayer(keras.layers.Layer):
620
+ def __init__(self, config, **kwargs):
621
+ super().__init__(**kwargs)
622
+ self.intermediate = TFMobileBertIntermediate(config, name="intermediate")
623
+ self.mobilebert_output = TFFFNOutput(config, name="output")
624
+
625
+ def call(self, hidden_states):
626
+ intermediate_output = self.intermediate(hidden_states)
627
+ layer_outputs = self.mobilebert_output(intermediate_output, hidden_states)
628
+ return layer_outputs
629
+
630
+ def build(self, input_shape=None):
631
+ if self.built:
632
+ return
633
+ self.built = True
634
+ if getattr(self, "intermediate", None) is not None:
635
+ with tf.name_scope(self.intermediate.name):
636
+ self.intermediate.build(None)
637
+ if getattr(self, "mobilebert_output", None) is not None:
638
+ with tf.name_scope(self.mobilebert_output.name):
639
+ self.mobilebert_output.build(None)
640
+
641
+
642
+ class TFMobileBertLayer(keras.layers.Layer):
643
+ def __init__(self, config, **kwargs):
644
+ super().__init__(**kwargs)
645
+ self.use_bottleneck = config.use_bottleneck
646
+ self.num_feedforward_networks = config.num_feedforward_networks
647
+ self.attention = TFMobileBertAttention(config, name="attention")
648
+ self.intermediate = TFMobileBertIntermediate(config, name="intermediate")
649
+ self.mobilebert_output = TFMobileBertOutput(config, name="output")
650
+
651
+ if self.use_bottleneck:
652
+ self.bottleneck = TFBottleneck(config, name="bottleneck")
653
+ if config.num_feedforward_networks > 1:
654
+ self.ffn = [TFFFNLayer(config, name=f"ffn.{i}") for i in range(config.num_feedforward_networks - 1)]
655
+
656
+ def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
657
+ if self.use_bottleneck:
658
+ query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
659
+ else:
660
+ query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
661
+
662
+ attention_outputs = self.attention(
663
+ query_tensor,
664
+ key_tensor,
665
+ value_tensor,
666
+ layer_input,
667
+ attention_mask,
668
+ head_mask,
669
+ output_attentions,
670
+ training=training,
671
+ )
672
+
673
+ attention_output = attention_outputs[0]
674
+ s = (attention_output,)
675
+
676
+ if self.num_feedforward_networks != 1:
677
+ for i, ffn_module in enumerate(self.ffn):
678
+ attention_output = ffn_module(attention_output)
679
+ s += (attention_output,)
680
+
681
+ intermediate_output = self.intermediate(attention_output)
682
+ layer_output = self.mobilebert_output(intermediate_output, attention_output, hidden_states, training=training)
683
+
684
+ outputs = (
685
+ (layer_output,)
686
+ + attention_outputs[1:]
687
+ + (
688
+ tf.constant(0),
689
+ query_tensor,
690
+ key_tensor,
691
+ value_tensor,
692
+ layer_input,
693
+ attention_output,
694
+ intermediate_output,
695
+ )
696
+ + s
697
+ ) # add attentions if we output them
698
+
699
+ return outputs
700
+
701
+ def build(self, input_shape=None):
702
+ if self.built:
703
+ return
704
+ self.built = True
705
+ if getattr(self, "attention", None) is not None:
706
+ with tf.name_scope(self.attention.name):
707
+ self.attention.build(None)
708
+ if getattr(self, "intermediate", None) is not None:
709
+ with tf.name_scope(self.intermediate.name):
710
+ self.intermediate.build(None)
711
+ if getattr(self, "mobilebert_output", None) is not None:
712
+ with tf.name_scope(self.mobilebert_output.name):
713
+ self.mobilebert_output.build(None)
714
+ if getattr(self, "bottleneck", None) is not None:
715
+ with tf.name_scope(self.bottleneck.name):
716
+ self.bottleneck.build(None)
717
+ if getattr(self, "ffn", None) is not None:
718
+ for layer in self.ffn:
719
+ with tf.name_scope(layer.name):
720
+ layer.build(None)
721
+
722
+
723
+ class TFMobileBertEncoder(keras.layers.Layer):
724
+ def __init__(self, config, **kwargs):
725
+ super().__init__(**kwargs)
726
+ self.output_attentions = config.output_attentions
727
+ self.output_hidden_states = config.output_hidden_states
728
+ self.layer = [TFMobileBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
729
+
730
+ def call(
731
+ self,
732
+ hidden_states,
733
+ attention_mask,
734
+ head_mask,
735
+ output_attentions,
736
+ output_hidden_states,
737
+ return_dict,
738
+ training=False,
739
+ ):
740
+ all_hidden_states = () if output_hidden_states else None
741
+ all_attentions = () if output_attentions else None
742
+ for i, layer_module in enumerate(self.layer):
743
+ if output_hidden_states:
744
+ all_hidden_states = all_hidden_states + (hidden_states,)
745
+
746
+ layer_outputs = layer_module(
747
+ hidden_states, attention_mask, head_mask[i], output_attentions, training=training
748
+ )
749
+
750
+ hidden_states = layer_outputs[0]
751
+
752
+ if output_attentions:
753
+ all_attentions = all_attentions + (layer_outputs[1],)
754
+
755
+ # Add last layer
756
+ if output_hidden_states:
757
+ all_hidden_states = all_hidden_states + (hidden_states,)
758
+
759
+ if not return_dict:
760
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
761
+ return TFBaseModelOutput(
762
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
763
+ )
764
+
765
+ def build(self, input_shape=None):
766
+ if self.built:
767
+ return
768
+ self.built = True
769
+ if getattr(self, "layer", None) is not None:
770
+ for layer in self.layer:
771
+ with tf.name_scope(layer.name):
772
+ layer.build(None)
773
+
774
+
775
+ class TFMobileBertPooler(keras.layers.Layer):
776
+ def __init__(self, config, **kwargs):
777
+ super().__init__(**kwargs)
778
+ self.do_activate = config.classifier_activation
779
+ if self.do_activate:
780
+ self.dense = keras.layers.Dense(
781
+ config.hidden_size,
782
+ kernel_initializer=get_initializer(config.initializer_range),
783
+ activation="tanh",
784
+ name="dense",
785
+ )
786
+ self.config = config
787
+
788
+ def call(self, hidden_states):
789
+ # We "pool" the model by simply taking the hidden state corresponding
790
+ # to the first token.
791
+ first_token_tensor = hidden_states[:, 0]
792
+ if not self.do_activate:
793
+ return first_token_tensor
794
+ else:
795
+ pooled_output = self.dense(first_token_tensor)
796
+ return pooled_output
797
+
798
+ def build(self, input_shape=None):
799
+ if self.built:
800
+ return
801
+ self.built = True
802
+ if getattr(self, "dense", None) is not None:
803
+ with tf.name_scope(self.dense.name):
804
+ self.dense.build([None, None, self.config.hidden_size])
805
+
806
+
807
+ class TFMobileBertPredictionHeadTransform(keras.layers.Layer):
808
+ def __init__(self, config, **kwargs):
809
+ super().__init__(**kwargs)
810
+ self.dense = keras.layers.Dense(
811
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
812
+ )
813
+ if isinstance(config.hidden_act, str):
814
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
815
+ else:
816
+ self.transform_act_fn = config.hidden_act
817
+ self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm")
818
+ self.config = config
819
+
820
+ def call(self, hidden_states):
821
+ hidden_states = self.dense(hidden_states)
822
+ hidden_states = self.transform_act_fn(hidden_states)
823
+ hidden_states = self.LayerNorm(hidden_states)
824
+ return hidden_states
825
+
826
+ def build(self, input_shape=None):
827
+ if self.built:
828
+ return
829
+ self.built = True
830
+ if getattr(self, "dense", None) is not None:
831
+ with tf.name_scope(self.dense.name):
832
+ self.dense.build([None, None, self.config.hidden_size])
833
+ if getattr(self, "LayerNorm", None) is not None:
834
+ with tf.name_scope(self.LayerNorm.name):
835
+ self.LayerNorm.build(None)
836
+
837
+
838
+ class TFMobileBertLMPredictionHead(keras.layers.Layer):
839
+ def __init__(self, config, **kwargs):
840
+ super().__init__(**kwargs)
841
+ self.transform = TFMobileBertPredictionHeadTransform(config, name="transform")
842
+ self.config = config
843
+
844
+ def build(self, input_shape=None):
845
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
846
+ self.dense = self.add_weight(
847
+ shape=(self.config.hidden_size - self.config.embedding_size, self.config.vocab_size),
848
+ initializer="zeros",
849
+ trainable=True,
850
+ name="dense/weight",
851
+ )
852
+ self.decoder = self.add_weight(
853
+ shape=(self.config.vocab_size, self.config.embedding_size),
854
+ initializer="zeros",
855
+ trainable=True,
856
+ name="decoder/weight",
857
+ )
858
+
859
+ if self.built:
860
+ return
861
+ self.built = True
862
+ if getattr(self, "transform", None) is not None:
863
+ with tf.name_scope(self.transform.name):
864
+ self.transform.build(None)
865
+
866
+ def get_output_embeddings(self):
867
+ return self
868
+
869
+ def set_output_embeddings(self, value):
870
+ self.decoder = value
871
+ self.config.vocab_size = shape_list(value)[0]
872
+
873
+ def get_bias(self):
874
+ return {"bias": self.bias}
875
+
876
+ def set_bias(self, value):
877
+ self.bias = value["bias"]
878
+ self.config.vocab_size = shape_list(value["bias"])[0]
879
+
880
+ def call(self, hidden_states):
881
+ hidden_states = self.transform(hidden_states)
882
+ hidden_states = tf.matmul(hidden_states, tf.concat([tf.transpose(self.decoder), self.dense], axis=0))
883
+ hidden_states = hidden_states + self.bias
884
+ return hidden_states
885
+
886
+
887
+ class TFMobileBertMLMHead(keras.layers.Layer):
888
+ def __init__(self, config, **kwargs):
889
+ super().__init__(**kwargs)
890
+ self.predictions = TFMobileBertLMPredictionHead(config, name="predictions")
891
+
892
+ def call(self, sequence_output):
893
+ prediction_scores = self.predictions(sequence_output)
894
+ return prediction_scores
895
+
896
+ def build(self, input_shape=None):
897
+ if self.built:
898
+ return
899
+ self.built = True
900
+ if getattr(self, "predictions", None) is not None:
901
+ with tf.name_scope(self.predictions.name):
902
+ self.predictions.build(None)
903
+
904
+
905
+ @keras_serializable
906
+ class TFMobileBertMainLayer(keras.layers.Layer):
907
+ config_class = MobileBertConfig
908
+
909
+ def __init__(self, config, add_pooling_layer=True, **kwargs):
910
+ super().__init__(**kwargs)
911
+
912
+ self.config = config
913
+ self.num_hidden_layers = config.num_hidden_layers
914
+ self.output_attentions = config.output_attentions
915
+ self.output_hidden_states = config.output_hidden_states
916
+ self.return_dict = config.use_return_dict
917
+
918
+ self.embeddings = TFMobileBertEmbeddings(config, name="embeddings")
919
+ self.encoder = TFMobileBertEncoder(config, name="encoder")
920
+ self.pooler = TFMobileBertPooler(config, name="pooler") if add_pooling_layer else None
921
+
922
+ def get_input_embeddings(self):
923
+ return self.embeddings
924
+
925
+ def set_input_embeddings(self, value):
926
+ self.embeddings.weight = value
927
+ self.embeddings.vocab_size = shape_list(value)[0]
928
+
929
+ def _prune_heads(self, heads_to_prune):
930
+ """
931
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
932
+ class PreTrainedModel
933
+ """
934
+ raise NotImplementedError
935
+
936
+ @unpack_inputs
937
+ def call(
938
+ self,
939
+ input_ids=None,
940
+ attention_mask=None,
941
+ token_type_ids=None,
942
+ position_ids=None,
943
+ head_mask=None,
944
+ inputs_embeds=None,
945
+ output_attentions=None,
946
+ output_hidden_states=None,
947
+ return_dict=None,
948
+ training=False,
949
+ ):
950
+ if input_ids is not None and inputs_embeds is not None:
951
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
952
+ elif input_ids is not None:
953
+ input_shape = shape_list(input_ids)
954
+ elif inputs_embeds is not None:
955
+ input_shape = shape_list(inputs_embeds)[:-1]
956
+ else:
957
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
958
+
959
+ if attention_mask is None:
960
+ attention_mask = tf.fill(input_shape, 1)
961
+
962
+ if token_type_ids is None:
963
+ token_type_ids = tf.fill(input_shape, 0)
964
+
965
+ embedding_output = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)
966
+
967
+ # We create a 3D attention mask from a 2D tensor mask.
968
+ # Sizes are [batch_size, 1, 1, to_seq_length]
969
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
970
+ # this attention mask is more simple than the triangular masking of causal attention
971
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
972
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
973
+
974
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
975
+ # masked positions, this operation will create a tensor which is 0.0 for
976
+ # positions we want to attend and -10000.0 for masked positions.
977
+ # Since we are adding it to the raw scores before the softmax, this is
978
+ # effectively the same as removing these entirely.
979
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
980
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
981
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
982
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
983
+
984
+ # Prepare head mask if needed
985
+ # 1.0 in head_mask indicate we keep the head
986
+ # attention_probs has shape bsz x n_heads x N x N
987
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
988
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
989
+ if head_mask is not None:
990
+ raise NotImplementedError
991
+ else:
992
+ head_mask = [None] * self.num_hidden_layers
993
+
994
+ encoder_outputs = self.encoder(
995
+ embedding_output,
996
+ extended_attention_mask,
997
+ head_mask,
998
+ output_attentions,
999
+ output_hidden_states,
1000
+ return_dict,
1001
+ training=training,
1002
+ )
1003
+
1004
+ sequence_output = encoder_outputs[0]
1005
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1006
+
1007
+ if not return_dict:
1008
+ return (
1009
+ sequence_output,
1010
+ pooled_output,
1011
+ ) + encoder_outputs[1:]
1012
+
1013
+ return TFBaseModelOutputWithPooling(
1014
+ last_hidden_state=sequence_output,
1015
+ pooler_output=pooled_output,
1016
+ hidden_states=encoder_outputs.hidden_states,
1017
+ attentions=encoder_outputs.attentions,
1018
+ )
1019
+
1020
+ def build(self, input_shape=None):
1021
+ if self.built:
1022
+ return
1023
+ self.built = True
1024
+ if getattr(self, "embeddings", None) is not None:
1025
+ with tf.name_scope(self.embeddings.name):
1026
+ self.embeddings.build(None)
1027
+ if getattr(self, "encoder", None) is not None:
1028
+ with tf.name_scope(self.encoder.name):
1029
+ self.encoder.build(None)
1030
+ if getattr(self, "pooler", None) is not None:
1031
+ with tf.name_scope(self.pooler.name):
1032
+ self.pooler.build(None)
1033
+
1034
+
1035
+ class TFMobileBertPreTrainedModel(TFPreTrainedModel):
1036
+ """
1037
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1038
+ models.
1039
+ """
1040
+
1041
+ config_class = MobileBertConfig
1042
+ base_model_prefix = "mobilebert"
1043
+
1044
+
1045
+ @dataclass
1046
+ class TFMobileBertForPreTrainingOutput(ModelOutput):
1047
+ """
1048
+ Output type of [`TFMobileBertForPreTraining`].
1049
+
1050
+ Args:
1051
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
1052
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
1053
+ seq_relationship_logits (`tf.Tensor` of shape `(batch_size, 2)`):
1054
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
1055
+ before SoftMax).
1056
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
1057
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
1058
+ `(batch_size, sequence_length, hidden_size)`.
1059
+
1060
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1061
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
1062
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1063
+ sequence_length)`.
1064
+
1065
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1066
+ heads.
1067
+ """
1068
+
1069
+ loss: tf.Tensor | None = None
1070
+ prediction_logits: tf.Tensor = None
1071
+ seq_relationship_logits: tf.Tensor = None
1072
+ hidden_states: Tuple[tf.Tensor] | None = None
1073
+ attentions: Tuple[tf.Tensor] | None = None
1074
+
1075
+
1076
+ MOBILEBERT_START_DOCSTRING = r"""
1077
+
1078
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1079
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1080
+ etc.)
1081
+
1082
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1083
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1084
+ behavior.
1085
+
1086
+ <Tip>
1087
+
1088
+ TensorFlow models and layers in `transformers` accept two formats as input:
1089
+
1090
+ - having all inputs as keyword arguments (like PyTorch models), or
1091
+ - having all inputs as a list, tuple or dict in the first positional argument.
1092
+
1093
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1094
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1095
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1096
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1097
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1098
+ positional argument:
1099
+
1100
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1101
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1102
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1103
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1104
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1105
+
1106
+ Note that when creating models and layers with
1107
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1108
+ about any of this, as you can just pass inputs like you would to any other Python function!
1109
+
1110
+ </Tip>
1111
+
1112
+ Parameters:
1113
+ config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model.
1114
+ Initializing with a config file does not load the weights associated with the model, only the
1115
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1116
+ """
1117
+
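As a minimal sketch of the three input formats enumerated in the docstring above (it assumes TensorFlow, `transformers`, and download access to the `google/mobilebert-uncased` checkpoint):

```python
from transformers import AutoTokenizer, TFMobileBertModel

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertModel.from_pretrained("google/mobilebert-uncased")
enc = tokenizer("Hello world", return_tensors="tf")

# 1) a single tensor with input_ids only
out1 = model(enc["input_ids"])
# 2) a list with tensors in the order given in the docstring
out2 = model([enc["input_ids"], enc["attention_mask"]])
# 3) a dictionary keyed by input names (what the tokenizer returns)
out3 = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})
```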
1118
+ MOBILEBERT_INPUTS_DOCSTRING = r"""
1119
+ Args:
1120
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
1121
+ Indices of input sequence tokens in the vocabulary.
1122
+
1123
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1124
+ [`PreTrainedTokenizer.encode`] for details.
1125
+
1126
+ [What are input IDs?](../glossary#input-ids)
1127
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1128
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1129
+
1130
+ - 1 for tokens that are **not masked**,
1131
+ - 0 for tokens that are **masked**.
1132
+
1133
+ [What are attention masks?](../glossary#attention-mask)
1134
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1135
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1136
+ 1]`:
1137
+
1138
+ - 0 corresponds to a *sentence A* token,
1139
+ - 1 corresponds to a *sentence B* token.
1140
+
1141
+ [What are token type IDs?](../glossary#token-type-ids)
1142
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1143
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1144
+ config.max_position_embeddings - 1]`.
1145
+
1146
+ [What are position IDs?](../glossary#position-ids)
1147
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1148
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1149
+
1150
+ - 1 indicates the head is **not masked**,
1151
+ - 0 indicates the head is **masked**.
1152
+
1153
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1154
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1155
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1156
+ model's internal embedding lookup matrix.
1157
+ output_attentions (`bool`, *optional*):
1158
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1159
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1160
+ config will be used instead.
1161
+ output_hidden_states (`bool`, *optional*):
1162
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1163
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1164
+ used instead.
1165
+ return_dict (`bool`, *optional*):
1166
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1167
+ eager mode, in graph mode the value will always be set to True.
1168
+ training (`bool`, *optional*, defaults to `False`):
1169
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1170
+ behaviors between training and evaluation).
1171
+ """
1172
+
1173
+
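A small illustrative sketch of how the tensors documented above are usually produced by a tokenizer (assumes the `google/mobilebert-uncased` checkpoint; the sentence pair is made up):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
encoding = tokenizer("Who was Jim Henson?", "Jim Henson was a puppeteer", return_tensors="tf")

print(encoding["input_ids"].shape)         # (1, sequence_length)
print(encoding["attention_mask"].numpy())  # 1 for real tokens, 0 for padding
print(encoding["token_type_ids"].numpy())  # 0 for sentence A, 1 for sentence B
```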
1174
+ @add_start_docstrings(
1175
+ "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
1176
+ MOBILEBERT_START_DOCSTRING,
1177
+ )
1178
+ class TFMobileBertModel(TFMobileBertPreTrainedModel):
1179
+ def __init__(self, config, *inputs, **kwargs):
1180
+ super().__init__(config, *inputs, **kwargs)
1181
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1182
+
1183
+ @unpack_inputs
1184
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1185
+ @add_code_sample_docstrings(
1186
+ checkpoint=_CHECKPOINT_FOR_DOC,
1187
+ output_type=TFBaseModelOutputWithPooling,
1188
+ config_class=_CONFIG_FOR_DOC,
1189
+ )
1190
+ def call(
1191
+ self,
1192
+ input_ids: TFModelInputType | None = None,
1193
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1194
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1195
+ position_ids: np.ndarray | tf.Tensor | None = None,
1196
+ head_mask: np.ndarray | tf.Tensor | None = None,
1197
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1198
+ output_attentions: Optional[bool] = None,
1199
+ output_hidden_states: Optional[bool] = None,
1200
+ return_dict: Optional[bool] = None,
1201
+ training: Optional[bool] = False,
1202
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
1203
+ outputs = self.mobilebert(
1204
+ input_ids=input_ids,
1205
+ attention_mask=attention_mask,
1206
+ token_type_ids=token_type_ids,
1207
+ position_ids=position_ids,
1208
+ head_mask=head_mask,
1209
+ inputs_embeds=inputs_embeds,
1210
+ output_attentions=output_attentions,
1211
+ output_hidden_states=output_hidden_states,
1212
+ return_dict=return_dict,
1213
+ training=training,
1214
+ )
1215
+
1216
+ return outputs
1217
+
1218
+ def build(self, input_shape=None):
1219
+ if self.built:
1220
+ return
1221
+ self.built = True
1222
+ if getattr(self, "mobilebert", None) is not None:
1223
+ with tf.name_scope(self.mobilebert.name):
1224
+ self.mobilebert.build(None)
1225
+
1226
+
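A short usage sketch for the bare model defined above (assumes download access to `google/mobilebert-uncased`; the output shapes follow the config's `hidden_size`):

```python
from transformers import AutoTokenizer, TFMobileBertModel

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertModel.from_pretrained("google/mobilebert-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(inputs)

last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)
pooled_output = outputs.pooler_output          # (batch_size, hidden_size)
```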
1227
+ @add_start_docstrings(
1228
+ """
1229
+ MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
1230
+ `next sentence prediction (classification)` head.
1231
+ """,
1232
+ MOBILEBERT_START_DOCSTRING,
1233
+ )
1234
+ class TFMobileBertForPreTraining(TFMobileBertPreTrainedModel, TFMobileBertPreTrainingLoss):
1235
+ def __init__(self, config, *inputs, **kwargs):
1236
+ super().__init__(config, *inputs, **kwargs)
1237
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1238
+ self.predictions = TFMobileBertMLMHead(config, name="predictions___cls")
1239
+ self.seq_relationship = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls")
1240
+
1241
+ def get_lm_head(self):
1242
+ return self.predictions.predictions
1243
+
1244
+ def get_prefix_bias_name(self):
1245
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1246
+ return self.name + "/" + self.predictions.name + "/" + self.predictions.predictions.name
1247
+
1248
+ @unpack_inputs
1249
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1250
+ @replace_return_docstrings(output_type=TFMobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1251
+ def call(
1252
+ self,
1253
+ input_ids: TFModelInputType | None = None,
1254
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1255
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1256
+ position_ids: np.ndarray | tf.Tensor | None = None,
1257
+ head_mask: np.ndarray | tf.Tensor | None = None,
1258
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1259
+ output_attentions: Optional[bool] = None,
1260
+ output_hidden_states: Optional[bool] = None,
1261
+ return_dict: Optional[bool] = None,
1262
+ labels: np.ndarray | tf.Tensor | None = None,
1263
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
1264
+ training: Optional[bool] = False,
1265
+ ) -> Union[Tuple, TFMobileBertForPreTrainingOutput]:
1266
+ r"""
1267
+ Return:
1268
+
1269
+ Examples:
1270
+
1271
+ ```python
1272
+ >>> import tensorflow as tf
1273
+ >>> from transformers import AutoTokenizer, TFMobileBertForPreTraining
1274
+
1275
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
1276
+ >>> model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
1277
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
1278
+ >>> outputs = model(input_ids)
1279
+ >>> prediction_scores, seq_relationship_scores = outputs[:2]
1280
+ ```"""
1281
+ outputs = self.mobilebert(
1282
+ input_ids,
1283
+ attention_mask=attention_mask,
1284
+ token_type_ids=token_type_ids,
1285
+ position_ids=position_ids,
1286
+ head_mask=head_mask,
1287
+ inputs_embeds=inputs_embeds,
1288
+ output_attentions=output_attentions,
1289
+ output_hidden_states=output_hidden_states,
1290
+ return_dict=return_dict,
1291
+ training=training,
1292
+ )
1293
+
1294
+ sequence_output, pooled_output = outputs[:2]
1295
+ prediction_scores = self.predictions(sequence_output)
1296
+ seq_relationship_score = self.seq_relationship(pooled_output)
1297
+
1298
+ total_loss = None
1299
+ if labels is not None and next_sentence_label is not None:
1300
+ d_labels = {"labels": labels}
1301
+ d_labels["next_sentence_label"] = next_sentence_label
1302
+ total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))
1303
+
1304
+ if not return_dict:
1305
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1306
+ return ((total_loss,) + output) if total_loss is not None else output
1307
+
1308
+ return TFMobileBertForPreTrainingOutput(
1309
+ loss=total_loss,
1310
+ prediction_logits=prediction_scores,
1311
+ seq_relationship_logits=seq_relationship_score,
1312
+ hidden_states=outputs.hidden_states,
1313
+ attentions=outputs.attentions,
1314
+ )
1315
+
1316
+ def build(self, input_shape=None):
1317
+ if self.built:
1318
+ return
1319
+ self.built = True
1320
+ if getattr(self, "mobilebert", None) is not None:
1321
+ with tf.name_scope(self.mobilebert.name):
1322
+ self.mobilebert.build(None)
1323
+ if getattr(self, "predictions", None) is not None:
1324
+ with tf.name_scope(self.predictions.name):
1325
+ self.predictions.build(None)
1326
+ if getattr(self, "seq_relationship", None) is not None:
1327
+ with tf.name_scope(self.seq_relationship.name):
1328
+ self.seq_relationship.build(None)
1329
+
1330
+ def tf_to_pt_weight_rename(self, tf_weight):
1331
+ if tf_weight == "cls.predictions.decoder.weight":
1332
+ return tf_weight, "mobilebert.embeddings.word_embeddings.weight"
1333
+ else:
1334
+ return (tf_weight,)
1335
+
1336
+
1337
+ @add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING)
1338
+ class TFMobileBertForMaskedLM(TFMobileBertPreTrainedModel, TFMaskedLanguageModelingLoss):
1339
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1340
+ _keys_to_ignore_on_load_unexpected = [
1341
+ r"pooler",
1342
+ r"seq_relationship___cls",
1343
+ r"cls.seq_relationship",
1344
+ ]
1345
+
1346
+ def __init__(self, config, *inputs, **kwargs):
1347
+ super().__init__(config, *inputs, **kwargs)
1348
+
1349
+ self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
1350
+ self.predictions = TFMobileBertMLMHead(config, name="predictions___cls")
1351
+
1352
+ def get_lm_head(self):
1353
+ return self.predictions.predictions
1354
+
1355
+ def get_prefix_bias_name(self):
1356
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1357
+ return self.name + "/" + self.predictions.name + "/" + self.predictions.predictions.name
1358
+
1359
+ @unpack_inputs
1360
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1361
+ @add_code_sample_docstrings(
1362
+ checkpoint=_CHECKPOINT_FOR_DOC,
1363
+ output_type=TFMaskedLMOutput,
1364
+ config_class=_CONFIG_FOR_DOC,
1365
+ expected_output="'paris'",
1366
+ expected_loss=0.57,
1367
+ )
1368
+ def call(
1369
+ self,
1370
+ input_ids: TFModelInputType | None = None,
1371
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1372
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1373
+ position_ids: np.ndarray | tf.Tensor | None = None,
1374
+ head_mask: np.ndarray | tf.Tensor | None = None,
1375
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1376
+ output_attentions: Optional[bool] = None,
1377
+ output_hidden_states: Optional[bool] = None,
1378
+ return_dict: Optional[bool] = None,
1379
+ labels: np.ndarray | tf.Tensor | None = None,
1380
+ training: Optional[bool] = False,
1381
+ ) -> Union[Tuple, TFMaskedLMOutput]:
1382
+ r"""
1383
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1384
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1385
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1386
+ loss is only computed for the tokens with labels
1387
+ """
1388
+ outputs = self.mobilebert(
1389
+ input_ids,
1390
+ attention_mask=attention_mask,
1391
+ token_type_ids=token_type_ids,
1392
+ position_ids=position_ids,
1393
+ head_mask=head_mask,
1394
+ inputs_embeds=inputs_embeds,
1395
+ output_attentions=output_attentions,
1396
+ output_hidden_states=output_hidden_states,
1397
+ return_dict=return_dict,
1398
+ training=training,
1399
+ )
1400
+ sequence_output = outputs[0]
1401
+ prediction_scores = self.predictions(sequence_output, training=training)
1402
+
1403
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1404
+
1405
+ if not return_dict:
1406
+ output = (prediction_scores,) + outputs[2:]
1407
+ return ((loss,) + output) if loss is not None else output
1408
+
1409
+ return TFMaskedLMOutput(
1410
+ loss=loss,
1411
+ logits=prediction_scores,
1412
+ hidden_states=outputs.hidden_states,
1413
+ attentions=outputs.attentions,
1414
+ )
1415
+
1416
+ def build(self, input_shape=None):
1417
+ if self.built:
1418
+ return
1419
+ self.built = True
1420
+ if getattr(self, "mobilebert", None) is not None:
1421
+ with tf.name_scope(self.mobilebert.name):
1422
+ self.mobilebert.build(None)
1423
+ if getattr(self, "predictions", None) is not None:
1424
+ with tf.name_scope(self.predictions.name):
1425
+ self.predictions.build(None)
1426
+
1427
+ def tf_to_pt_weight_rename(self, tf_weight):
1428
+ if tf_weight == "cls.predictions.decoder.weight":
1429
+ return tf_weight, "mobilebert.embeddings.word_embeddings.weight"
1430
+ else:
1431
+ return (tf_weight,)
1432
+
1433
+
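A hedged usage sketch for the masked-LM head above, recovering the token behind `[MASK]` (assumes `google/mobilebert-uncased`; the prompt is illustrative, and the decorator's `expected_output` suggests a capital-city prompt yields "paris"):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
logits = model(**inputs).logits

# locate the [MASK] position and take the highest-scoring vocabulary token there
mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_index]))
print(tokenizer.decode([predicted_id]))  # expected to be "paris" for this prompt
```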
1434
+ class TFMobileBertOnlyNSPHead(keras.layers.Layer):
1435
+ def __init__(self, config, **kwargs):
1436
+ super().__init__(**kwargs)
1437
+ self.seq_relationship = keras.layers.Dense(2, name="seq_relationship")
1438
+ self.config = config
1439
+
1440
+ def call(self, pooled_output):
1441
+ seq_relationship_score = self.seq_relationship(pooled_output)
1442
+ return seq_relationship_score
1443
+
1444
+ def build(self, input_shape=None):
1445
+ if self.built:
1446
+ return
1447
+ self.built = True
1448
+ if getattr(self, "seq_relationship", None) is not None:
1449
+ with tf.name_scope(self.seq_relationship.name):
1450
+ self.seq_relationship.build([None, None, self.config.hidden_size])
1451
+
1452
+
1453
+ @add_start_docstrings(
1454
+ """MobileBert Model with a `next sentence prediction (classification)` head on top.""",
1455
+ MOBILEBERT_START_DOCSTRING,
1456
+ )
1457
+ class TFMobileBertForNextSentencePrediction(TFMobileBertPreTrainedModel, TFNextSentencePredictionLoss):
1458
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1459
+ _keys_to_ignore_on_load_unexpected = [r"predictions___cls", r"cls.predictions"]
1460
+
1461
+ def __init__(self, config, *inputs, **kwargs):
1462
+ super().__init__(config, *inputs, **kwargs)
1463
+
1464
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1465
+ self.cls = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls")
1466
+
1467
+ @unpack_inputs
1468
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1469
+ @replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1470
+ def call(
1471
+ self,
1472
+ input_ids: TFModelInputType | None = None,
1473
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1474
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1475
+ position_ids: np.ndarray | tf.Tensor | None = None,
1476
+ head_mask: np.ndarray | tf.Tensor | None = None,
1477
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1478
+ output_attentions: Optional[bool] = None,
1479
+ output_hidden_states: Optional[bool] = None,
1480
+ return_dict: Optional[bool] = None,
1481
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
1482
+ training: Optional[bool] = False,
1483
+ ) -> Union[Tuple, TFNextSentencePredictorOutput]:
1484
+ r"""
1485
+ Return:
1486
+
1487
+ Examples:
1488
+
1489
+ ```python
1490
+ >>> import tensorflow as tf
1491
+ >>> from transformers import AutoTokenizer, TFMobileBertForNextSentencePrediction
1492
+
1493
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
1494
+ >>> model = TFMobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
1495
+
1496
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1497
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1498
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="tf")
1499
+
1500
+ >>> logits = model(encoding["input_ids"], token_type_ids=encoding["token_type_ids"])[0]
1501
+ ```"""
1502
+ outputs = self.mobilebert(
1503
+ input_ids,
1504
+ attention_mask=attention_mask,
1505
+ token_type_ids=token_type_ids,
1506
+ position_ids=position_ids,
1507
+ head_mask=head_mask,
1508
+ inputs_embeds=inputs_embeds,
1509
+ output_attentions=output_attentions,
1510
+ output_hidden_states=output_hidden_states,
1511
+ return_dict=return_dict,
1512
+ training=training,
1513
+ )
1514
+ pooled_output = outputs[1]
1515
+ seq_relationship_scores = self.cls(pooled_output)
1516
+
1517
+ next_sentence_loss = (
1518
+ None
1519
+ if next_sentence_label is None
1520
+ else self.hf_compute_loss(labels=next_sentence_label, logits=seq_relationship_scores)
1521
+ )
1522
+
1523
+ if not return_dict:
1524
+ output = (seq_relationship_scores,) + outputs[2:]
1525
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1526
+
1527
+ return TFNextSentencePredictorOutput(
1528
+ loss=next_sentence_loss,
1529
+ logits=seq_relationship_scores,
1530
+ hidden_states=outputs.hidden_states,
1531
+ attentions=outputs.attentions,
1532
+ )
1533
+
1534
+ def build(self, input_shape=None):
1535
+ if self.built:
1536
+ return
1537
+ self.built = True
1538
+ if getattr(self, "mobilebert", None) is not None:
1539
+ with tf.name_scope(self.mobilebert.name):
1540
+ self.mobilebert.build(None)
1541
+ if getattr(self, "cls", None) is not None:
1542
+ with tf.name_scope(self.cls.name):
1543
+ self.cls.build(None)
1544
+
1545
+
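The docstring example above stops at the raw logits; this sketch repeats it and adds the interpretation step (index 0 scores "is the next sentence", index 1 "is a random sentence"):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertForNextSentencePrediction

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")

prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
next_sentence = "The sky is blue due to the shorter wavelength of blue light."
encoding = tokenizer(prompt, next_sentence, return_tensors="tf")

logits = model(encoding["input_ids"], token_type_ids=encoding["token_type_ids"]).logits
probs = tf.nn.softmax(logits, axis=-1)
# index 1 ("random sentence") is expected to win for this unrelated pair
print(int(tf.argmax(logits, axis=-1)[0]), probs.numpy())
```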
1546
+ @add_start_docstrings(
1547
+ """
1548
+ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1549
+ pooled output) e.g. for GLUE tasks.
1550
+ """,
1551
+ MOBILEBERT_START_DOCSTRING,
1552
+ )
1553
+ class TFMobileBertForSequenceClassification(TFMobileBertPreTrainedModel, TFSequenceClassificationLoss):
1554
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1555
+ _keys_to_ignore_on_load_unexpected = [
1556
+ r"predictions___cls",
1557
+ r"seq_relationship___cls",
1558
+ r"cls.predictions",
1559
+ r"cls.seq_relationship",
1560
+ ]
1561
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1562
+
1563
+ def __init__(self, config, *inputs, **kwargs):
1564
+ super().__init__(config, *inputs, **kwargs)
1565
+ self.num_labels = config.num_labels
1566
+
1567
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1568
+ classifier_dropout = (
1569
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1570
+ )
1571
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1572
+ self.classifier = keras.layers.Dense(
1573
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1574
+ )
1575
+ self.config = config
1576
+
1577
+ @unpack_inputs
1578
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1579
+ @add_code_sample_docstrings(
1580
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
1581
+ output_type=TFSequenceClassifierOutput,
1582
+ config_class=_CONFIG_FOR_DOC,
1583
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1584
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1585
+ )
1586
+ def call(
1587
+ self,
1588
+ input_ids: TFModelInputType | None = None,
1589
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1590
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1591
+ position_ids: np.ndarray | tf.Tensor | None = None,
1592
+ head_mask: np.ndarray | tf.Tensor | None = None,
1593
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1594
+ output_attentions: Optional[bool] = None,
1595
+ output_hidden_states: Optional[bool] = None,
1596
+ return_dict: Optional[bool] = None,
1597
+ labels: np.ndarray | tf.Tensor | None = None,
1598
+ training: Optional[bool] = False,
1599
+ ) -> Union[Tuple, TFSequenceClassifierOutput]:
1600
+ r"""
1601
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1602
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1603
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1604
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1605
+ """
1606
+ outputs = self.mobilebert(
1607
+ input_ids,
1608
+ attention_mask=attention_mask,
1609
+ token_type_ids=token_type_ids,
1610
+ position_ids=position_ids,
1611
+ head_mask=head_mask,
1612
+ inputs_embeds=inputs_embeds,
1613
+ output_attentions=output_attentions,
1614
+ output_hidden_states=output_hidden_states,
1615
+ return_dict=return_dict,
1616
+ training=training,
1617
+ )
1618
+ pooled_output = outputs[1]
1619
+
1620
+ pooled_output = self.dropout(pooled_output, training=training)
1621
+ logits = self.classifier(pooled_output)
1622
+
1623
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1624
+
1625
+ if not return_dict:
1626
+ output = (logits,) + outputs[2:]
1627
+ return ((loss,) + output) if loss is not None else output
1628
+
1629
+ return TFSequenceClassifierOutput(
1630
+ loss=loss,
1631
+ logits=logits,
1632
+ hidden_states=outputs.hidden_states,
1633
+ attentions=outputs.attentions,
1634
+ )
1635
+
1636
+ def build(self, input_shape=None):
1637
+ if self.built:
1638
+ return
1639
+ self.built = True
1640
+ if getattr(self, "mobilebert", None) is not None:
1641
+ with tf.name_scope(self.mobilebert.name):
1642
+ self.mobilebert.build(None)
1643
+ if getattr(self, "classifier", None) is not None:
1644
+ with tf.name_scope(self.classifier.name):
1645
+ self.classifier.build([None, None, self.config.hidden_size])
1646
+
1647
+
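A sketch of single-label classification with the head above; note that when loading from the base checkpoint the classifier weights are freshly initialized and would normally be fine-tuned first:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForSequenceClassification.from_pretrained("google/mobilebert-uncased", num_labels=2)

inputs = tokenizer("This movie was great!", return_tensors="tf")
labels = tf.constant([1])  # class index in [0, num_labels - 1]

outputs = model(**inputs, labels=labels)
print(outputs.loss)          # loss for the provided label
print(outputs.logits.shape)  # (1, 2)
```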
1648
+ @add_start_docstrings(
1649
+ """
1650
+ MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1651
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1652
+ """,
1653
+ MOBILEBERT_START_DOCSTRING,
1654
+ )
1655
+ class TFMobileBertForQuestionAnswering(TFMobileBertPreTrainedModel, TFQuestionAnsweringLoss):
1656
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1657
+ _keys_to_ignore_on_load_unexpected = [
1658
+ r"pooler",
1659
+ r"predictions___cls",
1660
+ r"seq_relationship___cls",
1661
+ r"cls.predictions",
1662
+ r"cls.seq_relationship",
1663
+ ]
1664
+
1665
+ def __init__(self, config, *inputs, **kwargs):
1666
+ super().__init__(config, *inputs, **kwargs)
1667
+ self.num_labels = config.num_labels
1668
+
1669
+ self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
1670
+ self.qa_outputs = keras.layers.Dense(
1671
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1672
+ )
1673
+ self.config = config
1674
+
1675
+ @unpack_inputs
1676
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1677
+ @add_code_sample_docstrings(
1678
+ checkpoint=_CHECKPOINT_FOR_QA,
1679
+ output_type=TFQuestionAnsweringModelOutput,
1680
+ config_class=_CONFIG_FOR_DOC,
1681
+ qa_target_start_index=_QA_TARGET_START_INDEX,
1682
+ qa_target_end_index=_QA_TARGET_END_INDEX,
1683
+ expected_output=_QA_EXPECTED_OUTPUT,
1684
+ expected_loss=_QA_EXPECTED_LOSS,
1685
+ )
1686
+ def call(
1687
+ self,
1688
+ input_ids: TFModelInputType | None = None,
1689
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1690
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1691
+ position_ids: np.ndarray | tf.Tensor | None = None,
1692
+ head_mask: np.ndarray | tf.Tensor | None = None,
1693
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1694
+ output_attentions: Optional[bool] = None,
1695
+ output_hidden_states: Optional[bool] = None,
1696
+ return_dict: Optional[bool] = None,
1697
+ start_positions: np.ndarray | tf.Tensor | None = None,
1698
+ end_positions: np.ndarray | tf.Tensor | None = None,
1699
+ training: Optional[bool] = False,
1700
+ ) -> Union[Tuple, TFQuestionAnsweringModelOutput]:
1701
+ r"""
1702
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1703
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1704
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1705
+ are not taken into account for computing the loss.
1706
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1707
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1708
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1709
+ are not taken into account for computing the loss.
1710
+ """
1711
+ outputs = self.mobilebert(
1712
+ input_ids,
1713
+ attention_mask=attention_mask,
1714
+ token_type_ids=token_type_ids,
1715
+ position_ids=position_ids,
1716
+ head_mask=head_mask,
1717
+ inputs_embeds=inputs_embeds,
1718
+ output_attentions=output_attentions,
1719
+ output_hidden_states=output_hidden_states,
1720
+ return_dict=return_dict,
1721
+ training=training,
1722
+ )
1723
+ sequence_output = outputs[0]
1724
+
1725
+ logits = self.qa_outputs(sequence_output)
1726
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1727
+ start_logits = tf.squeeze(start_logits, axis=-1)
1728
+ end_logits = tf.squeeze(end_logits, axis=-1)
1729
+
1730
+ loss = None
1731
+ if start_positions is not None and end_positions is not None:
1732
+ labels = {"start_position": start_positions, "end_position": end_positions}
1733
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1734
+
1735
+ if not return_dict:
1736
+ output = (start_logits, end_logits) + outputs[2:]
1737
+ return ((loss,) + output) if loss is not None else output
1738
+
1739
+ return TFQuestionAnsweringModelOutput(
1740
+ loss=loss,
1741
+ start_logits=start_logits,
1742
+ end_logits=end_logits,
1743
+ hidden_states=outputs.hidden_states,
1744
+ attentions=outputs.attentions,
1745
+ )
1746
+
1747
+ def build(self, input_shape=None):
1748
+ if self.built:
1749
+ return
1750
+ self.built = True
1751
+ if getattr(self, "mobilebert", None) is not None:
1752
+ with tf.name_scope(self.mobilebert.name):
1753
+ self.mobilebert.build(None)
1754
+ if getattr(self, "qa_outputs", None) is not None:
1755
+ with tf.name_scope(self.qa_outputs.name):
1756
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1757
+
1758
+
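A span-extraction sketch for the QA head above; with the base checkpoint the `qa_outputs` head is untrained, so the decoded span is only meaningful after fine-tuning on QA data:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForQuestionAnswering.from_pretrained("google/mobilebert-uncased")

question, context = "Who was Jim Henson?", "Jim Henson was a nice puppet"
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

# take the most likely start and end positions and decode the span between them
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
print(answer)
```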
1759
+ @add_start_docstrings(
1760
+ """
1761
+ MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
1762
+ a softmax) e.g. for RocStories/SWAG tasks.
1763
+ """,
1764
+ MOBILEBERT_START_DOCSTRING,
1765
+ )
1766
+ class TFMobileBertForMultipleChoice(TFMobileBertPreTrainedModel, TFMultipleChoiceLoss):
1767
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1768
+ _keys_to_ignore_on_load_unexpected = [
1769
+ r"predictions___cls",
1770
+ r"seq_relationship___cls",
1771
+ r"cls.predictions",
1772
+ r"cls.seq_relationship",
1773
+ ]
1774
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1775
+
1776
+ def __init__(self, config, *inputs, **kwargs):
1777
+ super().__init__(config, *inputs, **kwargs)
1778
+
1779
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1780
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1781
+ self.classifier = keras.layers.Dense(
1782
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1783
+ )
1784
+ self.config = config
1785
+
1786
+ @unpack_inputs
1787
+ @add_start_docstrings_to_model_forward(
1788
+ MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1789
+ )
1790
+ @add_code_sample_docstrings(
1791
+ checkpoint=_CHECKPOINT_FOR_DOC,
1792
+ output_type=TFMultipleChoiceModelOutput,
1793
+ config_class=_CONFIG_FOR_DOC,
1794
+ )
1795
+ def call(
1796
+ self,
1797
+ input_ids: TFModelInputType | None = None,
1798
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1799
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1800
+ position_ids: np.ndarray | tf.Tensor | None = None,
1801
+ head_mask: np.ndarray | tf.Tensor | None = None,
1802
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1803
+ output_attentions: Optional[bool] = None,
1804
+ output_hidden_states: Optional[bool] = None,
1805
+ return_dict: Optional[bool] = None,
1806
+ labels: np.ndarray | tf.Tensor | None = None,
1807
+ training: Optional[bool] = False,
1808
+ ) -> Union[Tuple, TFMultipleChoiceModelOutput]:
1809
+ r"""
1810
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1811
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
1812
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1813
+ """
1814
+ if input_ids is not None:
1815
+ num_choices = shape_list(input_ids)[1]
1816
+ seq_length = shape_list(input_ids)[2]
1817
+ else:
1818
+ num_choices = shape_list(inputs_embeds)[1]
1819
+ seq_length = shape_list(inputs_embeds)[2]
1820
+
1821
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1822
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1823
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1824
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1825
+ flat_inputs_embeds = (
1826
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1827
+ if inputs_embeds is not None
1828
+ else None
1829
+ )
1830
+ outputs = self.mobilebert(
1831
+ flat_input_ids,
1832
+ flat_attention_mask,
1833
+ flat_token_type_ids,
1834
+ flat_position_ids,
1835
+ head_mask,
1836
+ flat_inputs_embeds,
1837
+ output_attentions,
1838
+ output_hidden_states,
1839
+ return_dict=return_dict,
1840
+ training=training,
1841
+ )
1842
+ pooled_output = outputs[1]
1843
+ pooled_output = self.dropout(pooled_output, training=training)
1844
+ logits = self.classifier(pooled_output)
1845
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1846
+
1847
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1848
+
1849
+ if not return_dict:
1850
+ output = (reshaped_logits,) + outputs[2:]
1851
+ return ((loss,) + output) if loss is not None else output
1852
+
1853
+ return TFMultipleChoiceModelOutput(
1854
+ loss=loss,
1855
+ logits=reshaped_logits,
1856
+ hidden_states=outputs.hidden_states,
1857
+ attentions=outputs.attentions,
1858
+ )
1859
+
1860
+ def build(self, input_shape=None):
1861
+ if self.built:
1862
+ return
1863
+ self.built = True
1864
+ if getattr(self, "mobilebert", None) is not None:
1865
+ with tf.name_scope(self.mobilebert.name):
1866
+ self.mobilebert.build(None)
1867
+ if getattr(self, "classifier", None) is not None:
1868
+ with tf.name_scope(self.classifier.name):
1869
+ self.classifier.build([None, None, self.config.hidden_size])
1870
+
1871
+
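A multiple-choice sketch for the head above; each (prompt, choice) pair is encoded and the batch is reshaped to `(batch_size, num_choices, seq_length)` as the docstring expects (the prompt and choices are illustrative):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForMultipleChoice.from_pretrained("google/mobilebert-uncased")

prompt = "The baguette is a French bread."
choice0 = "It is eaten with a fork and a knife."
choice1 = "It is eaten while held in the hand."

# encode each (prompt, choice) pair, then add the num_choices dimension
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True)
inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}  # (1, 2, seq_length)

logits = model(inputs).logits  # (1, 2): one score per choice
print(int(tf.argmax(logits, axis=-1)[0]))
```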
1872
+ @add_start_docstrings(
1873
+ """
1874
+ MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1875
+ for Named-Entity-Recognition (NER) tasks.
1876
+ """,
1877
+ MOBILEBERT_START_DOCSTRING,
1878
+ )
1879
+ class TFMobileBertForTokenClassification(TFMobileBertPreTrainedModel, TFTokenClassificationLoss):
1880
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1881
+ _keys_to_ignore_on_load_unexpected = [
1882
+ r"pooler",
1883
+ r"predictions___cls",
1884
+ r"seq_relationship___cls",
1885
+ r"cls.predictions",
1886
+ r"cls.seq_relationship",
1887
+ ]
1888
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1889
+
1890
+ def __init__(self, config, *inputs, **kwargs):
1891
+ super().__init__(config, *inputs, **kwargs)
1892
+ self.num_labels = config.num_labels
1893
+
1894
+ self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
1895
+ classifier_dropout = (
1896
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1897
+ )
1898
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1899
+ self.classifier = keras.layers.Dense(
1900
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1901
+ )
1902
+ self.config = config
1903
+
1904
+ @unpack_inputs
1905
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1906
+ @add_code_sample_docstrings(
1907
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
1908
+ output_type=TFTokenClassifierOutput,
1909
+ config_class=_CONFIG_FOR_DOC,
1910
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
1911
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
1912
+ )
1913
+ def call(
1914
+ self,
1915
+ input_ids: TFModelInputType | None = None,
1916
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1917
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1918
+ position_ids: np.ndarray | tf.Tensor | None = None,
1919
+ head_mask: np.ndarray | tf.Tensor | None = None,
1920
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1921
+ output_attentions: Optional[bool] = None,
1922
+ output_hidden_states: Optional[bool] = None,
1923
+ return_dict: Optional[bool] = None,
1924
+ labels: np.ndarray | tf.Tensor | None = None,
1925
+ training: Optional[bool] = False,
1926
+ ) -> Union[Tuple, TFTokenClassifierOutput]:
1927
+ r"""
1928
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1929
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1930
+ """
1931
+ outputs = self.mobilebert(
1932
+ input_ids,
1933
+ attention_mask=attention_mask,
1934
+ token_type_ids=token_type_ids,
1935
+ position_ids=position_ids,
1936
+ head_mask=head_mask,
1937
+ inputs_embeds=inputs_embeds,
1938
+ output_attentions=output_attentions,
1939
+ output_hidden_states=output_hidden_states,
1940
+ return_dict=return_dict,
1941
+ training=training,
1942
+ )
1943
+ sequence_output = outputs[0]
1944
+
1945
+ sequence_output = self.dropout(sequence_output, training=training)
1946
+ logits = self.classifier(sequence_output)
1947
+
1948
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1949
+
1950
+ if not return_dict:
1951
+ output = (logits,) + outputs[2:]
1952
+ return ((loss,) + output) if loss is not None else output
1953
+
1954
+ return TFTokenClassifierOutput(
1955
+ loss=loss,
1956
+ logits=logits,
1957
+ hidden_states=outputs.hidden_states,
1958
+ attentions=outputs.attentions,
1959
+ )
1960
+
1961
+ def build(self, input_shape=None):
1962
+ if self.built:
1963
+ return
1964
+ self.built = True
1965
+ if getattr(self, "mobilebert", None) is not None:
1966
+ with tf.name_scope(self.mobilebert.name):
1967
+ self.mobilebert.build(None)
1968
+ if getattr(self, "classifier", None) is not None:
1969
+ with tf.name_scope(self.classifier.name):
1970
+ self.classifier.build([None, None, self.config.hidden_size])
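A token-classification sketch for the head above; `num_labels=9` is an arbitrary illustrative label count (e.g. a CoNLL-style NER scheme) and the head is untrained when loaded from the base checkpoint:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForTokenClassification.from_pretrained("google/mobilebert-uncased", num_labels=9)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
logits = model(**inputs).logits             # (1, sequence_length, num_labels)
predicted_ids = tf.argmax(logits, axis=-1)  # one label id per token
print(predicted_ids.numpy())
```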
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert.py ADDED
@@ -0,0 +1,506 @@
1
+ # coding=utf-8
2
+ #
3
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Tokenization classes for MobileBERT."""
17
+
18
+
19
+ import collections
20
+ import os
21
+ import unicodedata
22
+ from typing import List, Optional, Tuple
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
31
+
32
+
33
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
34
+ def load_vocab(vocab_file):
35
+ """Loads a vocabulary file into a dictionary."""
36
+ vocab = collections.OrderedDict()
37
+ with open(vocab_file, "r", encoding="utf-8") as reader:
38
+ tokens = reader.readlines()
39
+ for index, token in enumerate(tokens):
40
+ token = token.rstrip("\n")
41
+ vocab[token] = index
42
+ return vocab
43
+
44
+
45
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
46
+ def whitespace_tokenize(text):
47
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
48
+ text = text.strip()
49
+ if not text:
50
+ return []
51
+ tokens = text.split()
52
+ return tokens
53
+
54
+
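A toy illustration of the two helpers defined above; the temporary vocabulary file and its four tokens are made up for the example:

```python
import tempfile

# a made-up 4-token vocabulary written to a temporary file
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("[PAD]\n[UNK]\nhello\nworld\n")
    vocab_path = f.name

vocab = load_vocab(vocab_path)
print(vocab)  # OrderedDict([('[PAD]', 0), ('[UNK]', 1), ('hello', 2), ('world', 3)])
print(whitespace_tokenize("  hello   world "))  # ['hello', 'world']
```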
55
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with BERT->MobileBERT,Bert->MobileBert
56
+ class MobileBertTokenizer(PreTrainedTokenizer):
57
+ r"""
58
+ Construct a MobileBERT tokenizer. Based on WordPiece.
59
+
60
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
61
+ this superclass for more information regarding those methods.
62
+
63
+ Args:
64
+ vocab_file (`str`):
65
+ File containing the vocabulary.
66
+ do_lower_case (`bool`, *optional*, defaults to `True`):
67
+ Whether or not to lowercase the input when tokenizing.
68
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
69
+ Whether or not to do basic tokenization before WordPiece.
70
+ never_split (`Iterable`, *optional*):
71
+ Collection of tokens which will never be split during tokenization. Only has an effect when
72
+ `do_basic_tokenize=True`
73
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
74
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
75
+ token instead.
76
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
77
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
78
+ sequence classification or for a text and a question for question answering. It is also used as the last
79
+ token of a sequence built with special tokens.
80
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
81
+ The token used for padding, for example when batching sequences of different lengths.
82
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
83
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
84
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
85
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
86
+ The token used for masking values. This is the token used when training this model with masked language
87
+ modeling. This is the token which the model will try to predict.
88
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
89
+ Whether or not to tokenize Chinese characters.
90
+
91
+ This should likely be deactivated for Japanese (see this
92
+ [issue](https://github.com/huggingface/transformers/issues/328)).
93
+ strip_accents (`bool`, *optional*):
94
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
95
+ value for `lowercase` (as in the original MobileBERT).
96
+ """
97
+
98
+ vocab_files_names = VOCAB_FILES_NAMES
99
+
100
+ def __init__(
101
+ self,
102
+ vocab_file,
103
+ do_lower_case=True,
104
+ do_basic_tokenize=True,
105
+ never_split=None,
106
+ unk_token="[UNK]",
107
+ sep_token="[SEP]",
108
+ pad_token="[PAD]",
109
+ cls_token="[CLS]",
110
+ mask_token="[MASK]",
111
+ tokenize_chinese_chars=True,
112
+ strip_accents=None,
113
+ **kwargs,
114
+ ):
115
+ if not os.path.isfile(vocab_file):
116
+ raise ValueError(
117
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
118
+ " model use `tokenizer = MobileBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
119
+ )
120
+ self.vocab = load_vocab(vocab_file)
121
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
122
+ self.do_basic_tokenize = do_basic_tokenize
123
+ if do_basic_tokenize:
124
+ self.basic_tokenizer = BasicTokenizer(
125
+ do_lower_case=do_lower_case,
126
+ never_split=never_split,
127
+ tokenize_chinese_chars=tokenize_chinese_chars,
128
+ strip_accents=strip_accents,
129
+ )
130
+
131
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
132
+
133
+ super().__init__(
134
+ do_lower_case=do_lower_case,
135
+ do_basic_tokenize=do_basic_tokenize,
136
+ never_split=never_split,
137
+ unk_token=unk_token,
138
+ sep_token=sep_token,
139
+ pad_token=pad_token,
140
+ cls_token=cls_token,
141
+ mask_token=mask_token,
142
+ tokenize_chinese_chars=tokenize_chinese_chars,
143
+ strip_accents=strip_accents,
144
+ **kwargs,
145
+ )
146
+
147
+ @property
148
+ def do_lower_case(self):
149
+ return self.basic_tokenizer.do_lower_case
150
+
151
+ @property
152
+ def vocab_size(self):
153
+ return len(self.vocab)
154
+
155
+ def get_vocab(self):
156
+ return dict(self.vocab, **self.added_tokens_encoder)
157
+
158
+ def _tokenize(self, text, split_special_tokens=False):
159
+ split_tokens = []
160
+ if self.do_basic_tokenize:
161
+ for token in self.basic_tokenizer.tokenize(
162
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
163
+ ):
164
+ # If the token is part of the never_split set
165
+ if token in self.basic_tokenizer.never_split:
166
+ split_tokens.append(token)
167
+ else:
168
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
169
+ else:
170
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
171
+ return split_tokens
172
+
173
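
A quick way to see the two-stage `_tokenize` flow just defined (basic tokenization first, then WordPiece on every non-special token) is to build a tokenizer from a hand-written vocabulary; the toy vocabulary below is made up for the example.

```python
import tempfile
from pathlib import Path

from transformers import MobileBertTokenizer

# Toy vocabulary containing the special tokens plus a few word pieces.
toy_vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "un", "##aff", "##able", "runs", "."]

with tempfile.TemporaryDirectory() as tmp_dir:
    vocab_path = Path(tmp_dir) / "vocab.txt"
    vocab_path.write_text("\n".join(toy_vocab) + "\n", encoding="utf-8")

    tokenizer = MobileBertTokenizer(vocab_file=str(vocab_path))

    # BasicTokenizer lower-cases and splits punctuation, then WordPiece splits into sub-words.
    print(tokenizer.tokenize("Unaffable runs."))
    # ['un', '##aff', '##able', 'runs', '.']
```
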
+ def _convert_token_to_id(self, token):
174
+ """Converts a token (str) in an id using the vocab."""
175
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
176
+
177
+ def _convert_id_to_token(self, index):
178
+ """Converts an index (integer) in a token (str) using the vocab."""
179
+ return self.ids_to_tokens.get(index, self.unk_token)
180
+
181
+ def convert_tokens_to_string(self, tokens):
182
+ """Converts a sequence of tokens (string) in a single string."""
183
+ out_string = " ".join(tokens).replace(" ##", "").strip()
184
+ return out_string
185
+
186
+ def build_inputs_with_special_tokens(
187
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
188
+ ) -> List[int]:
189
+ """
190
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
191
+ adding special tokens. A MobileBERT sequence has the following format:
192
+
193
+ - single sequence: `[CLS] X [SEP]`
194
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
195
+
196
+ Args:
197
+ token_ids_0 (`List[int]`):
198
+ List of IDs to which the special tokens will be added.
199
+ token_ids_1 (`List[int]`, *optional*):
200
+ Optional second list of IDs for sequence pairs.
201
+
202
+ Returns:
203
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
204
+ """
205
+ if token_ids_1 is None:
206
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
207
+ cls = [self.cls_token_id]
208
+ sep = [self.sep_token_id]
209
+ return cls + token_ids_0 + sep + token_ids_1 + sep
210
+
211
+ def get_special_tokens_mask(
212
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
213
+ ) -> List[int]:
214
+ """
215
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
216
+ special tokens using the tokenizer `prepare_for_model` method.
217
+
218
+ Args:
219
+ token_ids_0 (`List[int]`):
220
+ List of IDs.
221
+ token_ids_1 (`List[int]`, *optional*):
222
+ Optional second list of IDs for sequence pairs.
223
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
224
+ Whether or not the token list is already formatted with special tokens for the model.
225
+
226
+ Returns:
227
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
228
+ """
229
+
230
+ if already_has_special_tokens:
231
+ return super().get_special_tokens_mask(
232
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
233
+ )
234
+
235
+ if token_ids_1 is not None:
236
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
237
+ return [1] + ([0] * len(token_ids_0)) + [1]
238
+
239
+ def create_token_type_ids_from_sequences(
240
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
241
+ ) -> List[int]:
242
+ """
243
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT sequence
244
+ pair mask has the following format:
245
+
246
+ ```
247
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
248
+ | first sequence | second sequence |
249
+ ```
250
+
251
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
252
+
253
+ Args:
254
+ token_ids_0 (`List[int]`):
255
+ List of IDs.
256
+ token_ids_1 (`List[int]`, *optional*):
257
+ Optional second list of IDs for sequence pairs.
258
+
259
+ Returns:
260
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
261
+ """
262
+ sep = [self.sep_token_id]
263
+ cls = [self.cls_token_id]
264
+ if token_ids_1 is None:
265
+ return len(cls + token_ids_0 + sep) * [0]
266
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
267
+
268
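
The three methods above only arrange ids around `[CLS]` and `[SEP]`. The sketch below mirrors that layout with placeholder ids (101 and 102 are assumed stand-ins for `[CLS]` and `[SEP]`, and 7, 8, 9, 11, 12 stand in for ordinary tokens) so the pair format, the token type ids and the special tokens mask line up visibly.

```python
# Assumed ids for the example only: 101 = [CLS], 102 = [SEP].
cls_id, sep_id = 101, 102
ids_a = [7, 8, 9]
ids_b = [11, 12]

# Pair of sequences: [CLS] A [SEP] B [SEP]
input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

# Token type ids: 0 for "[CLS] A [SEP]", 1 for "B [SEP]"
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

# Special tokens mask: 1 wherever a special token was inserted.
special_tokens_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]

print(input_ids)            # [101, 7, 8, 9, 102, 11, 12, 102]
print(token_type_ids)       # [0, 0, 0, 0, 0, 1, 1, 1]
print(special_tokens_mask)  # [1, 0, 0, 0, 1, 0, 0, 1]
```
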
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
269
+ index = 0
270
+ if os.path.isdir(save_directory):
271
+ vocab_file = os.path.join(
272
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
273
+ )
274
+ else:
275
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
276
+ with open(vocab_file, "w", encoding="utf-8") as writer:
277
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
278
+ if index != token_index:
279
+ logger.warning(
280
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
281
+ " Please check that the vocabulary is not corrupted!"
282
+ )
283
+ index = token_index
284
+ writer.write(token + "\n")
285
+ index += 1
286
+ return (vocab_file,)
287
+
288
+
289
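
End to end, the tokenizer class above is typically used through `from_pretrained`; the sketch below assumes the `google/mobilebert-uncased` checkpoint is reachable (or cached locally), the exact ids depend on that vocabulary, and the output directory name is hypothetical.

```python
from transformers import MobileBertTokenizer

tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")

# Encoding a pair produces [CLS] A [SEP] B [SEP] plus attention mask and token type ids.
encoded = tokenizer("Hello, world!", "How are you?")
print(encoded["input_ids"])       # ids for: [CLS] hello , world ! [SEP] how are you ? [SEP]
print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second

# Round-trip back to text; convert_tokens_to_string merges the "##" WordPiece continuations.
tokens = tokenizer.convert_ids_to_tokens(encoded["input_ids"])
print(tokenizer.convert_tokens_to_string(tokens))

# save_pretrained writes vocab.txt (via save_vocabulary) next to the tokenizer config.
tokenizer.save_pretrained("./mobilebert-tokenizer")  # hypothetical output directory
```
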
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
290
+ class BasicTokenizer(object):
291
+ """
292
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
293
+
294
+ Args:
295
+ do_lower_case (`bool`, *optional*, defaults to `True`):
296
+ Whether or not to lowercase the input when tokenizing.
297
+ never_split (`Iterable`, *optional*):
298
+ Collection of tokens which will never be split during tokenization. Only has an effect when
299
+ `do_basic_tokenize=True`
300
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
301
+ Whether or not to tokenize Chinese characters.
302
+
303
+ This should likely be deactivated for Japanese (see this
304
+ [issue](https://github.com/huggingface/transformers/issues/328)).
305
+ strip_accents (`bool`, *optional*):
306
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
307
+ value for `lowercase` (as in the original BERT).
308
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
309
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
310
+ the full context of the words, such as contractions.
311
+ """
312
+
313
+ def __init__(
314
+ self,
315
+ do_lower_case=True,
316
+ never_split=None,
317
+ tokenize_chinese_chars=True,
318
+ strip_accents=None,
319
+ do_split_on_punc=True,
320
+ ):
321
+ if never_split is None:
322
+ never_split = []
323
+ self.do_lower_case = do_lower_case
324
+ self.never_split = set(never_split)
325
+ self.tokenize_chinese_chars = tokenize_chinese_chars
326
+ self.strip_accents = strip_accents
327
+ self.do_split_on_punc = do_split_on_punc
328
+
329
+ def tokenize(self, text, never_split=None):
330
+ """
331
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
332
+
333
+ Args:
334
+ never_split (`List[str]`, *optional*)
335
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
336
+ [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
337
+ """
338
+ # union() returns a new set by concatenating the two sets.
339
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
340
+ text = self._clean_text(text)
341
+
342
+ # This was added on November 1st, 2018 for the multilingual and Chinese
343
+ # models. This is also applied to the English models now, but it doesn't
344
+ # matter since the English models were not trained on any Chinese data
345
+ # and generally don't have any Chinese data in them (there are Chinese
346
+ # characters in the vocabulary because Wikipedia does have some Chinese
347
+ # words in the English Wikipedia.).
348
+ if self.tokenize_chinese_chars:
349
+ text = self._tokenize_chinese_chars(text)
350
+ # prevents treating the same character with different unicode codepoints as different characters
351
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
352
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
353
+ split_tokens = []
354
+ for token in orig_tokens:
355
+ if token not in never_split:
356
+ if self.do_lower_case:
357
+ token = token.lower()
358
+ if self.strip_accents is not False:
359
+ token = self._run_strip_accents(token)
360
+ elif self.strip_accents:
361
+ token = self._run_strip_accents(token)
362
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
363
+
364
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
365
+ return output_tokens
366
+
367
+ def _run_strip_accents(self, text):
368
+ """Strips accents from a piece of text."""
369
+ text = unicodedata.normalize("NFD", text)
370
+ output = []
371
+ for char in text:
372
+ cat = unicodedata.category(char)
373
+ if cat == "Mn":
374
+ continue
375
+ output.append(char)
376
+ return "".join(output)
377
+
378
+ def _run_split_on_punc(self, text, never_split=None):
379
+ """Splits punctuation on a piece of text."""
380
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
381
+ return [text]
382
+ chars = list(text)
383
+ i = 0
384
+ start_new_word = True
385
+ output = []
386
+ while i < len(chars):
387
+ char = chars[i]
388
+ if _is_punctuation(char):
389
+ output.append([char])
390
+ start_new_word = True
391
+ else:
392
+ if start_new_word:
393
+ output.append([])
394
+ start_new_word = False
395
+ output[-1].append(char)
396
+ i += 1
397
+
398
+ return ["".join(x) for x in output]
399
+
400
+ def _tokenize_chinese_chars(self, text):
401
+ """Adds whitespace around any CJK character."""
402
+ output = []
403
+ for char in text:
404
+ cp = ord(char)
405
+ if self._is_chinese_char(cp):
406
+ output.append(" ")
407
+ output.append(char)
408
+ output.append(" ")
409
+ else:
410
+ output.append(char)
411
+ return "".join(output)
412
+
413
+ def _is_chinese_char(self, cp):
414
+ """Checks whether CP is the codepoint of a CJK character."""
415
+ # This defines a "chinese character" as anything in the CJK Unicode block:
416
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
417
+ #
418
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
419
+ # despite its name. The modern Korean Hangul alphabet is a different block,
420
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
421
+ # space-separated words, so they are not treated specially and handled
422
+ # like all of the other languages.
423
+ if (
424
+ (cp >= 0x4E00 and cp <= 0x9FFF)
425
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
426
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
427
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
428
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
429
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
430
+ or (cp >= 0xF900 and cp <= 0xFAFF)
431
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
432
+ ): #
433
+ return True
434
+
435
+ return False
436
+
437
+ def _clean_text(self, text):
438
+ """Performs invalid character removal and whitespace cleanup on text."""
439
+ output = []
440
+ for char in text:
441
+ cp = ord(char)
442
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
443
+ continue
444
+ if _is_whitespace(char):
445
+ output.append(" ")
446
+ else:
447
+ output.append(char)
448
+ return "".join(output)
449
+
450
+
451
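
`BasicTokenizer` carries no vocabulary, so it can be exercised on its own; the sample strings below are arbitrary and only illustrate lower-casing, accent stripping, punctuation splitting, `never_split`, and the whitespace added around CJK characters.

```python
from transformers.models.mobilebert.tokenization_mobilebert import BasicTokenizer

basic = BasicTokenizer(do_lower_case=True)

# Lower-casing, accent stripping (NFD + dropping combining marks) and punctuation splitting.
print(basic.tokenize("Héllo, World!"))
# ['hello', ',', 'world', '!']

# Tokens listed in never_split are kept intact.
print(basic.tokenize("[CLS] Hello!", never_split=["[CLS]"]))
# ['[CLS]', 'hello', '!']

# CJK characters get whitespace added around them, so each becomes its own token.
print(basic.tokenize("你好 world"))
# ['你', '好', 'world']
```
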
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
452
+ class WordpieceTokenizer(object):
453
+ """Runs WordPiece tokenization."""
454
+
455
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
456
+ self.vocab = vocab
457
+ self.unk_token = unk_token
458
+ self.max_input_chars_per_word = max_input_chars_per_word
459
+
460
+ def tokenize(self, text):
461
+ """
462
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
463
+ tokenization using the given vocabulary.
464
+
465
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
466
+
467
+ Args:
468
+ text: A single token or whitespace separated tokens. This should have
469
+ already been passed through *BasicTokenizer*.
470
+
471
+ Returns:
472
+ A list of wordpiece tokens.
473
+ """
474
+
475
+ output_tokens = []
476
+ for token in whitespace_tokenize(text):
477
+ chars = list(token)
478
+ if len(chars) > self.max_input_chars_per_word:
479
+ output_tokens.append(self.unk_token)
480
+ continue
481
+
482
+ is_bad = False
483
+ start = 0
484
+ sub_tokens = []
485
+ while start < len(chars):
486
+ end = len(chars)
487
+ cur_substr = None
488
+ while start < end:
489
+ substr = "".join(chars[start:end])
490
+ if start > 0:
491
+ substr = "##" + substr
492
+ if substr in self.vocab:
493
+ cur_substr = substr
494
+ break
495
+ end -= 1
496
+ if cur_substr is None:
497
+ is_bad = True
498
+ break
499
+ sub_tokens.append(cur_substr)
500
+ start = end
501
+
502
+ if is_bad:
503
+ output_tokens.append(self.unk_token)
504
+ else:
505
+ output_tokens.extend(sub_tokens)
506
+ return output_tokens
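
The greedy longest-match-first loop is easiest to see with a hand-written vocabulary; the toy entries below are invented, and any word the vocabulary cannot cover end to end collapses to the unknown token.

```python
from transformers.models.mobilebert.tokenization_mobilebert import WordpieceTokenizer

toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "runs": 3, "[UNK]": 4}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

# Greedy longest-match-first: the longest prefix found in the vocab is taken, then the
# remainder is retried with the "##" continuation prefix.
print(wordpiece.tokenize("unaffable"))  # ['un', '##aff', '##able']

# A word that cannot be fully segmented with the vocabulary becomes [UNK] as a whole.
print(wordpiece.tokenize("unaffordable"))  # ['[UNK]']

# Input is whitespace-split first, so several words can be passed at once.
print(wordpiece.tokenize("unaffable runs"))  # ['un', '##aff', '##able', 'runs']
```
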
llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/tokenization_qwen2_fast.cpython-310.pyc ADDED
Binary file (3.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/reformer/__init__.py ADDED
@@ -0,0 +1,103 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_sentencepiece_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
27
+
28
+ try:
29
+ if not is_sentencepiece_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
35
+
36
+ try:
37
+ if not is_tokenizers_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
43
+
44
+ try:
45
+ if not is_torch_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ _import_structure["modeling_reformer"] = [
51
+ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
52
+ "ReformerAttention",
53
+ "ReformerForMaskedLM",
54
+ "ReformerForQuestionAnswering",
55
+ "ReformerForSequenceClassification",
56
+ "ReformerLayer",
57
+ "ReformerModel",
58
+ "ReformerModelWithLMHead",
59
+ "ReformerPreTrainedModel",
60
+ ]
61
+
62
+
63
+ if TYPE_CHECKING:
64
+ from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
65
+
66
+ try:
67
+ if not is_sentencepiece_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ pass
71
+ else:
72
+ from .tokenization_reformer import ReformerTokenizer
73
+
74
+ try:
75
+ if not is_tokenizers_available():
76
+ raise OptionalDependencyNotAvailable()
77
+ except OptionalDependencyNotAvailable:
78
+ pass
79
+ else:
80
+ from .tokenization_reformer_fast import ReformerTokenizerFast
81
+
82
+ try:
83
+ if not is_torch_available():
84
+ raise OptionalDependencyNotAvailable()
85
+ except OptionalDependencyNotAvailable:
86
+ pass
87
+ else:
88
+ from .modeling_reformer import (
89
+ REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
90
+ ReformerAttention,
91
+ ReformerForMaskedLM,
92
+ ReformerForQuestionAnswering,
93
+ ReformerForSequenceClassification,
94
+ ReformerLayer,
95
+ ReformerModel,
96
+ ReformerModelWithLMHead,
97
+ ReformerPreTrainedModel,
98
+ )
99
+
100
+ else:
101
+ import sys
102
+
103
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
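
The file above only fills `_import_structure`; the heavy submodules are loaded on first attribute access, and the `try`/`except OptionalDependencyNotAvailable` blocks simply skip registering names whose backend (sentencepiece, tokenizers, torch) is missing. The snippet below is a rough, standalone sketch of that lazy-loading idea using PEP 562's module-level `__getattr__`, with `json` and `csv` standing in for the real submodules; it is not the actual `_LazyModule` implementation.

```python
import importlib
import sys

# Map public names to the module that defines them, mirroring the role of _import_structure.
_LAZY_NAMES = {
    "loads": "json",
    "reader": "csv",
}


def __getattr__(name):
    """PEP 562 module-level __getattr__: import the defining module on first access."""
    if name in _LAZY_NAMES:
        module = importlib.import_module(_LAZY_NAMES[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


if __name__ == "__main__":
    # Nothing from _LAZY_NAMES has been imported yet; the attribute access triggers it.
    this_module = sys.modules[__name__]
    print(this_module.loads('{"lazy": true}'))  # {'lazy': True}
```
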
llmeval-env/lib/python3.10/site-packages/transformers/models/reformer/configuration_reformer.py ADDED
@@ -0,0 +1,235 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Reformer model configuration"""
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class ReformerConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`ReformerModel`]. It is used to instantiate a
31
+ Reformer model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of the ReFormer
33
+ [google/reformer-crime-and-punishment](https://huggingface.co/google/reformer-crime-and-punishment) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ attention_head_size (`int`, *optional*, defaults to 64):
40
+ Dimensionality of the projected key, query and value vectors
41
+ attn_layers (`List[str]`, *optional*, defaults to `["local", "lsh", "local", "lsh", "local", "lsh"]`):
42
+ List of attention layer types in ascending order. It can be chosen between a LSHSelfAttention layer
43
+ (`"lsh"`) and a LocalSelfAttention layer (`"local"`).
44
+
45
+ For more information on LSHSelfAttention layer, see [LSH Self Attention](reformer#lsh-self-attention). For
46
+ more information on LocalSelfAttention layer, see [Local Self Attention](reformer#local-self-attention).
47
+ axial_pos_embds (`bool`, *optional*, defaults to `True`):
48
+ Whether or not to use axial position embeddings. For more information on how axial position embeddings
49
+ work, see [Axial Position Encodings](reformer#axial-positional-encodings).
50
+ axial_norm_std (`float`, *optional*, defaults to 1.0):
51
+ The standard deviation of the normal_initializer for initializing the weight matrices of the axial
52
+ positional encodings.
53
+ axial_pos_shape (`List[int]`, *optional*, defaults to `[64, 64]`):
54
+ The position dims of the axial position encodings. During training, the product of the position dims has to
55
+ be equal to the sequence length.
56
+
57
+ For more information on how axial position embeddings work, see [Axial Position
58
+ Encodings](reformer#axial-positional-encodings).
59
+ axial_pos_embds_dim (`List[int]`, *optional*, defaults to `[64, 192]`):
60
+ The embedding dims of the axial position encodings. The sum of the embedding dims has to be equal to the
61
+ hidden size.
62
+
63
+ For more information on how axial position embeddings work, see [Axial Position
64
+ Encodings](reformer#axial-positional-encodings).
65
+ chunk_size_lm_head (`int`, *optional*, defaults to 0):
66
+ The chunk size of the final language model feed forward head layer. A chunk size of 0 means that the feed
67
+ forward layer is not chunked. A chunk size of n means that the feed forward layer processes n <
68
+ sequence_length embeddings at a time.
69
+
70
+ For more information on feed forward chunking, see [How does Feed Forward Chunking
71
+ work?](../glossary#feed-forward-chunking).
72
+ eos_token_id (`int`, *optional*, defaults to 2):
73
+ The token id for the end-of-sentence token.
74
+ feed_forward_size (`int`, *optional*, defaults to 512):
75
+ Dimensionality of the feed_forward layer in the residual attention block.
76
+ hash_seed (`int`, *optional*):
77
+ Seed that can be used to make local sensitive hashing in `LSHSelfAttention` deterministic. This should only
78
+ be set for testing purposed. For evaluation and training purposes `hash_seed` should be left as `None` to
79
+ ensure fully random rotations in local sensitive hashing scheme.
80
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"relu"`):
81
+ The non-linear activation function (function or string) in the feed forward layer in the residual attention
82
+ block. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported.
83
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.05):
84
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
85
+ hidden_size (`int`, *optional*, defaults to 256):
86
+ Dimensionality of the output hidden states of the residual attention blocks.
87
+ initializer_range (`float`, *optional*, defaults to 0.02):
88
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
89
+ is_decoder (`bool`, *optional*, defaults to `False`):
90
+ Whether or not to use a causal mask in addition to the `attention_mask` passed to [`ReformerModel`]. When
91
+ using the Reformer for causal language modeling, this argument should be set to `True`.
92
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
93
+ The epsilon used by the layer normalization layers.
94
+ local_chunk_length (`int`, *optional*, defaults to 64):
95
+ Length of chunk which attends to itself in `LocalSelfAttention`. Chunking reduces memory complexity from
96
+ sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
97
+ length (chunked self attention).
98
+ local_num_chunks_before (`int`, *optional*, defaults to 1):
99
+ Number of previous neighbouring chunks to attend to in `LocalSelfAttention` layer to itself.
100
+ local_num_chunks_after (`int`, *optional*, defaults to 0):
101
+ Number of following neighbouring chunks to attend to in `LocalSelfAttention` layer in addition to itself.
102
+ local_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
103
+ The dropout ratio for the attention probabilities in `LocalSelfAttention`.
104
+ lsh_attn_chunk_length (`int`, *optional*, defaults to 64):
105
+ Length of chunk which attends to itself in `LSHSelfAttention`. Chunking reduces memory complexity from
106
+ sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
107
+ length (chunked self attention).
108
+ lsh_num_chunks_before (`int`, *optional*, defaults to 1):
109
+ Number of previous neighbouring chunks to attend to in `LSHSelfAttention` layer to itself.
110
+ lsh_num_chunks_after (`int`, *optional*, defaults to 0):
111
+ Number of following neighbouring chunks to attend to in `LSHSelfAttention` layer to itself.
112
+ lsh_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
113
+ The dropout ratio for the attention probabilities in `LSHSelfAttention`.
114
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
115
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
116
+ just in case (e.g., 512 or 1024 or 2048).
117
+ num_attention_heads (`int`, *optional*, defaults to 12):
118
+ Number of attention heads for each attention layer in the Transformer encoder.
119
+ num_buckets (`int` or `List[int]`, *optional*):
120
+ Number of buckets, the key query vectors can be "hashed into" using the locality sensitive hashing scheme.
121
+ Each query key vector is hashed into a hash in `1, ..., num_buckets`. The number of buckets can also be
122
+ factorized into a list for improved memory complexity. In this case, each query key vector is hashed into a
123
+ hash in `1-1, 1-2, ..., num_buckets[0]-1, ..., num_buckets[0]-num_buckets[1]` if `num_buckets` is
124
+ factorized into two factors. The number of buckets (or the product the factors) should approximately equal
125
+ sequence length / lsh_chunk_length. If `num_buckets` not set, a good value is calculated on the fly.
126
+ num_hashes (`int`, *optional*, defaults to 1):
127
+ Number of hashing rounds (e.g., number of random rotations) in Local Sensitive Hashing scheme. The higher
128
+ `num_hashes`, the more accurate the `LSHSelfAttention` becomes, but also the more memory and time intensive
129
+ the hashing becomes.
130
+ pad_token_id (`int`, *optional*, defaults to 0):
131
+ The token id for the padding token.
132
+ vocab_size (`int`, *optional*, defaults to 320):\
133
+ Vocabulary size of the Reformer model. Defines the number of different tokens that can be represented by
134
+ the `inputs_ids` passed when calling [`ReformerModel`].
135
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
136
+ Whether to tie input and output embeddings.
137
+ use_cache (`bool`, *optional*, defaults to `True`):
138
+ Whether or not the model should return the last key/values attentions (not used by all models).
139
+ classifier_dropout (`float`, *optional*):
140
+ The dropout ratio for the classification head.
141
+
142
+ Examples:
143
+
144
+ ```python
145
+ >>> from transformers import ReformerConfig, ReformerModel
146
+
147
+ >>> # Initializing a Reformer configuration
148
+ >>> configuration = ReformerConfig()
149
+
150
+ >>> # Initializing a Reformer model (with random weights)
151
+ >>> model = ReformerModel(configuration)
152
+
153
+ >>> # Accessing the model configuration
154
+ >>> configuration = model.config
155
+ ```
156
+ """
157
+
158
+ model_type = "reformer"
159
+ keys_to_ignore_at_inference = ["past_buckets_states"]
160
+ attribute_map = {}
161
+
162
+ def __init__(
163
+ self,
164
+ attention_head_size=64,
165
+ attn_layers=["local", "lsh", "local", "lsh", "local", "lsh"],
166
+ axial_norm_std=1.0,
167
+ axial_pos_embds=True,
168
+ axial_pos_shape=[64, 64],
169
+ axial_pos_embds_dim=[64, 192],
170
+ chunk_size_lm_head=0,
171
+ eos_token_id=2,
172
+ feed_forward_size=512,
173
+ hash_seed=None,
174
+ hidden_act="relu",
175
+ hidden_dropout_prob=0.05,
176
+ hidden_size=256,
177
+ initializer_range=0.02,
178
+ is_decoder=False,
179
+ layer_norm_eps=1e-12,
180
+ local_num_chunks_before=1,
181
+ local_num_chunks_after=0,
182
+ local_attention_probs_dropout_prob=0.05,
183
+ local_attn_chunk_length=64,
184
+ lsh_attn_chunk_length=64,
185
+ lsh_attention_probs_dropout_prob=0.0,
186
+ lsh_num_chunks_before=1,
187
+ lsh_num_chunks_after=0,
188
+ max_position_embeddings=4096,
189
+ num_attention_heads=12,
190
+ num_buckets=None,
191
+ num_hashes=1,
192
+ pad_token_id=0,
193
+ vocab_size=320,
194
+ tie_word_embeddings=False,
195
+ use_cache=True,
196
+ classifier_dropout=None,
197
+ **kwargs,
198
+ ):
199
+ self.hash_seed = hash_seed
200
+ self.vocab_size = vocab_size
201
+ self.attention_head_size = attention_head_size
202
+ self.hidden_size = hidden_size
203
+ self.num_attention_heads = num_attention_heads
204
+ self.num_hashes = num_hashes
205
+ self.num_hidden_layers = len(attn_layers)
206
+ self.num_buckets = tuple(num_buckets) if isinstance(num_buckets, list) else num_buckets
207
+ self.lsh_attn_chunk_length = lsh_attn_chunk_length
208
+ self.local_attn_chunk_length = local_attn_chunk_length
209
+ self.lsh_num_chunks_after = lsh_num_chunks_after
210
+ self.lsh_num_chunks_before = lsh_num_chunks_before
211
+ self.local_num_chunks_after = local_num_chunks_after
212
+ self.local_num_chunks_before = local_num_chunks_before
213
+ self.hidden_act = hidden_act
214
+ self.feed_forward_size = feed_forward_size
215
+ self.hidden_dropout_prob = hidden_dropout_prob
216
+ self.lsh_attention_probs_dropout_prob = lsh_attention_probs_dropout_prob
217
+ self.local_attention_probs_dropout_prob = local_attention_probs_dropout_prob
218
+ self.max_position_embeddings = max_position_embeddings
219
+ self.initializer_range = initializer_range
220
+ self.layer_norm_eps = layer_norm_eps
221
+ self.axial_pos_embds = axial_pos_embds
222
+ self.axial_pos_shape = tuple(axial_pos_shape)
223
+ self.axial_pos_embds_dim = tuple(axial_pos_embds_dim)
224
+ self.axial_norm_std = axial_norm_std
225
+ self.chunk_size_lm_head = chunk_size_lm_head
226
+ self.attn_layers = attn_layers
227
+ self.use_cache = use_cache
228
+ self.classifier_dropout = classifier_dropout
229
+ super().__init__(
230
+ pad_token_id=pad_token_id,
231
+ eos_token_id=eos_token_id,
232
+ is_decoder=is_decoder,
233
+ tie_word_embeddings=tie_word_embeddings,
234
+ **kwargs,
235
+ )
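
Two constraints from the docstring above are easy to verify mechanically: the product of `axial_pos_shape` has to match the training sequence length (taken here to be `max_position_embeddings`), and `axial_pos_embds_dim` has to sum to `hidden_size`. The sketch below checks both against the defaults; it only assumes `transformers` is installed.

```python
from transformers import ReformerConfig

config = ReformerConfig()  # defaults shown above: hidden_size=256, max_position_embeddings=4096

# Axial position encodings factor the (seq_len, hidden_size) embedding table into two smaller ones,
# so the position dims must multiply out to the sequence length and the embedding dims must sum to hidden_size.
assert config.axial_pos_shape[0] * config.axial_pos_shape[1] == config.max_position_embeddings  # 64 * 64 == 4096
assert sum(config.axial_pos_embds_dim) == config.hidden_size  # 64 + 192 == 256

# num_hidden_layers is derived from attn_layers rather than passed explicitly.
assert config.num_hidden_layers == len(config.attn_layers) == 6  # alternating "local" / "lsh" layers
```
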
llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__init__.py ADDED
@@ -0,0 +1,96 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_sentencepiece_available,
20
+ is_torch_available,
21
+ )
22
+
23
+
24
+ _import_structure = {
25
+ "configuration_speecht5": [
26
+ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
27
+ "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
28
+ "SpeechT5Config",
29
+ "SpeechT5HifiGanConfig",
30
+ ],
31
+ "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
32
+ "processing_speecht5": ["SpeechT5Processor"],
33
+ }
34
+
35
+ try:
36
+ if not is_sentencepiece_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
42
+
43
+ try:
44
+ if not is_torch_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ _import_structure["modeling_speecht5"] = [
50
+ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
51
+ "SpeechT5ForSpeechToText",
52
+ "SpeechT5ForSpeechToSpeech",
53
+ "SpeechT5ForTextToSpeech",
54
+ "SpeechT5Model",
55
+ "SpeechT5PreTrainedModel",
56
+ "SpeechT5HifiGan",
57
+ ]
58
+
59
+ if TYPE_CHECKING:
60
+ from .configuration_speecht5 import (
61
+ SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
62
+ SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
63
+ SpeechT5Config,
64
+ SpeechT5HifiGanConfig,
65
+ )
66
+ from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
67
+ from .processing_speecht5 import SpeechT5Processor
68
+
69
+ try:
70
+ if not is_sentencepiece_available():
71
+ raise OptionalDependencyNotAvailable()
72
+ except OptionalDependencyNotAvailable:
73
+ pass
74
+ else:
75
+ from .tokenization_speecht5 import SpeechT5Tokenizer
76
+
77
+ try:
78
+ if not is_torch_available():
79
+ raise OptionalDependencyNotAvailable()
80
+ except OptionalDependencyNotAvailable:
81
+ pass
82
+ else:
83
+ from .modeling_speecht5 import (
84
+ SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
85
+ SpeechT5ForSpeechToSpeech,
86
+ SpeechT5ForSpeechToText,
87
+ SpeechT5ForTextToSpeech,
88
+ SpeechT5HifiGan,
89
+ SpeechT5Model,
90
+ SpeechT5PreTrainedModel,
91
+ )
92
+
93
+ else:
94
+ import sys
95
+
96
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
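
From the consumer side, the lazy structure above means the usual top-level imports just work, with the sentencepiece-backed tokenizer and the torch-backed models only usable when those backends are present. The sketch below probes the backends with the same helpers this `__init__` uses; it assumes nothing beyond an installed `transformers`.

```python
from transformers.utils import is_sentencepiece_available, is_torch_available

# The config, feature extractor and processor live in the unconditional part of
# _import_structure, so they are always importable.
from transformers import SpeechT5Config, SpeechT5FeatureExtractor, SpeechT5Processor  # noqa: F401

config = SpeechT5Config()
print(config.model_type)  # "speecht5"

# The tokenizer needs sentencepiece and the models need torch, mirroring the guarded blocks above.
if is_sentencepiece_available():
    from transformers import SpeechT5Tokenizer  # noqa: F401
if is_torch_available():
    from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan  # noqa: F401
```
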
llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.54 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/configuration_speecht5.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_hifigan.cpython-310.pyc ADDED
Binary file (2.85 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_speecht5_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (11.4 kB). View file