applied-ai-018 committed
Commit f357968 · verified · 1 Parent(s): 384088e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/configuration_idefics2.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/convert_idefics2_weights_to_hf.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/image_processing_idefics2.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/modeling_idefics2.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/configuration_layoutlm.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__init__.py +60 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__pycache__/__init__.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__pycache__/configuration_mamba.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__pycache__/convert_mamba_ssm_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__pycache__/modeling_mamba.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/configuration_mamba.py +156 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/convert_mamba_ssm_checkpoint_to_pytorch.py +153 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/modeling_mamba.py +709 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/__init__.py +83 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/configuration_poolformer.py +147 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py +214 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/feature_extraction_poolformer.py +33 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/image_processing_poolformer.py +377 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/modeling_poolformer.py +448 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__init__.py +153 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/__init__.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/configuration_roberta_prelayernorm.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/modeling_flax_roberta_prelayernorm.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/modeling_roberta_prelayernorm.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/modeling_tf_roberta_prelayernorm.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +156 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py +78 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py +1514 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +1566 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py +1799 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__init__.py +56 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__pycache__/configuration_sew.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__pycache__/convert_sew_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__pycache__/modeling_sew.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/sew/configuration_sew.py +256 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py +306 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/sew/modeling_sew.py +1226 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__init__.py +80 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/__init__.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/configuration_switch_transformers.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/convert_big_switch.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/convert_switch_transformers_original_flax_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/modeling_switch_transformers.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/configuration_switch_transformers.py +184 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/convert_big_switch.py +193 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py +203 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/modeling_switch_transformers.py +1858 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/t5/__init__.py +160 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/configuration_idefics2.cpython-310.pyc ADDED
Binary file (10 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/convert_idefics2_weights_to_hf.cpython-310.pyc ADDED
Binary file (4.36 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/image_processing_idefics2.cpython-310.pyc ADDED
Binary file (22.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/modeling_idefics2.cpython-310.pyc ADDED
Binary file (62.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/configuration_layoutlm.cpython-310.pyc ADDED
Binary file (8.44 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__init__.py ADDED
@@ -0,0 +1,60 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_torch_available,
21
+ )
22
+
23
+
24
+ _import_structure = {
25
+ "configuration_mamba": ["MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MambaConfig", "MambaOnnxConfig"],
26
+ }
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_mamba"] = [
35
+ "MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST",
36
+ "MambaForCausalLM",
37
+ "MambaModel",
38
+ "MambaPreTrainedModel",
39
+ ]
40
+
41
+
42
+ if TYPE_CHECKING:
43
+ from .configuration_mamba import MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP, MambaConfig, MambaOnnxConfig
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ from .modeling_mamba import (
52
+ MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST,
53
+ MambaForCausalLM,
54
+ MambaModel,
55
+ MambaPreTrainedModel,
56
+ )
57
+ else:
58
+ import sys
59
+
60
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
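
The `__init__.py` above follows the library's lazy-import pattern: `configuration_mamba` is always registered in `_import_structure`, `modeling_mamba` is added only when `is_torch_available()` returns `True`, and `_LazyModule` defers the real imports until first attribute access. A minimal usage sketch, assuming `transformers` and `torch` are installed:

```python
# Sketch only: attribute access on the lazy module triggers the real submodule import.
from transformers.models import mamba

config_cls = mamba.MambaConfig        # resolved via "configuration_mamba"
model_cls = mamba.MambaForCausalLM    # resolved via "modeling_mamba" (requires torch)
print(config_cls.__name__, model_cls.__name__)
```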
llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (922 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__pycache__/configuration_mamba.cpython-310.pyc ADDED
Binary file (6.13 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__pycache__/convert_mamba_ssm_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.56 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/__pycache__/modeling_mamba.cpython-310.pyc ADDED
Binary file (23.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/configuration_mamba.py ADDED
@@ -0,0 +1,156 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """MAMBA configuration"""
16
+
17
+ import math
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class MambaConfig(PretrainedConfig):
30
+ """
31
+ This is the configuration class to store the configuration of a [`MambaModel`]. It is used to instantiate a MAMBA
32
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
33
+ defaults will yield a similar configuration to that of the MAMBA
34
+ [state-spaces/mamba-2.8b](https://huggingface.co/state-spaces/mamba-2.8b) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 50280):
42
+ Vocabulary size of the MAMBA model. Defines the number of different tokens that can be represented by the
43
+ `inputs_ids` passed when calling [`MambaModel`].
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the embeddings and hidden states.
46
+ state_size (`int`, *optional*, defaults to 16): shape of the state space latents.
47
+ num_hidden_layers (`int`, *optional*, defaults to 32):
48
+ Number of hidden layers in the model.
49
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
50
+ The epsilon to use in the layer normalization layers.
51
+ pad_token_id (`int`, *optional*, defaults to 0):
52
+ Padding token id.
53
+ bos_token_id (`int`, *optional*, defaults to 0):
54
+ The id of the beginning of sentence token in the vocabulary.
55
+ eos_token_id (`int`, *optional*, defaults to 0):
56
+ The id of the end of sentence token in the vocabulary.
57
+ expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
58
+ conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel.
59
+ use_bias (`bool`, *optional*, defaults to `False`):
60
+ Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block.
61
+ use_conv_bias (`bool`, *optional*, defaults to `True`):
62
+ Whether or not to use bias in the convolution layer of the mixer block.
63
+ hidden_act (`str`, *optional*, defaults to `"silu"`):
64
+ The non-linear activation function (function or string) in the decoder.
65
+ initializer_range (`float`, *optional*, defaults to 0.1):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ residual_in_fp32 (`bool`, *optional*, defaults to `True`):
68
+ Whether or not residuals should be in `float32`. If set to `False`, residuals will keep the same `dtype` as the rest of the model.
69
+ time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
70
+ Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
71
+ time_step_scale (`float`, *optional*, defaults to 1.0):
72
+ Scale used to scale `dt_proj.bias`.
73
+ time_step_min (`float`, *optional*, defaults to 0.001):
74
+ Minimum `time_step` used to bound `dt_proj.bias`.
75
+ time_step_max (`float`, *optional*, defaults to 0.1):
76
+ Maximum `time_step` used to bound `dt_proj.bias`.
77
+ time_step_init_scheme (`str`, *optional*, defaults to `"random"`):
78
+ Init scheme used for `dt_proj.weight`. Should be one of `["random","uniform"]`
79
+ time_step_floor (`float`, *optional*, defaults to 0.0001):
80
+ Minimum clamping value of the `dt_proj.bias` layer initialization.
81
+ rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
82
+ Whether or not to rescale `out_proj` weights when initializing.
83
+ use_cache (`bool`, *optional*, defaults to `True`):
84
+ Whether or not the cache should be used.
85
+
86
+
87
+ Example:
88
+
89
+ ```python
90
+ >>> from transformers import MambaConfig, MambaModel
91
+
92
+ >>> # Initializing a Mamba configuration
93
+ >>> configuration = MambaConfig()
94
+
95
+ >>> # Initializing a model (with random weights) from the configuration
96
+ >>> model = MambaModel(configuration)
97
+
98
+ >>> # Accessing the model configuration
99
+ >>> configuration = model.config
100
+ ```"""
101
+
102
+ model_type = "mamba"
103
+
104
+ def __init__(
105
+ self,
106
+ vocab_size=50280,
107
+ hidden_size=768,
108
+ state_size=16,
109
+ num_hidden_layers=32,
110
+ layer_norm_epsilon=1e-5,
111
+ pad_token_id=0,
112
+ bos_token_id=0,
113
+ eos_token_id=0,
114
+ expand=2,
115
+ conv_kernel=4,
116
+ use_bias=False,
117
+ use_conv_bias=True,
118
+ hidden_act="silu",
119
+ initializer_range=0.1,
120
+ residual_in_fp32=True,
121
+ time_step_rank="auto",
122
+ time_step_scale=1.0,
123
+ time_step_min=0.001,
124
+ time_step_max=0.1,
125
+ time_step_init_scheme="random",
126
+ time_step_floor=1e-4,
127
+ rescale_prenorm_residual=False,
128
+ use_cache=True,
129
+ **kwargs,
130
+ ):
131
+ self.vocab_size = vocab_size
132
+ self.hidden_size = hidden_size
133
+ self.state_size = state_size
134
+ self.num_hidden_layers = num_hidden_layers
135
+ self.layer_norm_epsilon = layer_norm_epsilon
136
+ self.conv_kernel = conv_kernel
137
+ self.expand = expand
138
+ self.intermediate_size = int(expand * self.hidden_size)
139
+ self.bos_token_id = bos_token_id
140
+ self.eos_token_id = eos_token_id
141
+ self.pad_token_id = pad_token_id
142
+ self.use_bias = use_bias
143
+ self.use_conv_bias = use_conv_bias
144
+ self.hidden_act = hidden_act
145
+ self.initializer_range = initializer_range
146
+ self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank
147
+ self.time_step_scale = time_step_scale
148
+ self.time_step_min = time_step_min
149
+ self.time_step_max = time_step_max
150
+ self.time_step_init_scheme = time_step_init_scheme
151
+ self.time_step_floor = time_step_floor
152
+ self.rescale_prenorm_residual = rescale_prenorm_residual
153
+ self.residual_in_fp32 = residual_in_fp32
154
+ self.use_cache = use_cache
155
+
156
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
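
Note how the constructor derives `intermediate_size` from `expand * hidden_size` and resolves `time_step_rank="auto"` to `math.ceil(hidden_size / 16)`. A small sketch of those derived values, using the defaults documented above:

```python
# Sketch of the derived configuration fields (defaults: hidden_size=768, expand=2).
from transformers import MambaConfig

config = MambaConfig(hidden_size=768, expand=2, time_step_rank="auto")
print(config.intermediate_size)  # 1536 == int(2 * 768)
print(config.time_step_rank)     # 48   == math.ceil(768 / 16)
```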
llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/convert_mamba_ssm_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,153 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 state-spaces/mamba org and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """This script can be used to convert checkpoints provided in the `mamba_ssm` library into the format provided in HuggingFace `transformers`. It depends on the `mamba_ssm` package to be installed."""
16
+
17
+ import argparse
18
+ import json
19
+ import math
20
+ from typing import Tuple
21
+
22
+ import torch
23
+
24
+ from transformers import AutoTokenizer, MambaConfig, MambaForCausalLM
25
+ from transformers.utils import logging
26
+ from transformers.utils.import_utils import is_mamba_ssm_available
27
+
28
+
29
+ if is_mamba_ssm_available():
30
+ from mamba_ssm.models.config_mamba import MambaConfig as MambaConfigSSM
31
+ from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
32
+
33
+ def convert_ssm_config_to_hf_config(config_ssm: MambaConfigSSM) -> MambaConfig:
34
+ """Convert a MambaConfig from mamba_ssm to a MambaConfig from transformers."""
35
+ hf_config = MambaConfig()
36
+ # Set config hidden size, num hidden layers, and vocab size directly from the original config
37
+ hf_config.hidden_size = config_ssm.d_model
38
+ hf_config.intermediate_size = config_ssm.d_model * 2
39
+ hf_config.time_step_rank = math.ceil(config_ssm.d_model / 16)
40
+
41
+ hf_config.num_hidden_layers = config_ssm.n_layer
42
+ vocab_size = config_ssm.vocab_size
43
+ pad_vocab_size_multiple = config_ssm.pad_vocab_size_multiple
44
+ if (vocab_size % pad_vocab_size_multiple) != 0:
45
+ vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
46
+ hf_config.vocab_size = vocab_size
47
+ return hf_config
48
+
49
+
50
+ logging.set_verbosity_info()
51
+ logger = logging.get_logger(__name__)
52
+
53
+
54
+ def convert_mamba_ssm_checkpoint_to_huggingface_model(
55
+ original_state_dict: dict, original_ssm_config_dict: dict
56
+ ) -> Tuple[MambaForCausalLM, AutoTokenizer]:
57
+ if not is_mamba_ssm_available():
58
+ raise ImportError(
59
+ "Calling convert_mamba_ssm_checkpoint_to_huggingface_model requires the mamba_ssm library to be installed. Please install it with `pip install mamba_ssm`."
60
+ )
61
+ original_ssm_config = MambaConfigSSM(**original_ssm_config_dict)
62
+
63
+ # Convert mamba_ssm config to huggingface MambaConfig
64
+ hf_config = convert_ssm_config_to_hf_config(original_ssm_config)
65
+
66
+ # No weights need to be renamed between the two models.
67
+ converted_state_dict = original_state_dict
68
+
69
+ # Load reshaped state dict into a huggingface model.
70
+ hf_model = MambaForCausalLM(hf_config)
71
+ tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
72
+ hf_model.load_state_dict(converted_state_dict)
73
+ return (hf_model, tokenizer)
74
+
75
+
76
+ def validate_converted_model(
77
+ original_state_dict: dict, original_ssm_config_dict: dict, hf_model: MambaForCausalLM, tokenizer: AutoTokenizer
78
+ ) -> None:
79
+ """Validate the converted model returns the same output as the original model."""
80
+ torch_device = "cuda"
81
+
82
+ original_config = MambaConfigSSM(**original_ssm_config_dict)
83
+ original_model = MambaLMHeadModel(original_config).to(torch_device)
84
+ original_model.load_state_dict(original_state_dict)
85
+
86
+ hf_model = hf_model.to(torch_device)
87
+ input_ids = tokenizer("Hey how are you doing?", return_tensors="pt")["input_ids"].to(torch_device)
88
+ # Assert model logits are close
89
+ with torch.no_grad():
90
+ original_model_logits = original_model(input_ids).logits
91
+ hf_model_logits = hf_model(input_ids).logits
92
+ if not torch.allclose(original_model_logits, hf_model_logits, atol=1e-3):
93
+ raise ValueError("The converted model did not return the same logits as the original model.")
94
+
95
+ logger.info("Model conversion validated successfully.")
96
+
97
+
98
+ def convert_mamba_checkpoint_file_to_huggingface_model_file(
99
+ mamba_checkpoint_path: str, config_json_file: str, output_dir: str
100
+ ) -> None:
101
+ if not is_mamba_ssm_available():
102
+ raise ImportError(
103
+ "Calling convert_mamba_checkpoint_file_to_huggingface_model_file requires the mamba_ssm library to be installed. Please install it with `pip install mamba_ssm`."
104
+ )
105
+ if not torch.cuda.is_available():
106
+ raise ValueError(
107
+ "This script is to be run with a CUDA device, as the original mamba_ssm model does not support cpu."
108
+ )
109
+ logger.info(f"Loading model from {mamba_checkpoint_path} based on config from {config_json_file}")
110
+ # Load weights and config from paths
111
+ original_state_dict = torch.load(mamba_checkpoint_path, map_location="cpu")
112
+ with open(config_json_file, "r", encoding="utf-8") as json_file:
113
+ original_ssm_config_dict = json.load(json_file)
114
+
115
+ # Convert the model
116
+ hf_model, tokenizer = convert_mamba_ssm_checkpoint_to_huggingface_model(
117
+ original_state_dict, original_ssm_config_dict
118
+ )
119
+
120
+ # Validate the conversion
121
+ validate_converted_model(original_state_dict, original_ssm_config_dict, hf_model, tokenizer)
122
+
123
+ logger.info(f"Model converted successfully. Saving model to {output_dir}")
124
+
125
+ # Save new model to pytorch_dump_path
126
+ hf_model.save_pretrained(output_dir)
127
+ tokenizer.save_pretrained(output_dir)
128
+
129
+
130
+ if __name__ == "__main__":
131
+ parser = argparse.ArgumentParser()
132
+ parser.add_argument(
133
+ "-i",
134
+ "--mamba_checkpoint_file",
135
+ type=str,
136
+ required=True,
137
+ help="Path to a `pytorch_model.bin` mamba_ssm checkpoint file to be converted.",
138
+ )
139
+ parser.add_argument(
140
+ "-c",
141
+ "--config_json_file",
142
+ type=str,
143
+ required=True,
144
+ help="Path to a `config.json` file corresponding to a MambaConfig of the original mamba_ssm model.",
145
+ )
146
+ parser.add_argument(
147
+ "-o", "--output_dir", type=str, required=True, help="Path to directory to save the converted output model to."
148
+ )
149
+ args = parser.parse_args()
150
+
151
+ convert_mamba_checkpoint_file_to_huggingface_model_file(
152
+ args.mamba_checkpoint_file, args.config_json_file, args.output_dir
153
+ )
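
The entry point wires three required arguments (`-i/--mamba_checkpoint_file`, `-c/--config_json_file`, `-o/--output_dir`) into `convert_mamba_checkpoint_file_to_huggingface_model_file`, which refuses to run without `mamba_ssm` installed and a CUDA device available (validation loads the original model on GPU). A hedged sketch of calling the converter directly from Python; the paths are placeholders:

```python
# Placeholder paths; requires `mamba_ssm` to be installed and a CUDA device available.
from transformers.models.mamba.convert_mamba_ssm_checkpoint_to_pytorch import (
    convert_mamba_checkpoint_file_to_huggingface_model_file,
)

convert_mamba_checkpoint_file_to_huggingface_model_file(
    mamba_checkpoint_path="checkpoints/mamba-130m/pytorch_model.bin",  # placeholder
    config_json_file="checkpoints/mamba-130m/config.json",             # placeholder
    output_dir="converted/mamba-130m-hf",                              # placeholder
)
```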
llmeval-env/lib/python3.10/site-packages/transformers/models/mamba/modeling_mamba.py ADDED
@@ -0,0 +1,709 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 state-spaces/mamba org and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch MAMBA model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import Any, Dict, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import CrossEntropyLoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...utils import (
29
+ ModelOutput,
30
+ add_code_sample_docstrings,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ )
35
+ from ...utils.import_utils import is_causal_conv1d_available, is_mamba_ssm_available
36
+ from .configuration_mamba import MambaConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ if is_mamba_ssm_available():
42
+ from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn
43
+ from mamba_ssm.ops.triton.selective_state_update import selective_state_update
44
+ else:
45
+ selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None
46
+
47
+ if is_causal_conv1d_available():
48
+ from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
49
+ else:
50
+ causal_conv1d_update, causal_conv1d_fn = None, None
51
+
52
+ is_fast_path_available = all(
53
+ (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)
54
+ )
55
+
56
+ _CHECKPOINT_FOR_DOC = "state-spaces/mamba-130m-hf"
57
+ _CONFIG_FOR_DOC = "MambaConfig"
58
+
59
+
60
+ from ..deprecated._archive_maps import MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
61
+
62
+
63
+ class MambaCache:
64
+ """
65
+ Arguments:
66
+ config: MambaConfig
67
+ batch_size: int
68
+ dtype: torch.dtype
69
+ device: torch.device
70
+
71
+ Attributes:
72
+ seqlen_offset: int
73
+ dtype: torch.dtype
74
+ conv_states: Dict[int, torch.Tensor] # layer_idx -> [batch_size, intermediate_size, conv_kernel_size]
75
+ ssm_states: Dict[int, torch.Tensor] # layer_idx -> [batch_size, intermediate_size, ssm_state_size]
76
+ """
77
+
78
+ def __init__(
79
+ self, config: MambaConfig, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None
80
+ ):
81
+ self.seqlen_offset = 0
82
+ self.dtype = dtype
83
+ intermediate_size = config.intermediate_size
84
+ ssm_state_size = config.state_size
85
+ conv_kernel_size = config.conv_kernel
86
+
87
+ self.conv_states = {
88
+ i: torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype)
89
+ for i in range(config.num_hidden_layers)
90
+ }
91
+ self.ssm_states = {
92
+ i: torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=dtype)
93
+ for i in range(config.num_hidden_layers)
94
+ }
95
+
96
+
97
+ class MambaMixer(nn.Module):
98
+ """
99
+ Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
100
+ A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
101
+ ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
102
+ and is why Mamba is called **selective** state spaces)
103
+ """
104
+
105
+ def __init__(self, config: MambaConfig, layer_idx: int):
106
+ super().__init__()
107
+ self.hidden_size = config.hidden_size
108
+ self.ssm_state_size = config.state_size
109
+ self.conv_kernel_size = config.conv_kernel
110
+ self.intermediate_size = config.intermediate_size
111
+ self.time_step_rank = int(config.time_step_rank)
112
+ self.layer_idx = layer_idx
113
+ self.use_conv_bias = config.use_conv_bias
114
+ self.conv1d = nn.Conv1d(
115
+ in_channels=self.intermediate_size,
116
+ out_channels=self.intermediate_size,
117
+ bias=config.use_conv_bias,
118
+ kernel_size=config.conv_kernel,
119
+ groups=self.intermediate_size,
120
+ padding=config.conv_kernel - 1,
121
+ )
122
+
123
+ self.activation = config.hidden_act
124
+ self.act = ACT2FN[config.hidden_act]
125
+
126
+ # projection of the input hidden states
127
+ self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.use_bias)
128
+ # selective projection used to make dt, B and C input-dependent
129
+ self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
130
+ # time step projection (discretization)
131
+ self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)
132
+
133
+ # S4D real initialization. These are not discretized!
134
+ # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
135
+ A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
136
+ A = A.expand(self.intermediate_size, -1).contiguous()
137
+
138
+ self.A_log = nn.Parameter(torch.log(A))
139
+ self.D = nn.Parameter(torch.ones(self.intermediate_size))
140
+ self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
141
+ self.use_bias = config.use_bias
142
+
143
+ if not is_fast_path_available:
144
+ logger.warning_once(
145
+ "The fast path is not available because on of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
146
+ " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
147
+ " https://github.com/Dao-AILab/causal-conv1d"
148
+ )
149
+
150
+ def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[MambaCache] = None):
151
+ # 1. Gated MLP's linear projection
152
+ projected_states = self.in_proj(hidden_states).transpose(1, 2)
153
+
154
+ if self.training and cache_params is None: # Doesn't support outputting the states -> used for training
155
+ contextualized_states = mamba_inner_fn(
156
+ projected_states,
157
+ self.conv1d.weight,
158
+ self.conv1d.bias if self.use_conv_bias else None,
159
+ self.x_proj.weight,
160
+ self.dt_proj.weight,
161
+ self.out_proj.weight,
162
+ self.out_proj.bias.float() if self.use_bias else None,
163
+ -torch.exp(self.A_log.float()),
164
+ None, # input-dependent B
165
+ None, # input-dependent C
166
+ self.D.float(),
167
+ delta_bias=self.dt_proj.bias.float(),
168
+ delta_softplus=True,
169
+ )
170
+
171
+ else:
172
+ hidden_states, gate = projected_states.chunk(2, dim=1)
173
+
174
+ # 2. Convolution sequence transformation
175
+ conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
176
+ if cache_params is not None and cache_params.seqlen_offset > 0:
177
+ hidden_states = causal_conv1d_update(
178
+ hidden_states.squeeze(-1),
179
+ cache_params.conv_states[self.layer_idx],
180
+ conv_weights,
181
+ self.conv1d.bias,
182
+ self.activation,
183
+ )
184
+ hidden_states = hidden_states.unsqueeze(-1)
185
+ else:
186
+ if cache_params is not None:
187
+ conv_states = nn.functional.pad(
188
+ hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)
189
+ )
190
+ cache_params.conv_states[self.layer_idx].copy_(conv_states)
191
+ hidden_states = causal_conv1d_fn(
192
+ hidden_states, conv_weights, self.conv1d.bias, activation=self.activation
193
+ )
194
+
195
+ # 3. State Space Model sequence transformation
196
+ # 3.a. input varying initialization of time_step, B and C
197
+ ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
198
+ time_step, B, C = torch.split(
199
+ ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
200
+ )
201
+ discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)
202
+
203
+ A = -torch.exp(self.A_log.float())
204
+ # 3.c perform the recurrence y ← SSM(A, B, C)(x)
205
+ time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, "bias") else None
206
+ if cache_params is not None and cache_params.seqlen_offset > 0:
207
+ scan_outputs = selective_state_update(
208
+ cache_params.ssm_states[self.layer_idx],
209
+ hidden_states[..., 0],
210
+ discrete_time_step[..., 0],
211
+ A,
212
+ B[:, 0],
213
+ C[:, 0],
214
+ self.D,
215
+ gate[..., 0],
216
+ time_proj_bias,
217
+ dt_softplus=True,
218
+ ).unsqueeze(-1)
219
+ else:
220
+ scan_outputs, ssm_state = selective_scan_fn(
221
+ hidden_states,
222
+ discrete_time_step,
223
+ A,
224
+ B.transpose(1, 2),
225
+ C.transpose(1, 2),
226
+ self.D.float(),
227
+ gate,
228
+ time_proj_bias,
229
+ delta_softplus=True,
230
+ return_last_state=True,
231
+ )
232
+ if ssm_state is not None and cache_params is not None:
233
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
234
+
235
+ # 4. Final linear projection
236
+ contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
237
+ return contextualized_states
238
+
239
+ # fmt: off
240
+ def slow_forward(self, input_states, cache_params: Optional[MambaCache]=None):
241
+ batch_size, seq_len, _ = input_states.shape
242
+ dtype = input_states.dtype
243
+ # 1. Gated MLP's linear projection
244
+ projected_states = self.in_proj(input_states).transpose(1, 2) # [batch, 2 * intermediate_size, seq_len]
245
+ hidden_states, gate = projected_states.chunk(2, dim=1)
246
+
247
+ # 2. Convolution sequence transformation
248
+ if cache_params is not None:
249
+ ssm_state = cache_params.ssm_states[self.layer_idx].clone()
250
+ if cache_params.seqlen_offset > 0:
251
+ conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size]
252
+ conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
253
+ conv_state[:, :, -1] = hidden_states[:, :, 0]
254
+ cache_params.conv_states[self.layer_idx].copy_(conv_state)
255
+ hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
256
+ if self.use_conv_bias:
257
+ hidden_states += self.conv1d.bias
258
+ hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1) # [batch, intermediate_size, 1] : decoding
259
+ else:
260
+ conv_state = nn.functional.pad(
261
+ hidden_states,
262
+ (self.conv_kernel_size - hidden_states.shape[-1], 0)
263
+ )
264
+ cache_params.conv_states[self.layer_idx].copy_(conv_state)
265
+ hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len]
266
+ else:
267
+ ssm_state = torch.zeros(
268
+ (batch_size, self.intermediate_size, self.ssm_state_size),
269
+ device=hidden_states.device, dtype=dtype
270
+ )
271
+ hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len]
272
+
273
+ # 3. State Space Model sequence transformation
274
+ # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
275
+ ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
276
+ time_step, B, C = torch.split(
277
+ ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
278
+ )
279
+ discrete_time_step = self.dt_proj(time_step) # [batch, seq_len, intermediate_size]
280
+ discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2) # [batch, intermediate_size, seq_len]
281
+
282
+ # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
283
+ A = -torch.exp(self.A_log.float()) # [intermediate_size, ssm_state_size]
284
+ discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None]) # [batch, intermediate_size, seq_len, ssm_state_size]
285
+ discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float() # [batch, intermediate_size, seq_len, ssm_state_size]
286
+ deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
287
+
288
+ # 3.c perform the recurrence y ← SSM(A, B, C)(x)
289
+ scan_outputs = []
290
+ for i in range(seq_len):
291
+ ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :] # [batch, intermediate_size, ssm_state_size]
292
+ scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)) # [batch, intermediate_size, 1]
293
+ scan_outputs.append(scan_output[:, :, 0])
294
+ scan_output = torch.stack(scan_outputs, dim=-1) # [batch, intermediate_size, seq_len]
295
+ scan_output = scan_output + (hidden_states * self.D[None, :, None])
296
+ scan_output = (scan_output * self.act(gate))
297
+
298
+ if cache_params is not None:
299
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
300
+
301
+ # 4. Final linear projection
302
+ contextualized_states = self.out_proj(scan_output.transpose(1, 2)) # [batch, seq_len, hidden_size]
303
+ return contextualized_states
304
+ # fmt: on
305
+
306
+ def forward(self, hidden_states, cache_params: Optional[MambaCache] = None):
307
+ if is_fast_path_available and "cuda" in self.x_proj.weight.device.type:
308
+ return self.cuda_kernels_forward(hidden_states, cache_params)
309
+ return self.slow_forward(hidden_states, cache_params)
310
+
311
+
312
+ class MambaRMSNorm(nn.Module):
313
+ def __init__(self, hidden_size, eps=1e-6):
314
+ """
315
+ MambaRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
316
+ """
317
+ super().__init__()
318
+ self.weight = nn.Parameter(torch.ones(hidden_size))
319
+ self.variance_epsilon = eps
320
+
321
+ def forward(self, hidden_states):
322
+ input_dtype = hidden_states.dtype
323
+ hidden_states = hidden_states.to(torch.float32)
324
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
325
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
326
+ return self.weight * hidden_states.to(input_dtype)
327
+
328
+
329
+ class MambaBlock(nn.Module):
330
+ def __init__(self, config, layer_idx):
331
+ super().__init__()
332
+ self.config = config
333
+ self.layer_idx = layer_idx
334
+ self.residual_in_fp32 = config.residual_in_fp32
335
+ self.norm = MambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
336
+ self.mixer = MambaMixer(config, layer_idx=layer_idx)
337
+
338
+ def forward(self, hidden_states, cache_params: Optional[MambaCache] = None):
339
+ residual = hidden_states
340
+ hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
341
+ if self.residual_in_fp32:
342
+ residual = residual.to(torch.float32)
343
+
344
+ hidden_states = self.mixer(hidden_states, cache_params=cache_params)
345
+ hidden_states = residual + hidden_states
346
+ return hidden_states
347
+
348
+
349
+ class MambaPreTrainedModel(PreTrainedModel):
350
+ """
351
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
352
+ models.
353
+ """
354
+
355
+ config_class = MambaConfig
356
+ base_model_prefix = "backbone"
357
+ _no_split_modules = ["MambaBlock"]
358
+ supports_gradient_checkpointing = True
359
+
360
+ def _init_weights(self, module):
361
+ """Initialize the weights."""
362
+ if isinstance(module, MambaMixer):
363
+ module.A_log._no_weight_decay = True
364
+ module.D._no_weight_decay = True
365
+
366
+ dt_init_std = self.config.time_step_rank**-0.5 * self.config.time_step_scale
367
+ if self.config.time_step_init_scheme == "constant":
368
+ nn.init.constant_(module.dt_proj.weight, dt_init_std)
369
+ elif self.config.time_step_init_scheme == "random":
370
+ nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std)
371
+
372
+ dt = torch.exp(
373
+ torch.rand(self.config.intermediate_size)
374
+ * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
375
+ + math.log(self.config.time_step_min)
376
+ ).clamp(min=self.config.time_step_floor)
377
+ # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
378
+ inv_dt = dt + torch.log(-torch.expm1(-dt))
379
+ with torch.no_grad():
380
+ module.dt_proj.bias.copy_(inv_dt)
381
+ module.dt_proj.bias._no_reinit = True
382
+
383
+ if isinstance(module, nn.Linear):
384
+ if module.bias is not None:
385
+ if not getattr(module.bias, "_no_reinit", False):
386
+ nn.init.zeros_(module.bias)
387
+ elif isinstance(module, nn.Embedding):
388
+ nn.init.normal_(module.weight, std=self.config.initializer_range)
389
+
390
+ if self.config.rescale_prenorm_residual:
391
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
392
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
393
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
394
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
395
+ #
396
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
397
+ for name, p in module.named_parameters():
398
+ if name in ["out_proj.weight"]:
399
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
400
+ # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
401
+ # We need to reinit p since this code could be called multiple times
402
+ # Having just p *= scale would repeatedly scale it down
403
+ nn.init.kaiming_uniform_(p, a=math.sqrt(5))
404
+ with torch.no_grad():
405
+ p /= math.sqrt(self.config.num_hidden_layers)
406
+
407
+
408
+ @dataclass
409
+ class MambaOutput(ModelOutput):
410
+ """
411
+ Class for the MAMBA model outputs.
412
+
413
+ Args:
414
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
415
+ Sequence of hidden-states at the output of the last layer of the model.
416
+ cache_params (`MambaCache`):
417
+ The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
418
+ avoid providing the old `input_ids`.
419
+
420
+ Includes both the state space model state matrices after the selective scan, and the convolutional states.
421
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
422
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
423
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
424
+
425
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
426
+ """
427
+
428
+ last_hidden_state: Optional[torch.FloatTensor] = None
429
+ cache_params: Optional[MambaCache] = None
430
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
431
+
432
+
433
+ @dataclass
434
+ class MambaCausalLMOutput(ModelOutput):
435
+ """
436
+ Base class for causal language model (or autoregressive) outputs.
437
+
438
+ Args:
439
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
440
+ Language modeling loss (for next-token prediction).
441
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
442
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
443
+ cache_params (`MambaCache`):
444
+ The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
445
+ avoid providing the old `input_ids`.
446
+
447
+ Includes both the state space model state matrices after the selective scan, and the convolutional states.
448
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
449
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
450
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
451
+
452
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
453
+ """
454
+
455
+ loss: Optional[torch.FloatTensor] = None
456
+ logits: Optional[torch.FloatTensor] = None
457
+ cache_params: Optional[MambaCache] = None
458
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
459
+
460
+
461
+ MAMBA_START_DOCSTRING = r"""
462
+
463
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
464
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
465
+ etc.)
466
+
467
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
468
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
469
+ and behavior.
470
+
471
+ Parameters:
472
+ config ([`MambaConfig`]): Model configuration class with all the parameters of the model.
473
+ Initializing with a config file does not load the weights associated with the model, only the
474
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
475
+ """
476
+
477
+ MAMBA_INPUTS_DOCSTRING = r"""
478
+ Args:
479
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
480
+ Indices of input sequence tokens in the vocabulary.
481
+
482
+ If `cache_params.seqlen_offset>0`, only `input_ids` that do not have their past calculated should be passed as
483
+ `input_ids`.
484
+
485
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
486
+ [`PreTrainedTokenizer.__call__`] for details.
487
+
488
+ [What are input IDs?](../glossary#input-ids)
489
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
490
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
491
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
492
+ model's internal embedding lookup matrix.
493
+ cache_params (`MambaCache`, *optional*):
494
+ If passed along, the model uses the previous state in all the blocks (which will give the output for the
495
+ `input_ids` provided as if the model added `state_input_ids + input_ids` as context).
496
+ use_cache (`bool`, *optional*):
497
+ If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
498
+ output_hidden_states (`bool`, *optional*):
499
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
500
+ more detail.
501
+ return_dict (`bool`, *optional*):
502
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
503
+ """
504
+
505
+
506
+ @add_start_docstrings(
507
+ "The bare MAMBA Model transformer outputting raw hidden-states without any specific head on top.",
508
+ MAMBA_START_DOCSTRING,
509
+ )
510
+ class MambaModel(MambaPreTrainedModel):
511
+ def __init__(self, config):
512
+ super().__init__(config)
513
+
514
+ self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
515
+ self.layers = nn.ModuleList([MambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
516
+
517
+ self.gradient_checkpointing = False
518
+ self.norm_f = MambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
519
+ # Initialize weights and apply final processing
520
+ self._register_load_state_dict_pre_hook(self.load_hook)
521
+ self.post_init()
522
+
523
+ def load_hook(self, state_dict, prefix, *args):
524
+ for k in state_dict:
525
+ if "embedding." in k:
526
+ state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k)
527
+ break
528
+
529
+ def get_input_embeddings(self):
530
+ return self.embeddings
531
+
532
+ def set_input_embeddings(self, new_embeddings):
533
+ self.embeddings = new_embeddings
534
+
535
+ @add_start_docstrings_to_model_forward(MAMBA_INPUTS_DOCSTRING)
536
+ @add_code_sample_docstrings(
537
+ checkpoint=_CHECKPOINT_FOR_DOC,
538
+ output_type=MambaOutput,
539
+ config_class=_CONFIG_FOR_DOC,
540
+ )
541
+ def forward(
542
+ self,
543
+ input_ids: Optional[torch.LongTensor] = None,
544
+ inputs_embeds: Optional[torch.LongTensor] = None,
545
+ cache_params: Optional[MambaCache] = None,
546
+ use_cache: Optional[bool] = None,
547
+ output_hidden_states: Optional[bool] = None,
548
+ return_dict: Optional[bool] = None,
549
+ **kwargs, # `attention_mask` is passed by the tokenizer and we don't want it
550
+ ) -> Union[Tuple, MambaOutput]:
551
+ output_hidden_states = (
552
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
553
+ )
554
+ use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
555
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
556
+
557
+ if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor
558
+ raise ValueError(
559
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
560
+ )
561
+
562
+ if inputs_embeds is None:
563
+ inputs_embeds = self.embeddings(input_ids)
564
+
565
+ if self.gradient_checkpointing and self.training and use_cache:
566
+ use_cache = False
567
+
568
+ if cache_params is None and use_cache:
569
+ cache_params = MambaCache(
570
+ self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
571
+ )
572
+
573
+ hidden_states = inputs_embeds
574
+ all_hidden_states = () if output_hidden_states else None
575
+ for mixer_block in self.layers:
576
+ if self.gradient_checkpointing and self.training:
577
+ hidden_states = self._gradient_checkpointing_func(mixer_block.__call__, hidden_states, cache_params)
578
+ else:
579
+ hidden_states = mixer_block(hidden_states, cache_params=cache_params)
580
+
581
+ if output_hidden_states:
582
+ all_hidden_states = all_hidden_states + (hidden_states,)
583
+
584
+ if use_cache:
585
+ cache_params.seqlen_offset += inputs_embeds.shape[1]
586
+
587
+ hidden_states = self.norm_f(hidden_states)
588
+
589
+ if output_hidden_states:
590
+ all_hidden_states = all_hidden_states + (hidden_states,)
591
+
592
+ if not return_dict:
593
+ return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
594
+
595
+ return MambaOutput(
596
+ last_hidden_state=hidden_states,
597
+ cache_params=cache_params if use_cache else None,
598
+ hidden_states=all_hidden_states,
599
+ )
600
+
601
+
602
+ @add_start_docstrings(
603
+ """
604
+ The MAMBA Model transformer with a language modeling head on top (linear layer with weights tied to the input
605
+ embeddings).
606
+ """,
607
+ MAMBA_START_DOCSTRING,
608
+ )
609
+ class MambaForCausalLM(MambaPreTrainedModel):
610
+ _tied_weights_keys = ["lm_head.weight"]
611
+
612
+ def __init__(self, config):
613
+ super().__init__(config)
614
+ self.backbone = MambaModel(config)
615
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
616
+ # Initialize weights and apply final processing
617
+ self.post_init()
618
+
619
+ def get_output_embeddings(self):
620
+ return self.lm_head
621
+
622
+ def set_output_embeddings(self, new_embeddings):
623
+ self.lm_head = new_embeddings
624
+
625
+ def get_input_embeddings(self):
626
+ return self.backbone.get_input_embeddings()
627
+
628
+ def set_input_embeddings(self, new_embeddings):
629
+ return self.backbone.set_input_embeddings(new_embeddings)
630
+
631
+ def _update_model_kwargs_for_generation(
632
+ self, outputs: ModelOutput, model_kwargs: Dict[str, Any], **kwargs
633
+ ) -> Dict[str, Any]:
634
+ model_kwargs["cache_params"] = outputs.get("cache_params", None)
635
+ return model_kwargs
636
+
637
+ def prepare_inputs_for_generation(
638
+ self, input_ids, cache_params: Optional[MambaCache] = None, inputs_embeds=None, attention_mask=None, **kwargs
639
+ ):
640
+ # Only the last token of input_ids is needed if the state is passed along.
641
+ if cache_params is not None:
642
+ input_ids = input_ids[:, -1].unsqueeze(-1)
643
+
644
+ if inputs_embeds is not None and cache_params is None:
645
+ model_inputs = {"inputs_embeds": inputs_embeds}
646
+ else:
647
+ model_inputs = {"input_ids": input_ids}
648
+
649
+ model_inputs["cache_params"] = cache_params
650
+ return model_inputs
651
+
652
+ @add_start_docstrings_to_model_forward(MAMBA_INPUTS_DOCSTRING)
653
+ @add_code_sample_docstrings(
654
+ checkpoint=_CHECKPOINT_FOR_DOC,
655
+ output_type=MambaCausalLMOutput,
656
+ config_class=_CONFIG_FOR_DOC,
657
+ )
658
+ def forward(
659
+ self,
660
+ input_ids: Optional[torch.LongTensor] = None,
661
+ inputs_embeds: Optional[torch.FloatTensor] = None,
662
+ cache_params: Optional[MambaCache] = None,
663
+ labels: Optional[torch.LongTensor] = None,
664
+ output_hidden_states: Optional[bool] = None,
665
+ return_dict: Optional[bool] = None,
666
+ use_cache: Optional[bool] = None,
667
+ **kwargs, # for now we need this for generation
668
+ ) -> Union[Tuple, MambaCausalLMOutput]:
669
+ r"""
670
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
671
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
672
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
673
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
674
+ """
675
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
676
+
677
+ mamba_outputs = self.backbone(
678
+ input_ids,
679
+ cache_params=cache_params,
680
+ inputs_embeds=inputs_embeds,
681
+ output_hidden_states=output_hidden_states,
682
+ return_dict=return_dict,
683
+ use_cache=use_cache,
684
+ )
685
+ hidden_states = mamba_outputs[0]
686
+
687
+ logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
688
+
689
+ loss = None
690
+ if labels is not None:
691
+ # move labels to correct device to enable model parallelism
692
+ labels = labels.to(logits.device)
693
+ # Shift so that tokens < n predict n
694
+ shift_logits = logits[..., :-1, :].contiguous()
695
+ shift_labels = labels[..., 1:].contiguous()
696
+ # Flatten the tokens
697
+ loss_fct = CrossEntropyLoss()
698
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
699
+
700
+ if not return_dict:
701
+ output = (logits,) + mamba_outputs[1:]
702
+ return ((loss,) + output) if loss is not None else output
703
+
704
+ return MambaCausalLMOutput(
705
+ loss=loss,
706
+ logits=logits,
707
+ cache_params=mamba_outputs.cache_params,
708
+ hidden_states=mamba_outputs.hidden_states,
709
+ )
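The causal-LM head defined above (a `MambaModel` backbone plus a tied `lm_head`) plugs straight into the standard `generate()` loop: `prepare_inputs_for_generation` feeds only the last token once `cache_params` is populated, and `_update_model_kwargs_for_generation` carries that cache between steps. A minimal generation sketch, not taken from the diff; the checkpoint id `state-spaces/mamba-130m-hf` is an assumption, not something this file pins down:

```python
import torch
from transformers import AutoTokenizer, MambaForCausalLM

# Assumed checkpoint id; any Mamba checkpoint with a matching tokenizer works the same way.
tokenizer = AutoTokenizer.from_pretrained("state-spaces/mamba-130m-hf")
model = MambaForCausalLM.from_pretrained("state-spaces/mamba-130m-hf")

inputs = tokenizer("Hey how are you doing?", return_tensors="pt")
with torch.no_grad():
    # generate() reuses cache_params between steps via prepare_inputs_for_generation above.
    output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```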
llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/__init__.py ADDED
@@ -0,0 +1,83 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_poolformer": [
21
+ "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "PoolFormerConfig",
23
+ "PoolFormerOnnxConfig",
24
+ ]
25
+ }
26
+
27
+ try:
28
+ if not is_vision_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
34
+ _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
35
+
36
+ try:
37
+ if not is_torch_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["modeling_poolformer"] = [
43
+ "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
44
+ "PoolFormerForImageClassification",
45
+ "PoolFormerModel",
46
+ "PoolFormerPreTrainedModel",
47
+ ]
48
+
49
+
50
+ if TYPE_CHECKING:
51
+ from .configuration_poolformer import (
52
+ POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
53
+ PoolFormerConfig,
54
+ PoolFormerOnnxConfig,
55
+ )
56
+
57
+ try:
58
+ if not is_vision_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .feature_extraction_poolformer import PoolFormerFeatureExtractor
64
+ from .image_processing_poolformer import PoolFormerImageProcessor
65
+
66
+ try:
67
+ if not is_torch_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ pass
71
+ else:
72
+ from .modeling_poolformer import (
73
+ POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
74
+ PoolFormerForImageClassification,
75
+ PoolFormerModel,
76
+ PoolFormerPreTrainedModel,
77
+ )
78
+
79
+
80
+ else:
81
+ import sys
82
+
83
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
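The `_LazyModule` registration above keeps `import transformers` cheap: the poolformer submodules are only imported when one of the exported names is first accessed. A small illustrative sketch of that behaviour (the printed values are the documented defaults, nothing here depends on a checkpoint):

```python
import transformers

# The top-level package is lazy as well; this attribute access is what actually
# triggers the import of configuration_poolformer.
PoolFormerConfig = transformers.PoolFormerConfig

config = PoolFormerConfig()   # defaults defined in configuration_poolformer.py
print(config.model_type)      # "poolformer"
print(config.hidden_sizes)    # [64, 128, 320, 512]
```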
llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/configuration_poolformer.py ADDED
@@ -0,0 +1,147 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Sea AI Labs and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PoolFormer model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from packaging import version
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class PoolFormerConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of [`PoolFormerModel`]. It is used to instantiate a
35
+ PoolFormer model according to the specified arguments, defining the model architecture. Instantiating a
36
+ configuration with the defaults will yield a similar configuration to that of the PoolFormer
37
+ [sail/poolformer_s12](https://huggingface.co/sail/poolformer_s12) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ num_channels (`int`, *optional*, defaults to 3):
45
+ The number of channels in the input image.
46
+ patch_size (`int`, *optional*, defaults to 16):
47
+ The size of the input patch.
48
+ stride (`int`, *optional*, defaults to 16):
49
+ The stride of the input patch.
50
+ pool_size (`int`, *optional*, defaults to 3):
51
+ The size of the pooling window.
52
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
53
+ The ratio of the number of channels in the output of the MLP to the number of channels in the input.
54
+ depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`):
55
+ The depth of each encoder block.
56
+ hidden_sizes (`list`, *optional*, defaults to `[64, 128, 320, 512]`):
57
+ The hidden sizes of each encoder block.
58
+ patch_sizes (`list`, *optional*, defaults to `[7, 3, 3, 3]`):
59
+ The size of the input patch for each encoder block.
60
+ strides (`list`, *optional*, defaults to `[4, 2, 2, 2]`):
61
+ The stride of the input patch for each encoder block.
62
+ padding (`list`, *optional*, defaults to `[2, 1, 1, 1]`):
63
+ The padding of the input patch for each encoder block.
64
+ num_encoder_blocks (`int`, *optional*, defaults to 4):
65
+ The number of encoder blocks.
66
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
67
+ The dropout rate for the dropout layers.
68
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
69
+ The activation function for the hidden layers.
70
+ use_layer_scale (`bool`, *optional*, defaults to `True`):
71
+ Whether to use layer scale.
72
+ layer_scale_init_value (`float`, *optional*, defaults to 1e-05):
73
+ The initial value for the layer scale.
74
+ initializer_range (`float`, *optional*, defaults to 0.02):
75
+ The initializer range for the weights.
76
+
77
+ Example:
78
+
79
+ ```python
80
+ >>> from transformers import PoolFormerConfig, PoolFormerModel
81
+
82
+ >>> # Initializing a PoolFormer sail/poolformer_s12 style configuration
83
+ >>> configuration = PoolFormerConfig()
84
+
85
+ >>> # Initializing a model (with random weights) from the sail/poolformer_s12 style configuration
86
+ >>> model = PoolFormerModel(configuration)
87
+
88
+ >>> # Accessing the model configuration
89
+ >>> configuration = model.config
90
+ ```
91
+ """
92
+
93
+ model_type = "poolformer"
94
+
95
+ def __init__(
96
+ self,
97
+ num_channels=3,
98
+ patch_size=16,
99
+ stride=16,
100
+ pool_size=3,
101
+ mlp_ratio=4.0,
102
+ depths=[2, 2, 6, 2],
103
+ hidden_sizes=[64, 128, 320, 512],
104
+ patch_sizes=[7, 3, 3, 3],
105
+ strides=[4, 2, 2, 2],
106
+ padding=[2, 1, 1, 1],
107
+ num_encoder_blocks=4,
108
+ drop_path_rate=0.0,
109
+ hidden_act="gelu",
110
+ use_layer_scale=True,
111
+ layer_scale_init_value=1e-5,
112
+ initializer_range=0.02,
113
+ **kwargs,
114
+ ):
115
+ self.num_channels = num_channels
116
+ self.patch_size = patch_size
117
+ self.stride = stride
118
+ self.padding = padding
119
+ self.pool_size = pool_size
120
+ self.hidden_sizes = hidden_sizes
121
+ self.mlp_ratio = mlp_ratio
122
+ self.depths = depths
123
+ self.patch_sizes = patch_sizes
124
+ self.strides = strides
125
+ self.num_encoder_blocks = num_encoder_blocks
126
+ self.drop_path_rate = drop_path_rate
127
+ self.hidden_act = hidden_act
128
+ self.use_layer_scale = use_layer_scale
129
+ self.layer_scale_init_value = layer_scale_init_value
130
+ self.initializer_range = initializer_range
131
+ super().__init__(**kwargs)
132
+
133
+
134
+ class PoolFormerOnnxConfig(OnnxConfig):
135
+ torch_onnx_minimum_version = version.parse("1.11")
136
+
137
+ @property
138
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
139
+ return OrderedDict(
140
+ [
141
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
142
+ ]
143
+ )
144
+
145
+ @property
146
+ def atol_for_validation(self) -> float:
147
+ return 2e-3
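For reference, the ONNX export config above declares a single `pixel_values` input with dynamic batch/channel/height/width axes and a 2e-3 validation tolerance. A short sketch of how the two classes fit together, assuming only the defaults documented above (the s36-style overrides are illustrative):

```python
from transformers import PoolFormerConfig
from transformers.models.poolformer.configuration_poolformer import PoolFormerOnnxConfig

# An s36-style variant: deeper stages, smaller layer-scale init, same hidden sizes.
config = PoolFormerConfig(depths=[6, 6, 18, 6], layer_scale_init_value=1e-6)

onnx_config = PoolFormerOnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', ...})])
print(onnx_config.atol_for_validation)  # 0.002
```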
llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py ADDED
@@ -0,0 +1,214 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert PoolFormer checkpoints from the original repository. URL: https://github.com/sail-sg/poolformer"""
16
+
17
+ import argparse
18
+ import json
19
+ from collections import OrderedDict
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+
27
+ from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
28
+ from transformers.utils import logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ def replace_key_with_offset(key, offset, original_name, new_name):
36
+ """
37
+ Replaces the key by subtracting the offset from the original layer number
38
+ """
39
+ to_find = original_name.split(".")[0]
40
+ key_list = key.split(".")
41
+ orig_block_num = int(key_list[key_list.index(to_find) - 2])
42
+ layer_num = int(key_list[key_list.index(to_find) - 1])
43
+ new_block_num = orig_block_num - offset
44
+
45
+ key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
46
+ return key
47
+
48
+
49
+ def rename_keys(state_dict):
50
+ new_state_dict = OrderedDict()
51
+ total_embed_found, patch_emb_offset = 0, 0
52
+ for key, value in state_dict.items():
53
+ if key.startswith("network"):
54
+ key = key.replace("network", "poolformer.encoder")
55
+ if "proj" in key:
56
+ # Works for the first embedding as well as the internal embedding layers
57
+ if key.endswith("bias") and "patch_embed" not in key:
58
+ patch_emb_offset += 1
59
+ to_replace = key[: key.find("proj")]
60
+ key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
61
+ key = key.replace("proj", "projection")
62
+ if key.endswith("bias"):
63
+ total_embed_found += 1
64
+ if "patch_embeddings" in key:
65
+ key = "poolformer.encoder." + key
66
+ if "mlp.fc1" in key:
67
+ key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
68
+ if "mlp.fc2" in key:
69
+ key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
70
+ if "norm1" in key:
71
+ key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
72
+ if "norm2" in key:
73
+ key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
74
+ if "layer_scale_1" in key:
75
+ key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
76
+ if "layer_scale_2" in key:
77
+ key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
78
+ if "head" in key:
79
+ key = key.replace("head", "classifier")
80
+ new_state_dict[key] = value
81
+ return new_state_dict
82
+
83
+
84
+ # We will verify our results on a COCO image
85
+ def prepare_img():
86
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
87
+ image = Image.open(requests.get(url, stream=True).raw)
88
+
89
+ return image
90
+
91
+
92
+ @torch.no_grad()
93
+ def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
94
+ """
95
+ Copy/paste/tweak model's weights to our PoolFormer structure.
96
+ """
97
+
98
+ # load default PoolFormer configuration
99
+ config = PoolFormerConfig()
100
+
101
+ # set attributes based on model_name
102
+ repo_id = "huggingface/label-files"
103
+ size = model_name[-3:]
104
+ config.num_labels = 1000
105
+ filename = "imagenet-1k-id2label.json"
106
+ expected_shape = (1, 1000)
107
+
108
+ # set config attributes
109
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
110
+ id2label = {int(k): v for k, v in id2label.items()}
111
+ config.id2label = id2label
112
+ config.label2id = {v: k for k, v in id2label.items()}
113
+ if size == "s12":
114
+ config.depths = [2, 2, 6, 2]
115
+ config.hidden_sizes = [64, 128, 320, 512]
116
+ config.mlp_ratio = 4.0
117
+ crop_pct = 0.9
118
+ elif size == "s24":
119
+ config.depths = [4, 4, 12, 4]
120
+ config.hidden_sizes = [64, 128, 320, 512]
121
+ config.mlp_ratio = 4.0
122
+ crop_pct = 0.9
123
+ elif size == "s36":
124
+ config.depths = [6, 6, 18, 6]
125
+ config.hidden_sizes = [64, 128, 320, 512]
126
+ config.mlp_ratio = 4.0
127
+ config.layer_scale_init_value = 1e-6
128
+ crop_pct = 0.9
129
+ elif size == "m36":
130
+ config.depths = [6, 6, 18, 6]
131
+ config.hidden_sizes = [96, 192, 384, 768]
132
+ config.mlp_ratio = 4.0
133
+ config.layer_scale_init_value = 1e-6
134
+ crop_pct = 0.95
135
+ elif size == "m48":
136
+ config.depths = [8, 8, 24, 8]
137
+ config.hidden_sizes = [96, 192, 384, 768]
138
+ config.mlp_ratio = 4.0
139
+ config.layer_scale_init_value = 1e-6
140
+ crop_pct = 0.95
141
+ else:
142
+ raise ValueError(f"Size {size} not supported")
143
+
144
+ # load image processor
145
+ image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
146
+
147
+ # Prepare image
148
+ image = prepare_img()
149
+ pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
150
+
151
+ logger.info(f"Converting model {model_name}...")
152
+
153
+ # load original state dict
154
+ state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
155
+
156
+ # rename keys
157
+ state_dict = rename_keys(state_dict)
158
+
159
+ # create HuggingFace model and load state dict
160
+ model = PoolFormerForImageClassification(config)
161
+ model.load_state_dict(state_dict)
162
+ model.eval()
163
+
164
+ # Define image processor
165
+ image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
166
+ pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
167
+
168
+ # forward pass
169
+ outputs = model(pixel_values)
170
+ logits = outputs.logits
171
+
172
+ # define expected logit slices for different models
173
+ if size == "s12":
174
+ expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
175
+ elif size == "s24":
176
+ expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
177
+ elif size == "s36":
178
+ expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
179
+ elif size == "m36":
180
+ expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
181
+ elif size == "m48":
182
+ expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
183
+ else:
184
+ raise ValueError(f"Size {size} not supported")
185
+
186
+ # verify logits
187
+ assert logits.shape == expected_shape
188
+ assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
189
+
190
+ # finally, save model and image processor
191
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
192
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
193
+ model.save_pretrained(pytorch_dump_folder_path)
194
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
195
+ image_processor.save_pretrained(pytorch_dump_folder_path)
196
+
197
+
198
+ if __name__ == "__main__":
199
+ parser = argparse.ArgumentParser()
200
+
201
+ parser.add_argument(
202
+ "--model_name",
203
+ default="poolformer_s12",
204
+ type=str,
205
+ help="Name of the model you'd like to convert.",
206
+ )
207
+ parser.add_argument(
208
+ "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
209
+ )
210
+ parser.add_argument(
211
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
212
+ )
213
+ args = parser.parse_args()
214
+ convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
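The conversion entry point can also be driven from Python rather than the CLI defined above; a sketch under the assumption that an original checkpoint has already been downloaded from the sail-sg/poolformer repository (both paths below are placeholders):

```python
from transformers.models.poolformer.convert_poolformer_original_to_pytorch import (
    convert_poolformer_checkpoint,
)

# Placeholder paths; the .pth checkpoint comes from https://github.com/sail-sg/poolformer
convert_poolformer_checkpoint(
    model_name="poolformer_s12",
    checkpoint_path="weights/poolformer_s12.pth.tar",
    pytorch_dump_folder_path="./poolformer_s12_hf",
)
```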
llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/feature_extraction_poolformer.py ADDED
@@ -0,0 +1,33 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Feature extractor class for PoolFormer."""
+
+ import warnings
+
+ from ...utils import logging
+ from .image_processing_poolformer import PoolFormerImageProcessor
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
+     def __init__(self, *args, **kwargs) -> None:
+         warnings.warn(
+             "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+             " Please use PoolFormerImageProcessor instead.",
+             FutureWarning,
+         )
+         super().__init__(*args, **kwargs)
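The class above is purely a deprecation shim: constructing it emits the `FutureWarning` and then behaves exactly like `PoolFormerImageProcessor`. A tiny sketch of that behaviour:

```python
import warnings

from transformers import PoolFormerFeatureExtractor, PoolFormerImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = PoolFormerFeatureExtractor()  # triggers the FutureWarning above

assert isinstance(extractor, PoolFormerImageProcessor)
print(caught[0].category.__name__)  # FutureWarning
```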
llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/image_processing_poolformer.py ADDED
@@ -0,0 +1,377 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for PoolFormer."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ get_resize_output_image_size,
24
+ resize,
25
+ to_channel_dimension_format,
26
+ )
27
+ from ...image_utils import (
28
+ IMAGENET_DEFAULT_MEAN,
29
+ IMAGENET_DEFAULT_STD,
30
+ ChannelDimension,
31
+ ImageInput,
32
+ PILImageResampling,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ make_list_of_images,
36
+ to_numpy_array,
37
+ valid_images,
38
+ validate_kwargs,
39
+ validate_preprocess_arguments,
40
+ )
41
+ from ...utils import TensorType, is_vision_available, logging
42
+
43
+
44
+ if is_vision_available():
45
+ import PIL
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ class PoolFormerImageProcessor(BaseImageProcessor):
52
+ r"""
53
+ Constructs a PoolFormer image processor.
54
+
55
+ Args:
56
+ do_resize (`bool`, *optional*, defaults to `True`):
57
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
58
+ `do_resize` in the `preprocess` method.
59
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
60
+ Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. If crop_pct is
61
+ unset:
62
+ - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`.
63
+ - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the
64
+ aspect ratio.
65
+
66
+ If crop_pct is set:
67
+ - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)),
68
+ int(floor(w/crop_pct)))`
69
+ - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)`
70
+ whilst maintaining the aspect ratio.
71
+ - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)`
72
+ whilst maintaining the aspect ratio.
73
+ crop_pct (`float`, *optional*, defaults to 0.9):
74
+ Percentage of the image to crop from the center. Can be overridden by `crop_pct` in the `preprocess`
75
+ method.
76
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
77
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
78
+ do_center_crop (`bool`, *optional*, defaults to `True`):
79
+ Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
80
+ is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in the `preprocess`
81
+ method.
82
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
83
+ Size of the image after applying center crop. Only has an effect if `do_center_crop` is set to `True`. Can
84
+ be overridden by the `crop_size` parameter in the `preprocess` method.
85
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
86
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
87
+ `preprocess` method.
88
+ do_rescale (`bool`, *optional*, defaults to `True`):
89
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
90
+ parameter in the `preprocess` method.
91
+ do_normalize (`bool`, *optional*, defaults to `True`):
92
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
93
+ `preprocess` method.
94
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
95
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
96
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
97
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
98
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
99
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
100
+ """
101
+
102
+ model_input_names = ["pixel_values"]
103
+
104
+ def __init__(
105
+ self,
106
+ do_resize: bool = True,
107
+ size: Dict[str, int] = None,
108
+ crop_pct: int = 0.9,
109
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
110
+ do_center_crop: bool = True,
111
+ crop_size: Dict[str, int] = None,
112
+ rescale_factor: Union[int, float] = 1 / 255,
113
+ do_rescale: bool = True,
114
+ do_normalize: bool = True,
115
+ image_mean: Optional[Union[float, List[float]]] = None,
116
+ image_std: Optional[Union[float, List[float]]] = None,
117
+ **kwargs,
118
+ ) -> None:
119
+ super().__init__(**kwargs)
120
+ size = size if size is not None else {"shortest_edge": 224}
121
+ size = get_size_dict(size, default_to_square=False)
122
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
123
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
124
+
125
+ self.do_resize = do_resize
126
+ self.size = size
127
+ self.crop_pct = crop_pct
128
+ self.resample = resample
129
+ self.do_center_crop = do_center_crop
130
+ self.crop_size = crop_size
131
+ self.do_rescale = do_rescale
132
+ self.rescale_factor = rescale_factor
133
+ self.do_normalize = do_normalize
134
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
135
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
136
+ self._valid_processor_keys = [
137
+ "images",
138
+ "do_resize",
139
+ "size",
140
+ "crop_pct",
141
+ "resample",
142
+ "do_center_crop",
143
+ "crop_size",
144
+ "do_rescale",
145
+ "rescale_factor",
146
+ "do_normalize",
147
+ "image_mean",
148
+ "image_std",
149
+ "return_tensors",
150
+ "data_format",
151
+ "input_data_format",
152
+ ]
153
+
154
+ def resize(
155
+ self,
156
+ image: np.ndarray,
157
+ size: Dict[str, int],
158
+ crop_pct: Optional[float] = None,
159
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
160
+ data_format: Optional[Union[str, ChannelDimension]] = None,
161
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
162
+ **kwargs,
163
+ ) -> np.ndarray:
164
+ """
165
+ Resize an image.
166
+
167
+ If crop_pct is unset:
168
+ - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`.
169
+ - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the
170
+ aspect ratio.
171
+
172
+ if crop_pct is set:
173
+ - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)),
174
+ int(floor(w/crop_pct)))`
175
+ - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)`
176
+ whilst maintaining the aspect ratio.
177
+ - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct)`
178
+ whilst maintaining the aspect ratio.
179
+
180
+ Args:
181
+ image (`np.ndarray`):
182
+ Image to resize.
183
+ size (`Dict[str, int]`):
184
+ Size of the output image.
185
+ crop_pct (`float`, *optional*):
186
+ Percentage of the image that will be cropped from the center. If set, the image is resized
187
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
188
+ Resampling filter to use when resizing the image.
189
+ data_format (`str` or `ChannelDimension`, *optional*):
190
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
191
+ input_data_format (`str` or `ChannelDimension`, *optional*):
192
+ The channel dimension format of the input image. If not provided, it will be inferred.
193
+ """
194
+ size = get_size_dict(size, default_to_square=False)
195
+ if "shortest_edge" not in size and ("height" not in size or "width" not in size):
196
+ raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
197
+ if crop_pct is not None:
198
+ if "shortest_edge" in size:
199
+ scale_size = int(size["shortest_edge"] / crop_pct)
200
+ elif "height" in size and "width" in size:
201
+ if size["height"] == size["width"]:
202
+ scale_size = int(size["height"] / crop_pct)
203
+ else:
204
+ scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
205
+ else:
206
+ raise ValueError("Invalid size for resize: {}".format(size))
207
+
208
+ output_size = get_resize_output_image_size(
209
+ image, size=scale_size, default_to_square=False, input_data_format=input_data_format
210
+ )
211
+ else:
212
+ if "shortest_edge" in size:
213
+ output_size = get_resize_output_image_size(
214
+ image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
215
+ )
216
+ elif "height" in size and "width" in size:
217
+ output_size = (size["height"], size["width"])
218
+ else:
219
+ raise ValueError("Invalid size for resize: {}".format(size))
220
+
221
+ return resize(
222
+ image,
223
+ size=output_size,
224
+ resample=resample,
225
+ data_format=data_format,
226
+ input_data_format=input_data_format,
227
+ **kwargs,
228
+ )
229
+
230
+ def preprocess(
231
+ self,
232
+ images: ImageInput,
233
+ do_resize: bool = None,
234
+ size: Dict[str, int] = None,
235
+ crop_pct: int = None,
236
+ resample: PILImageResampling = None,
237
+ do_center_crop: bool = None,
238
+ crop_size: Dict[str, int] = None,
239
+ do_rescale: bool = None,
240
+ rescale_factor: float = None,
241
+ do_normalize: bool = None,
242
+ image_mean: Optional[Union[float, List[float]]] = None,
243
+ image_std: Optional[Union[float, List[float]]] = None,
244
+ return_tensors: Optional[Union[str, TensorType]] = None,
245
+ data_format: ChannelDimension = ChannelDimension.FIRST,
246
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
247
+ **kwargs,
248
+ ) -> PIL.Image.Image:
249
+ """
250
+ Preprocess an image or batch of images.
251
+
252
+ Args:
253
+ images (`ImageInput`):
254
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
255
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
256
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
257
+ Whether to resize the image.
258
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
259
+ Size of the image after applying resize.
260
+ crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
261
+ Percentage of the image to crop. Only has an effect if `do_resize` is set to `True`.
262
+ resample (`int`, *optional*, defaults to `self.resample`):
263
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
264
+ has an effect if `do_resize` is set to `True`.
265
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
266
+ Whether to center crop the image.
267
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
268
+ Size of the image after applying center crop.
269
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
270
+ Whether to rescale the image values between [0 - 1].
271
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
272
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
273
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
274
+ Whether to normalize the image.
275
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
276
+ Image mean.
277
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
278
+ Image standard deviation.
279
+ return_tensors (`str` or `TensorType`, *optional*):
280
+ The type of tensors to return. Can be one of:
281
+ - Unset: Return a list of `np.ndarray`.
282
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
283
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
284
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
285
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
286
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
287
+ The channel dimension format for the output image. Can be one of:
288
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
289
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
290
+ input_data_format (`ChannelDimension` or `str`, *optional*):
291
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
292
+ from the input image. Can be one of:
293
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
294
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
295
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
296
+ """
297
+ do_resize = do_resize if do_resize is not None else self.do_resize
298
+ crop_pct = crop_pct if crop_pct is not None else self.crop_pct
299
+ resample = resample if resample is not None else self.resample
300
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
301
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
302
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
303
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
304
+ image_mean = image_mean if image_mean is not None else self.image_mean
305
+ image_std = image_std if image_std is not None else self.image_std
306
+
307
+ size = size if size is not None else self.size
308
+ size = get_size_dict(size, default_to_square=False)
309
+ crop_size = crop_size if crop_size is not None else self.crop_size
310
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
311
+
312
+ images = make_list_of_images(images)
313
+
314
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
315
+
316
+ if not valid_images(images):
317
+ raise ValueError(
318
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
319
+ "torch.Tensor, tf.Tensor or jax.ndarray."
320
+ )
321
+ validate_preprocess_arguments(
322
+ do_rescale=do_rescale,
323
+ rescale_factor=rescale_factor,
324
+ do_normalize=do_normalize,
325
+ image_mean=image_mean,
326
+ image_std=image_std,
327
+ do_center_crop=do_center_crop,
328
+ crop_size=crop_size,
329
+ do_resize=do_resize,
330
+ size=size,
331
+ resample=resample,
332
+ )
333
+
334
+ # All transformations expect numpy arrays.
335
+ images = [to_numpy_array(image) for image in images]
336
+
337
+ if is_scaled_image(images[0]) and do_rescale:
338
+ logger.warning_once(
339
+ "It looks like you are trying to rescale already rescaled images. If the input"
340
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
341
+ )
342
+
343
+ if input_data_format is None:
344
+ # We assume that all images have the same channel dimension format.
345
+ input_data_format = infer_channel_dimension_format(images[0])
346
+
347
+ if do_resize:
348
+ images = [
349
+ self.resize(
350
+ image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
351
+ )
352
+ for image in images
353
+ ]
354
+
355
+ if do_center_crop:
356
+ images = [
357
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
358
+ ]
359
+
360
+ if do_rescale:
361
+ images = [
362
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
363
+ for image in images
364
+ ]
365
+
366
+ if do_normalize:
367
+ images = [
368
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
369
+ for image in images
370
+ ]
371
+
372
+ images = [
373
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
374
+ ]
375
+
376
+ data = {"pixel_values": images}
377
+ return BatchFeature(data=data, tensor_type=return_tensors)
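End to end, `preprocess` resizes (optionally scaled up by `1 / crop_pct`), center-crops, rescales, normalizes and packs the result into a `BatchFeature`. A minimal usage sketch with the defaults defined above; the image URL is the COCO sample the conversion script also uses:

```python
import requests
from PIL import Image

from transformers import PoolFormerImageProcessor

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)

processor = PoolFormerImageProcessor()  # shortest_edge=224, crop_pct=0.9, 224x224 center crop
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)     # torch.Size([1, 3, 224, 224])
```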
llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/modeling_poolformer.py ADDED
@@ -0,0 +1,448 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Sea AI Lab and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch PoolFormer model."""
16
+
17
+
18
+ import collections.abc
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
28
+ from ...modeling_utils import PreTrainedModel
29
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
30
+ from .configuration_poolformer import PoolFormerConfig
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ # General docstring
36
+ _CONFIG_FOR_DOC = "PoolFormerConfig"
37
+
38
+ # Base docstring
39
+ _CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
40
+ _EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
41
+
42
+ # Image classification docstring
43
+ _IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
44
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
45
+
46
+
47
+ from ..deprecated._archive_maps import POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
48
+
49
+
50
+ # Copied from transformers.models.beit.modeling_beit.drop_path
51
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
52
+ """
53
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
54
+
55
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
56
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
57
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
58
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
59
+ argument.
60
+ """
61
+ if drop_prob == 0.0 or not training:
62
+ return input
63
+ keep_prob = 1 - drop_prob
64
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
65
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
66
+ random_tensor.floor_() # binarize
67
+ output = input.div(keep_prob) * random_tensor
68
+ return output
69
+
70
+
71
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->PoolFormer
72
+ class PoolFormerDropPath(nn.Module):
73
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
74
+
75
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
76
+ super().__init__()
77
+ self.drop_prob = drop_prob
78
+
79
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
80
+ return drop_path(hidden_states, self.drop_prob, self.training)
81
+
82
+ def extra_repr(self) -> str:
83
+ return "p={}".format(self.drop_prob)
84
+
85
+
86
+ class PoolFormerEmbeddings(nn.Module):
87
+ """
88
+ Construct Patch Embeddings.
89
+ """
90
+
91
+ def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
92
+ super().__init__()
93
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
94
+ stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
95
+ padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)
96
+
97
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
98
+ self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()
99
+
100
+ def forward(self, pixel_values):
101
+ embeddings = self.projection(pixel_values)
102
+ embeddings = self.norm(embeddings)
103
+ return embeddings
104
+
105
+
106
+ class PoolFormerGroupNorm(nn.GroupNorm):
107
+ """
108
+ Group Normalization with 1 group. Input: tensor in shape [B, C, H, W]
109
+ """
110
+
111
+ def __init__(self, num_channels, **kwargs):
112
+ super().__init__(1, num_channels, **kwargs)
113
+
114
+
115
+ class PoolFormerPooling(nn.Module):
116
+ def __init__(self, pool_size):
117
+ super().__init__()
118
+ self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
119
+
120
+ def forward(self, hidden_states):
121
+ return self.pool(hidden_states) - hidden_states
122
+
123
+
124
+ class PoolFormerOutput(nn.Module):
125
+ def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
126
+ super().__init__()
127
+ self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
128
+ self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
129
+ self.drop = PoolFormerDropPath(dropout_prob)
130
+ if isinstance(config.hidden_act, str):
131
+ self.act_fn = ACT2FN[config.hidden_act]
132
+ else:
133
+ self.act_fn = config.hidden_act
134
+
135
+ def forward(self, hidden_states):
136
+ hidden_states = self.conv1(hidden_states)
137
+ hidden_states = self.act_fn(hidden_states)
138
+ hidden_states = self.drop(hidden_states)
139
+ hidden_states = self.conv2(hidden_states)
140
+ hidden_states = self.drop(hidden_states)
141
+
142
+ return hidden_states
143
+
144
+
145
+ class PoolFormerLayer(nn.Module):
146
+ """This corresponds to the 'PoolFormerBlock' class in the original implementation."""
147
+
148
+ def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
149
+ super().__init__()
150
+ self.pooling = PoolFormerPooling(pool_size)
151
+ self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
152
+ self.before_norm = PoolFormerGroupNorm(num_channels)
153
+ self.after_norm = PoolFormerGroupNorm(num_channels)
154
+
155
+ # Useful for training neural nets
156
+ self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
157
+ self.use_layer_scale = config.use_layer_scale
158
+ if config.use_layer_scale:
159
+ self.layer_scale_1 = nn.Parameter(
160
+ config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
161
+ )
162
+ self.layer_scale_2 = nn.Parameter(
163
+ config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
164
+ )
165
+
166
+ def forward(self, hidden_states):
167
+ if self.use_layer_scale:
168
+ pooling_output = self.pooling(self.before_norm(hidden_states))
169
+ scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
170
+ # First residual connection
171
+ hidden_states = hidden_states + self.drop_path(scaled_op)
172
+ outputs = ()
173
+
174
+ layer_output = self.output(self.after_norm(hidden_states))
175
+ scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
176
+ # Second residual connection
177
+ output = hidden_states + self.drop_path(scaled_op)
178
+
179
+ outputs = (output,) + outputs
180
+ return outputs
181
+
182
+ else:
183
+ pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
184
+ # First residual connection
185
+ hidden_states = pooling_output + hidden_states
186
+ outputs = ()
187
+
188
+ # Second residual connection inside the PoolFormerOutput block
189
+ layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
190
+ output = hidden_states + layer_output
191
+
192
+ outputs = (output,) + outputs
193
+ return outputs
194
+
195
+
196
+ class PoolFormerEncoder(nn.Module):
197
+ def __init__(self, config):
198
+ super().__init__()
199
+ self.config = config
200
+ # stochastic depth decay rule
201
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
202
+
203
+ # patch embeddings
204
+ embeddings = []
205
+ for i in range(config.num_encoder_blocks):
206
+ embeddings.append(
207
+ PoolFormerEmbeddings(
208
+ patch_size=config.patch_sizes[i],
209
+ stride=config.strides[i],
210
+ padding=config.padding[i],
211
+ num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
212
+ hidden_size=config.hidden_sizes[i],
213
+ )
214
+ )
215
+ self.patch_embeddings = nn.ModuleList(embeddings)
216
+
217
+ # Transformer blocks
218
+ blocks = []
219
+ cur = 0
220
+ for i in range(config.num_encoder_blocks):
221
+ # each block consists of layers
222
+ layers = []
223
+ if i != 0:
224
+ cur += config.depths[i - 1]
225
+ for j in range(config.depths[i]):
226
+ layers.append(
227
+ PoolFormerLayer(
228
+ config,
229
+ num_channels=config.hidden_sizes[i],
230
+ pool_size=config.pool_size,
231
+ hidden_size=config.hidden_sizes[i],
232
+ intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
233
+ drop_path=dpr[cur + j],
234
+ )
235
+ )
236
+ blocks.append(nn.ModuleList(layers))
237
+
238
+ self.block = nn.ModuleList(blocks)
239
+
240
+ def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
241
+ all_hidden_states = () if output_hidden_states else None
242
+
243
+ hidden_states = pixel_values
244
+ for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
245
+ embedding_layer, block_layer = layers
246
+ # Get patch embeddings from hidden_states
247
+ hidden_states = embedding_layer(hidden_states)
248
+ # Send the embeddings through the blocks
249
+ for _, blk in enumerate(block_layer):
250
+ layer_outputs = blk(hidden_states)
251
+ hidden_states = layer_outputs[0]
252
+
253
+ if output_hidden_states:
254
+ all_hidden_states = all_hidden_states + (hidden_states,)
255
+
256
+ if not return_dict:
257
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
258
+
259
+ return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
260
+
261
+
262
+ class PoolFormerPreTrainedModel(PreTrainedModel):
263
+ """
264
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
265
+ models.
266
+ """
267
+
268
+ config_class = PoolFormerConfig
269
+ base_model_prefix = "poolformer"
270
+ main_input_name = "pixel_values"
271
+
272
+ def _init_weights(self, module):
273
+ """Initialize the weights"""
274
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
275
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
276
+ if module.bias is not None:
277
+ module.bias.data.zero_()
278
+ elif isinstance(module, nn.LayerNorm):
279
+ module.bias.data.zero_()
280
+ module.weight.data.fill_(1.0)
281
+
282
+
283
+ POOLFORMER_START_DOCSTRING = r"""
284
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
285
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
286
+ behavior.
287
+
288
+ Parameters:
289
+ config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
290
+ Initializing with a config file does not load the weights associated with the model, only the
291
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
292
+ """
293
+
294
+ POOLFORMER_INPUTS_DOCSTRING = r"""
295
+ Args:
296
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
297
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
298
+ [`PoolFormerImageProcessor.__call__`] for details.
299
+ """
300
+
301
+
302
+ @add_start_docstrings(
303
+ "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
304
+ POOLFORMER_START_DOCSTRING,
305
+ )
306
+ class PoolFormerModel(PoolFormerPreTrainedModel):
307
+ def __init__(self, config):
308
+ super().__init__(config)
309
+ self.config = config
310
+
311
+ self.encoder = PoolFormerEncoder(config)
312
+
313
+ # Initialize weights and apply final processing
314
+ self.post_init()
315
+
316
+ def get_input_embeddings(self):
317
+ return self.embeddings.patch_embeddings
318
+
319
+ @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
320
+ @add_code_sample_docstrings(
321
+ checkpoint=_CHECKPOINT_FOR_DOC,
322
+ output_type=BaseModelOutputWithNoAttention,
323
+ config_class=_CONFIG_FOR_DOC,
324
+ modality="vision",
325
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
326
+ )
327
+ def forward(
328
+ self,
329
+ pixel_values: Optional[torch.FloatTensor] = None,
330
+ output_hidden_states: Optional[bool] = None,
331
+ return_dict: Optional[bool] = None,
332
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
333
+ output_hidden_states = (
334
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
335
+ )
336
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
337
+
338
+ if pixel_values is None:
339
+ raise ValueError("You have to specify pixel_values")
340
+
341
+ encoder_outputs = self.encoder(
342
+ pixel_values,
343
+ output_hidden_states=output_hidden_states,
344
+ return_dict=return_dict,
345
+ )
346
+ sequence_output = encoder_outputs[0]
347
+
348
+ if not return_dict:
349
+ return (sequence_output, None) + encoder_outputs[1:]
350
+
351
+ return BaseModelOutputWithNoAttention(
352
+ last_hidden_state=sequence_output,
353
+ hidden_states=encoder_outputs.hidden_states,
354
+ )
355
+
356
+
357
+ class PoolFormerFinalPooler(nn.Module):
358
+ def __init__(self, config):
359
+ super().__init__()
360
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
361
+
362
+ def forward(self, hidden_states):
363
+ output = self.dense(hidden_states)
364
+ return output
365
+
366
+
367
+ @add_start_docstrings(
368
+ """
369
+ PoolFormer Model transformer with an image classification head on top
370
+ """,
371
+ POOLFORMER_START_DOCSTRING,
372
+ )
373
+ class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
374
+ def __init__(self, config):
375
+ super().__init__(config)
376
+ self.num_labels = config.num_labels
377
+ self.poolformer = PoolFormerModel(config)
378
+
379
+ # Final norm
380
+ self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
381
+ # Classifier head
382
+ self.classifier = (
383
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
384
+ )
385
+
386
+ # Initialize weights and apply final processing
387
+ self.post_init()
388
+
389
+ @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
390
+ @add_code_sample_docstrings(
391
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
392
+ output_type=ImageClassifierOutputWithNoAttention,
393
+ config_class=_CONFIG_FOR_DOC,
394
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
395
+ )
396
+ def forward(
397
+ self,
398
+ pixel_values: Optional[torch.FloatTensor] = None,
399
+ labels: Optional[torch.LongTensor] = None,
400
+ output_hidden_states: Optional[bool] = None,
401
+ return_dict: Optional[bool] = None,
402
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
403
+ r"""
404
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
405
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
406
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
407
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
408
+ """
409
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
410
+
411
+ outputs = self.poolformer(
412
+ pixel_values,
413
+ output_hidden_states=output_hidden_states,
414
+ return_dict=return_dict,
415
+ )
416
+
417
+ sequence_output = outputs[0]
418
+
419
+ logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))
420
+
421
+ loss = None
422
+ if labels is not None:
423
+ if self.config.problem_type is None:
424
+ if self.num_labels == 1:
425
+ self.config.problem_type = "regression"
426
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
427
+ self.config.problem_type = "single_label_classification"
428
+ else:
429
+ self.config.problem_type = "multi_label_classification"
430
+
431
+ if self.config.problem_type == "regression":
432
+ loss_fct = MSELoss()
433
+ if self.num_labels == 1:
434
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
435
+ else:
436
+ loss = loss_fct(logits, labels)
437
+ elif self.config.problem_type == "single_label_classification":
438
+ loss_fct = CrossEntropyLoss()
439
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
440
+ elif self.config.problem_type == "multi_label_classification":
441
+ loss_fct = BCEWithLogitsLoss()
442
+ loss = loss_fct(logits, labels)
443
+
444
+ if not return_dict:
445
+ output = (logits,) + outputs[2:]
446
+ return ((loss,) + output) if loss is not None else output
447
+
448
+ return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
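Putting the pieces together, classification is a `PoolFormerModel` forward pass followed by the final group norm, a spatial mean over height and width, and the linear classifier. A short inference sketch against the `sail/poolformer_s12` checkpoint referenced in the docstrings above:

```python
import requests
import torch
from PIL import Image

from transformers import PoolFormerForImageClassification, PoolFormerImageProcessor

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)

processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tabby, tabby cat"
```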
llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__init__.py ADDED
@@ -0,0 +1,153 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_roberta_prelayernorm": [
28
+ "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
29
+ "RobertaPreLayerNormConfig",
30
+ "RobertaPreLayerNormOnnxConfig",
31
+ ],
32
+ }
33
+
34
+ try:
35
+ if not is_torch_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ _import_structure["modeling_roberta_prelayernorm"] = [
41
+ "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
42
+ "RobertaPreLayerNormForCausalLM",
43
+ "RobertaPreLayerNormForMaskedLM",
44
+ "RobertaPreLayerNormForMultipleChoice",
45
+ "RobertaPreLayerNormForQuestionAnswering",
46
+ "RobertaPreLayerNormForSequenceClassification",
47
+ "RobertaPreLayerNormForTokenClassification",
48
+ "RobertaPreLayerNormModel",
49
+ "RobertaPreLayerNormPreTrainedModel",
50
+ ]
51
+
52
+ try:
53
+ if not is_tf_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ _import_structure["modeling_tf_roberta_prelayernorm"] = [
59
+ "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
60
+ "TFRobertaPreLayerNormForCausalLM",
61
+ "TFRobertaPreLayerNormForMaskedLM",
62
+ "TFRobertaPreLayerNormForMultipleChoice",
63
+ "TFRobertaPreLayerNormForQuestionAnswering",
64
+ "TFRobertaPreLayerNormForSequenceClassification",
65
+ "TFRobertaPreLayerNormForTokenClassification",
66
+ "TFRobertaPreLayerNormMainLayer",
67
+ "TFRobertaPreLayerNormModel",
68
+ "TFRobertaPreLayerNormPreTrainedModel",
69
+ ]
70
+
71
+ try:
72
+ if not is_flax_available():
73
+ raise OptionalDependencyNotAvailable()
74
+ except OptionalDependencyNotAvailable:
75
+ pass
76
+ else:
77
+ _import_structure["modeling_flax_roberta_prelayernorm"] = [
78
+ "FlaxRobertaPreLayerNormForCausalLM",
79
+ "FlaxRobertaPreLayerNormForMaskedLM",
80
+ "FlaxRobertaPreLayerNormForMultipleChoice",
81
+ "FlaxRobertaPreLayerNormForQuestionAnswering",
82
+ "FlaxRobertaPreLayerNormForSequenceClassification",
83
+ "FlaxRobertaPreLayerNormForTokenClassification",
84
+ "FlaxRobertaPreLayerNormModel",
85
+ "FlaxRobertaPreLayerNormPreTrainedModel",
86
+ ]
87
+
88
+
89
+ if TYPE_CHECKING:
90
+ from .configuration_roberta_prelayernorm import (
91
+ ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
92
+ RobertaPreLayerNormConfig,
93
+ RobertaPreLayerNormOnnxConfig,
94
+ )
95
+
96
+ try:
97
+ if not is_torch_available():
98
+ raise OptionalDependencyNotAvailable()
99
+ except OptionalDependencyNotAvailable:
100
+ pass
101
+ else:
102
+ from .modeling_roberta_prelayernorm import (
103
+ ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
104
+ RobertaPreLayerNormForCausalLM,
105
+ RobertaPreLayerNormForMaskedLM,
106
+ RobertaPreLayerNormForMultipleChoice,
107
+ RobertaPreLayerNormForQuestionAnswering,
108
+ RobertaPreLayerNormForSequenceClassification,
109
+ RobertaPreLayerNormForTokenClassification,
110
+ RobertaPreLayerNormModel,
111
+ RobertaPreLayerNormPreTrainedModel,
112
+ )
113
+
114
+ try:
115
+ if not is_tf_available():
116
+ raise OptionalDependencyNotAvailable()
117
+ except OptionalDependencyNotAvailable:
118
+ pass
119
+ else:
120
+ from .modeling_tf_roberta_prelayernorm import (
121
+ TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
122
+ TFRobertaPreLayerNormForCausalLM,
123
+ TFRobertaPreLayerNormForMaskedLM,
124
+ TFRobertaPreLayerNormForMultipleChoice,
125
+ TFRobertaPreLayerNormForQuestionAnswering,
126
+ TFRobertaPreLayerNormForSequenceClassification,
127
+ TFRobertaPreLayerNormForTokenClassification,
128
+ TFRobertaPreLayerNormMainLayer,
129
+ TFRobertaPreLayerNormModel,
130
+ TFRobertaPreLayerNormPreTrainedModel,
131
+ )
132
+
133
+ try:
134
+ if not is_flax_available():
135
+ raise OptionalDependencyNotAvailable()
136
+ except OptionalDependencyNotAvailable:
137
+ pass
138
+ else:
139
+ from .modeling_flax_roberta_prelayernorm import (
140
+ FlaxRobertaPreLayerNormForCausalLM,
141
+ FlaxRobertaPreLayerNormForMaskedLM,
142
+ FlaxRobertaPreLayerNormForMultipleChoice,
143
+ FlaxRobertaPreLayerNormForQuestionAnswering,
144
+ FlaxRobertaPreLayerNormForSequenceClassification,
145
+ FlaxRobertaPreLayerNormForTokenClassification,
146
+ FlaxRobertaPreLayerNormModel,
147
+ FlaxRobertaPreLayerNormPreTrainedModel,
148
+ )
149
+
150
+ else:
151
+ import sys
152
+
153
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
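The `__init__` above only populates `_import_structure` and then replaces the module with a `_LazyModule`, so the torch, TF and Flax modeling files are imported the first time one of their classes is accessed rather than at package import time; the `TYPE_CHECKING` branch keeps the eager imports visible to static type checkers. A minimal sketch of that deferral pattern follows (a simplified, hypothetical `LazyNamespace`, not the real `_LazyModule`).

```python
# Editor's sketch: a stripped-down lazy-import namespace illustrating the pattern
# used by transformers' _LazyModule (illustration only, not the library code).
import importlib
import types


class LazyNamespace(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the module that actually defines it.
        self._attr_to_module = {
            attr: module_name for module_name, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # Called only when normal lookup fails, i.e. on first access to an export.
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups skip the import machinery
        return value


# "json" is imported only when ns.dumps is first touched.
ns = LazyNamespace("demo", {"json": ["dumps", "loads"]})
print(ns.dumps({"lazy": True}))
```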
llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.58 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/configuration_roberta_prelayernorm.cpython-310.pyc ADDED
Binary file (6.91 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.9 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/modeling_flax_roberta_prelayernorm.cpython-310.pyc ADDED
Binary file (36.1 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/modeling_roberta_prelayernorm.cpython-310.pyc ADDED
Binary file (46.1 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/__pycache__/modeling_tf_roberta_prelayernorm.cpython-310.pyc ADDED
Binary file (52.4 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py ADDED
@@ -0,0 +1,156 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ RoBERTa-PreLayerNorm configuration"""
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ from ..deprecated._archive_maps import ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ # Copied from transformers.models.roberta.configuration_roberta.RobertaConfig with FacebookAI/roberta-base->andreasmadsen/efficient_mlm_m0.40,RoBERTa->RoBERTa-PreLayerNorm,Roberta->RobertaPreLayerNorm,roberta->roberta-prelayernorm
32
+ class RobertaPreLayerNormConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`RobertaPreLayerNormModel`] or a [`TFRobertaPreLayerNormModel`]. It is
35
+ used to instantiate a RoBERTa-PreLayerNorm model according to the specified arguments, defining the model architecture.
36
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa-PreLayerNorm
37
+ [andreasmadsen/efficient_mlm_m0.40](https://huggingface.co/andreasmadsen/efficient_mlm_m0.40) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ vocab_size (`int`, *optional*, defaults to 50265):
45
+ Vocabulary size of the RoBERTa-PreLayerNorm model. Defines the number of different tokens that can be represented by the
46
+ `inputs_ids` passed when calling [`RobertaPreLayerNormModel`] or [`TFRobertaPreLayerNormModel`].
47
+ hidden_size (`int`, *optional*, defaults to 768):
48
+ Dimensionality of the encoder layers and the pooler layer.
49
+ num_hidden_layers (`int`, *optional*, defaults to 12):
50
+ Number of hidden layers in the Transformer encoder.
51
+ num_attention_heads (`int`, *optional*, defaults to 12):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ intermediate_size (`int`, *optional*, defaults to 3072):
54
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
55
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
58
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout ratio for the attention probabilities.
62
+ max_position_embeddings (`int`, *optional*, defaults to 512):
63
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
64
+ just in case (e.g., 512 or 1024 or 2048).
65
+ type_vocab_size (`int`, *optional*, defaults to 2):
66
+ The vocabulary size of the `token_type_ids` passed when calling [`RobertaPreLayerNormModel`] or [`TFRobertaPreLayerNormModel`].
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
70
+ The epsilon used by the layer normalization layers.
71
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
72
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
73
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
74
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
75
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
76
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
77
+ is_decoder (`bool`, *optional*, defaults to `False`):
78
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
79
+ use_cache (`bool`, *optional*, defaults to `True`):
80
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
81
+ relevant if `config.is_decoder=True`.
82
+ classifier_dropout (`float`, *optional*):
83
+ The dropout ratio for the classification head.
84
+
85
+ Examples:
86
+
87
+ ```python
88
+ >>> from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel
89
+
90
+ >>> # Initializing a RoBERTa-PreLayerNorm configuration
91
+ >>> configuration = RobertaPreLayerNormConfig()
92
+
93
+ >>> # Initializing a model (with random weights) from the configuration
94
+ >>> model = RobertaPreLayerNormModel(configuration)
95
+
96
+ >>> # Accessing the model configuration
97
+ >>> configuration = model.config
98
+ ```"""
99
+
100
+ model_type = "roberta-prelayernorm"
101
+
102
+ def __init__(
103
+ self,
104
+ vocab_size=50265,
105
+ hidden_size=768,
106
+ num_hidden_layers=12,
107
+ num_attention_heads=12,
108
+ intermediate_size=3072,
109
+ hidden_act="gelu",
110
+ hidden_dropout_prob=0.1,
111
+ attention_probs_dropout_prob=0.1,
112
+ max_position_embeddings=512,
113
+ type_vocab_size=2,
114
+ initializer_range=0.02,
115
+ layer_norm_eps=1e-12,
116
+ pad_token_id=1,
117
+ bos_token_id=0,
118
+ eos_token_id=2,
119
+ position_embedding_type="absolute",
120
+ use_cache=True,
121
+ classifier_dropout=None,
122
+ **kwargs,
123
+ ):
124
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
125
+
126
+ self.vocab_size = vocab_size
127
+ self.hidden_size = hidden_size
128
+ self.num_hidden_layers = num_hidden_layers
129
+ self.num_attention_heads = num_attention_heads
130
+ self.hidden_act = hidden_act
131
+ self.intermediate_size = intermediate_size
132
+ self.hidden_dropout_prob = hidden_dropout_prob
133
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
134
+ self.max_position_embeddings = max_position_embeddings
135
+ self.type_vocab_size = type_vocab_size
136
+ self.initializer_range = initializer_range
137
+ self.layer_norm_eps = layer_norm_eps
138
+ self.position_embedding_type = position_embedding_type
139
+ self.use_cache = use_cache
140
+ self.classifier_dropout = classifier_dropout
141
+
142
+
143
+ # Copied from transformers.models.roberta.configuration_roberta.RobertaOnnxConfig with Roberta->RobertaPreLayerNorm
144
+ class RobertaPreLayerNormOnnxConfig(OnnxConfig):
145
+ @property
146
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
147
+ if self.task == "multiple-choice":
148
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
149
+ else:
150
+ dynamic_axis = {0: "batch", 1: "sequence"}
151
+ return OrderedDict(
152
+ [
153
+ ("input_ids", dynamic_axis),
154
+ ("attention_mask", dynamic_axis),
155
+ ]
156
+ )
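For the ONNX config above, the `inputs` property only decides which axes of `input_ids` and `attention_mask` are dynamic for a given task. A standalone restatement of that mapping (an editor's sketch; `onnx_input_axes` is a hypothetical helper name):

```python
# Editor's sketch: the dynamic-axis mapping produced by the `inputs` property above,
# restated as a plain function for illustration.
from collections import OrderedDict
from typing import Mapping


def onnx_input_axes(task: str) -> Mapping[str, Mapping[int, str]]:
    # Multiple-choice inputs carry an extra "choice" dimension between batch and sequence.
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])


print(onnx_input_axes("default"))          # batch/sequence axes only
print(onnx_input_axes("multiple-choice"))  # adds the choice axis
```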
llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,78 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert RoBERTa-PreLayerNorm checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+ from huggingface_hub import hf_hub_download
22
+
23
+ from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
24
+ from transformers.utils import logging
25
+
26
+
27
+ logging.set_verbosity_info()
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
32
+ """
33
+ Copy/paste/tweak roberta_prelayernorm's weights to our BERT structure.
34
+ """
35
+ # convert configuration
36
+ config = RobertaPreLayerNormConfig.from_pretrained(
37
+ checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
38
+ )
39
+
40
+ # convert state_dict
41
+ original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
42
+ state_dict = {}
43
+ for tensor_key, tensor_value in original_state_dict.items():
44
+ # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
45
+ if tensor_key.startswith("roberta."):
46
+ tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
47
+
48
+ # The original implementation contains weights which are not used, remove them from the state_dict
49
+ if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
50
+ continue
51
+
52
+ state_dict[tensor_key] = tensor_value
53
+
54
+ model = RobertaPreLayerNormForMaskedLM.from_pretrained(
55
+ pretrained_model_name_or_path=None, config=config, state_dict=state_dict
56
+ )
57
+ model.save_pretrained(pytorch_dump_folder_path)
58
+
59
+ # convert tokenizer
60
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
61
+ tokenizer.save_pretrained(pytorch_dump_folder_path)
62
+
63
+
64
+ if __name__ == "__main__":
65
+ parser = argparse.ArgumentParser()
66
+ # Required parameters
67
+ parser.add_argument(
68
+ "--checkpoint-repo",
69
+ default=None,
70
+ type=str,
71
+ required=True,
72
+ help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
73
+ )
74
+ parser.add_argument(
75
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
76
+ )
77
+ args = parser.parse_args()
78
+ convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
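The converter can also be driven from Python instead of the CLI shown in the `__main__` block. A hedged usage sketch, assuming the module is importable from the installed package and that the checkpoint repo is reachable on the Hub; the output folder name is an arbitrary choice.

```python
# Editor's sketch: programmatic use of the conversion function defined above.
# Equivalent CLI (assumed): python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#   --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 --pytorch_dump_folder_path ./roberta_prelayernorm_dump
from transformers.models.roberta_prelayernorm.convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch import (
    convert_roberta_prelayernorm_checkpoint_to_pytorch,
)

convert_roberta_prelayernorm_checkpoint_to_pytorch(
    checkpoint_repo="andreasmadsen/efficient_mlm_m0.40",
    pytorch_dump_folder_path="./roberta_prelayernorm_dump",
)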
llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py ADDED
@@ -0,0 +1,1514 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flax RoBERTa-PreLayerNorm model."""
16
+ from typing import Callable, Optional, Tuple
17
+
18
+ import flax.linen as nn
19
+ import jax
20
+ import jax.numpy as jnp
21
+ import numpy as np
22
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
23
+ from flax.linen import combine_masks, make_causal_mask
24
+ from flax.linen import partitioning as nn_partitioning
25
+ from flax.linen.attention import dot_product_attention_weights
26
+ from flax.traverse_util import flatten_dict, unflatten_dict
27
+ from jax import lax
28
+
29
+ from ...modeling_flax_outputs import (
30
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
31
+ FlaxBaseModelOutputWithPooling,
32
+ FlaxBaseModelOutputWithPoolingAndCrossAttentions,
33
+ FlaxCausalLMOutputWithCrossAttentions,
34
+ FlaxMaskedLMOutput,
35
+ FlaxMultipleChoiceModelOutput,
36
+ FlaxQuestionAnsweringModelOutput,
37
+ FlaxSequenceClassifierOutput,
38
+ FlaxTokenClassifierOutput,
39
+ )
40
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
41
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
42
+ from .configuration_roberta_prelayernorm import RobertaPreLayerNormConfig
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CHECKPOINT_FOR_DOC = "andreasmadsen/efficient_mlm_m0.40"
48
+ _CONFIG_FOR_DOC = "RobertaPreLayerNormConfig"
49
+
50
+ remat = nn_partitioning.remat
51
+
52
+
53
+ # Copied from transformers.models.roberta.modeling_flax_roberta.create_position_ids_from_input_ids
54
+ def create_position_ids_from_input_ids(input_ids, padding_idx):
55
+ """
56
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
57
+ are ignored. This is modified from fairseq's `utils.make_positions`.
58
+
59
+ Args:
60
+ input_ids: jnp.ndarray
61
+ padding_idx: int
62
+
63
+ Returns: jnp.ndarray
64
+ """
65
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
66
+ mask = (input_ids != padding_idx).astype("i4")
67
+
68
+ if mask.ndim > 2:
69
+ mask = mask.reshape((-1, mask.shape[-1]))
70
+ incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask
71
+ incremental_indices = incremental_indices.reshape(input_ids.shape)
72
+ else:
73
+ incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask
74
+
75
+ return incremental_indices.astype("i4") + padding_idx
76
+
77
+
78
+ ROBERTA_PRELAYERNORM_START_DOCSTRING = r"""
79
+
80
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
81
+ library implements for all its model (such as downloading, saving and converting weights from PyTorch models)
82
+
83
+ This model is also a
84
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
85
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
86
+ behavior.
87
+
88
+ Finally, this model supports inherent JAX features such as:
89
+
90
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
91
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
92
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
93
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
94
+
95
+ Parameters:
96
+ config ([`RobertaPreLayerNormConfig`]): Model configuration class with all the parameters of the
97
+ model. Initializing with a config file does not load the weights associated with the model, only the
98
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
99
+ """
100
+
101
+ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING = r"""
102
+ Args:
103
+ input_ids (`numpy.ndarray` of shape `({0})`):
104
+ Indices of input sequence tokens in the vocabulary.
105
+
106
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
107
+ [`PreTrainedTokenizer.__call__`] for details.
108
+
109
+ [What are input IDs?](../glossary#input-ids)
110
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
111
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
112
+
113
+ - 1 for tokens that are **not masked**,
114
+ - 0 for tokens that are **masked**.
115
+
116
+ [What are attention masks?](../glossary#attention-mask)
117
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
118
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
119
+ 1]`:
120
+
121
+ - 0 corresponds to a *sentence A* token,
122
+ - 1 corresponds to a *sentence B* token.
123
+
124
+ [What are token type IDs?](../glossary#token-type-ids)
125
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
126
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
127
+ config.max_position_embeddings - 1]`.
128
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
129
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
130
+
131
+ - 1 indicates the head is **not masked**,
132
+ - 0 indicates the head is **masked**.
133
+
134
+ return_dict (`bool`, *optional*):
135
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
136
+ """
137
+
138
+
139
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->RobertaPreLayerNorm
140
+ class FlaxRobertaPreLayerNormEmbeddings(nn.Module):
141
+ """Construct the embeddings from word, position and token_type embeddings."""
142
+
143
+ config: RobertaPreLayerNormConfig
144
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
145
+
146
+ def setup(self):
147
+ self.word_embeddings = nn.Embed(
148
+ self.config.vocab_size,
149
+ self.config.hidden_size,
150
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
151
+ dtype=self.dtype,
152
+ )
153
+ self.position_embeddings = nn.Embed(
154
+ self.config.max_position_embeddings,
155
+ self.config.hidden_size,
156
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
157
+ dtype=self.dtype,
158
+ )
159
+ self.token_type_embeddings = nn.Embed(
160
+ self.config.type_vocab_size,
161
+ self.config.hidden_size,
162
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
163
+ dtype=self.dtype,
164
+ )
165
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
166
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
167
+
168
+ def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
169
+ # Embed
170
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
171
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
172
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
173
+
174
+ # Sum all embeddings
175
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
176
+
177
+ # Layer Norm
178
+ hidden_states = self.LayerNorm(hidden_states)
179
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
180
+ return hidden_states
181
+
182
+
183
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->RobertaPreLayerNorm
184
+ class FlaxRobertaPreLayerNormSelfAttention(nn.Module):
185
+ config: RobertaPreLayerNormConfig
186
+ causal: bool = False
187
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
188
+
189
+ def setup(self):
190
+ self.head_dim = self.config.hidden_size // self.config.num_attention_heads
191
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
192
+ raise ValueError(
193
+ "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
194
+ " : {self.config.num_attention_heads}"
195
+ )
196
+
197
+ self.query = nn.Dense(
198
+ self.config.hidden_size,
199
+ dtype=self.dtype,
200
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
201
+ )
202
+ self.key = nn.Dense(
203
+ self.config.hidden_size,
204
+ dtype=self.dtype,
205
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
206
+ )
207
+ self.value = nn.Dense(
208
+ self.config.hidden_size,
209
+ dtype=self.dtype,
210
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
211
+ )
212
+
213
+ if self.causal:
214
+ self.causal_mask = make_causal_mask(
215
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
216
+ )
217
+
218
+ def _split_heads(self, hidden_states):
219
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
220
+
221
+ def _merge_heads(self, hidden_states):
222
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
223
+
224
+ @nn.compact
225
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
226
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
227
+ """
228
+ This function takes projected key, value states from a single input token and concatenates the states to cached
229
+ states from previous steps. This function is slighly adapted from the official Flax repository:
230
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
231
+ """
232
+ # detect if we're initializing by absence of existing cache data.
233
+ is_initialized = self.has_variable("cache", "cached_key")
234
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
235
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
236
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
237
+
238
+ if is_initialized:
239
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
240
+ # update key, value caches with our new 1d spatial slices
241
+ cur_index = cache_index.value
242
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
243
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
244
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
245
+ cached_key.value = key
246
+ cached_value.value = value
247
+ num_updated_cache_vectors = query.shape[1]
248
+ cache_index.value = cache_index.value + num_updated_cache_vectors
249
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
250
+ pad_mask = jnp.broadcast_to(
251
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
252
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
253
+ )
254
+ attention_mask = combine_masks(pad_mask, attention_mask)
255
+ return key, value, attention_mask
256
+
257
+ def __call__(
258
+ self,
259
+ hidden_states,
260
+ attention_mask,
261
+ layer_head_mask,
262
+ key_value_states: Optional[jnp.ndarray] = None,
263
+ init_cache: bool = False,
264
+ deterministic=True,
265
+ output_attentions: bool = False,
266
+ ):
267
+ # if key_value_states are provided this layer is used as a cross-attention layer
268
+ # for the decoder
269
+ is_cross_attention = key_value_states is not None
270
+ batch_size = hidden_states.shape[0]
271
+
272
+ # get query proj
273
+ query_states = self.query(hidden_states)
274
+ # get key, value proj
275
+ if is_cross_attention:
276
+ # cross_attentions
277
+ key_states = self.key(key_value_states)
278
+ value_states = self.value(key_value_states)
279
+ else:
280
+ # self_attention
281
+ key_states = self.key(hidden_states)
282
+ value_states = self.value(hidden_states)
283
+
284
+ query_states = self._split_heads(query_states)
285
+ key_states = self._split_heads(key_states)
286
+ value_states = self._split_heads(value_states)
287
+
288
+ # handle cache prepare causal attention mask
289
+ if self.causal:
290
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
291
+ if self.has_variable("cache", "cached_key"):
292
+ mask_shift = self.variables["cache"]["cache_index"]
293
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
294
+ causal_mask = lax.dynamic_slice(
295
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
296
+ )
297
+ else:
298
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
299
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
300
+
301
+ # combine masks if needed
302
+ if attention_mask is not None and self.causal:
303
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
304
+ attention_mask = combine_masks(attention_mask, causal_mask)
305
+ elif self.causal:
306
+ attention_mask = causal_mask
307
+ elif attention_mask is not None:
308
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
309
+
310
+ # During fast autoregressive decoding, we feed one position at a time,
311
+ # and cache the keys and values step by step.
312
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
313
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
314
+ key_states, value_states, query_states, attention_mask
315
+ )
316
+
317
+ # Convert the boolean attention mask to an attention bias.
318
+ if attention_mask is not None:
319
+ # attention mask in the form of attention bias
320
+ attention_bias = lax.select(
321
+ attention_mask > 0,
322
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
323
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
324
+ )
325
+ else:
326
+ attention_bias = None
327
+
328
+ dropout_rng = None
329
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
330
+ dropout_rng = self.make_rng("dropout")
331
+
332
+ attn_weights = dot_product_attention_weights(
333
+ query_states,
334
+ key_states,
335
+ bias=attention_bias,
336
+ dropout_rng=dropout_rng,
337
+ dropout_rate=self.config.attention_probs_dropout_prob,
338
+ broadcast_dropout=True,
339
+ deterministic=deterministic,
340
+ dtype=self.dtype,
341
+ precision=None,
342
+ )
343
+
344
+ # Mask heads if we want to
345
+ if layer_head_mask is not None:
346
+ attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
347
+
348
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
349
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
350
+
351
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
352
+ return outputs
353
+
354
+
355
+ class FlaxRobertaPreLayerNormSelfOutput(nn.Module):
356
+ config: RobertaPreLayerNormConfig
357
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
358
+
359
+ def setup(self):
360
+ self.dense = nn.Dense(
361
+ self.config.hidden_size,
362
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
363
+ dtype=self.dtype,
364
+ )
365
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
366
+
367
+ def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
368
+ hidden_states = self.dense(hidden_states)
369
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
370
+ hidden_states = hidden_states + input_tensor
371
+ return hidden_states
372
+
373
+
374
+ class FlaxRobertaPreLayerNormAttention(nn.Module):
375
+ config: RobertaPreLayerNormConfig
376
+ causal: bool = False
377
+ dtype: jnp.dtype = jnp.float32
378
+
379
+ def setup(self):
380
+ self.self = FlaxRobertaPreLayerNormSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
381
+ self.output = FlaxRobertaPreLayerNormSelfOutput(self.config, dtype=self.dtype)
382
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
383
+
384
+ def __call__(
385
+ self,
386
+ hidden_states,
387
+ attention_mask,
388
+ layer_head_mask,
389
+ key_value_states=None,
390
+ init_cache=False,
391
+ deterministic=True,
392
+ output_attentions: bool = False,
393
+ ):
394
+ hidden_states_pre_layer_norm = self.LayerNorm(hidden_states)
395
+ # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
396
+ # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
397
+ # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
398
+ attn_outputs = self.self(
399
+ hidden_states_pre_layer_norm,
400
+ attention_mask,
401
+ layer_head_mask=layer_head_mask,
402
+ key_value_states=key_value_states,
403
+ init_cache=init_cache,
404
+ deterministic=deterministic,
405
+ output_attentions=output_attentions,
406
+ )
407
+ attn_output = attn_outputs[0]
408
+ hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
409
+
410
+ outputs = (hidden_states,)
411
+
412
+ if output_attentions:
413
+ outputs += (attn_outputs[1],)
414
+
415
+ return outputs
416
+
417
+
418
+ class FlaxRobertaPreLayerNormIntermediate(nn.Module):
419
+ config: RobertaPreLayerNormConfig
420
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
421
+
422
+ def setup(self):
423
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
424
+ self.dense = nn.Dense(
425
+ self.config.intermediate_size,
426
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
427
+ dtype=self.dtype,
428
+ )
429
+ self.activation = ACT2FN[self.config.hidden_act]
430
+
431
+ def __call__(self, hidden_states):
432
+ hidden_states = self.LayerNorm(hidden_states)
433
+ hidden_states = self.dense(hidden_states)
434
+ hidden_states = self.activation(hidden_states)
435
+ return hidden_states
436
+
437
+
438
+ class FlaxRobertaPreLayerNormOutput(nn.Module):
439
+ config: RobertaPreLayerNormConfig
440
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
441
+
442
+ def setup(self):
443
+ self.dense = nn.Dense(
444
+ self.config.hidden_size,
445
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
446
+ dtype=self.dtype,
447
+ )
448
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
449
+
450
+ def __call__(self, hidden_states, attention_output, deterministic: bool = True):
451
+ hidden_states = self.dense(hidden_states)
452
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
453
+ hidden_states = hidden_states + attention_output
454
+ return hidden_states
455
+
456
+
457
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->RobertaPreLayerNorm
458
+ class FlaxRobertaPreLayerNormLayer(nn.Module):
459
+ config: RobertaPreLayerNormConfig
460
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
461
+
462
+ def setup(self):
463
+ self.attention = FlaxRobertaPreLayerNormAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
464
+ self.intermediate = FlaxRobertaPreLayerNormIntermediate(self.config, dtype=self.dtype)
465
+ self.output = FlaxRobertaPreLayerNormOutput(self.config, dtype=self.dtype)
466
+ if self.config.add_cross_attention:
467
+ self.crossattention = FlaxRobertaPreLayerNormAttention(self.config, causal=False, dtype=self.dtype)
468
+
469
+ def __call__(
470
+ self,
471
+ hidden_states,
472
+ attention_mask,
473
+ layer_head_mask,
474
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
475
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
476
+ init_cache: bool = False,
477
+ deterministic: bool = True,
478
+ output_attentions: bool = False,
479
+ ):
480
+ # Self Attention
481
+ attention_outputs = self.attention(
482
+ hidden_states,
483
+ attention_mask,
484
+ layer_head_mask=layer_head_mask,
485
+ init_cache=init_cache,
486
+ deterministic=deterministic,
487
+ output_attentions=output_attentions,
488
+ )
489
+ attention_output = attention_outputs[0]
490
+
491
+ # Cross-Attention Block
492
+ if encoder_hidden_states is not None:
493
+ cross_attention_outputs = self.crossattention(
494
+ attention_output,
495
+ attention_mask=encoder_attention_mask,
496
+ layer_head_mask=layer_head_mask,
497
+ key_value_states=encoder_hidden_states,
498
+ deterministic=deterministic,
499
+ output_attentions=output_attentions,
500
+ )
501
+ attention_output = cross_attention_outputs[0]
502
+
503
+ hidden_states = self.intermediate(attention_output)
504
+ hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
505
+
506
+ outputs = (hidden_states,)
507
+
508
+ if output_attentions:
509
+ outputs += (attention_outputs[1],)
510
+ if encoder_hidden_states is not None:
511
+ outputs += (cross_attention_outputs[1],)
512
+ return outputs
513
+
514
+
515
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->RobertaPreLayerNorm
516
+ class FlaxRobertaPreLayerNormLayerCollection(nn.Module):
517
+ config: RobertaPreLayerNormConfig
518
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
519
+ gradient_checkpointing: bool = False
520
+
521
+ def setup(self):
522
+ if self.gradient_checkpointing:
523
+ FlaxRobertaPreLayerNormCheckpointLayer = remat(FlaxRobertaPreLayerNormLayer, static_argnums=(5, 6, 7))
524
+ self.layers = [
525
+ FlaxRobertaPreLayerNormCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
526
+ for i in range(self.config.num_hidden_layers)
527
+ ]
528
+ else:
529
+ self.layers = [
530
+ FlaxRobertaPreLayerNormLayer(self.config, name=str(i), dtype=self.dtype)
531
+ for i in range(self.config.num_hidden_layers)
532
+ ]
533
+
534
+ def __call__(
535
+ self,
536
+ hidden_states,
537
+ attention_mask,
538
+ head_mask,
539
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
540
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
541
+ init_cache: bool = False,
542
+ deterministic: bool = True,
543
+ output_attentions: bool = False,
544
+ output_hidden_states: bool = False,
545
+ return_dict: bool = True,
546
+ ):
547
+ all_attentions = () if output_attentions else None
548
+ all_hidden_states = () if output_hidden_states else None
549
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
550
+
551
+ # Check if head_mask has a correct number of layers specified if desired
552
+ if head_mask is not None:
553
+ if head_mask.shape[0] != (len(self.layers)):
554
+ raise ValueError(
555
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
556
+ f" {head_mask.shape[0]}."
557
+ )
558
+
559
+ for i, layer in enumerate(self.layers):
560
+ if output_hidden_states:
561
+ all_hidden_states += (hidden_states,)
562
+
563
+ layer_outputs = layer(
564
+ hidden_states,
565
+ attention_mask,
566
+ head_mask[i] if head_mask is not None else None,
567
+ encoder_hidden_states,
568
+ encoder_attention_mask,
569
+ init_cache,
570
+ deterministic,
571
+ output_attentions,
572
+ )
573
+
574
+ hidden_states = layer_outputs[0]
575
+
576
+ if output_attentions:
577
+ all_attentions += (layer_outputs[1],)
578
+
579
+ if encoder_hidden_states is not None:
580
+ all_cross_attentions += (layer_outputs[2],)
581
+
582
+ if output_hidden_states:
583
+ all_hidden_states += (hidden_states,)
584
+
585
+ outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
586
+
587
+ if not return_dict:
588
+ return tuple(v for v in outputs if v is not None)
589
+
590
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
591
+ last_hidden_state=hidden_states,
592
+ hidden_states=all_hidden_states,
593
+ attentions=all_attentions,
594
+ cross_attentions=all_cross_attentions,
595
+ )
596
+
597
+
598
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->RobertaPreLayerNorm
599
+ class FlaxRobertaPreLayerNormEncoder(nn.Module):
600
+ config: RobertaPreLayerNormConfig
601
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
602
+ gradient_checkpointing: bool = False
603
+
604
+ def setup(self):
605
+ self.layer = FlaxRobertaPreLayerNormLayerCollection(
606
+ self.config,
607
+ dtype=self.dtype,
608
+ gradient_checkpointing=self.gradient_checkpointing,
609
+ )
610
+
611
+ def __call__(
612
+ self,
613
+ hidden_states,
614
+ attention_mask,
615
+ head_mask,
616
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
617
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
618
+ init_cache: bool = False,
619
+ deterministic: bool = True,
620
+ output_attentions: bool = False,
621
+ output_hidden_states: bool = False,
622
+ return_dict: bool = True,
623
+ ):
624
+ return self.layer(
625
+ hidden_states,
626
+ attention_mask,
627
+ head_mask=head_mask,
628
+ encoder_hidden_states=encoder_hidden_states,
629
+ encoder_attention_mask=encoder_attention_mask,
630
+ init_cache=init_cache,
631
+ deterministic=deterministic,
632
+ output_attentions=output_attentions,
633
+ output_hidden_states=output_hidden_states,
634
+ return_dict=return_dict,
635
+ )
636
+
637
+
638
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->RobertaPreLayerNorm
639
+ class FlaxRobertaPreLayerNormPooler(nn.Module):
640
+ config: RobertaPreLayerNormConfig
641
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
642
+
643
+ def setup(self):
644
+ self.dense = nn.Dense(
645
+ self.config.hidden_size,
646
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
647
+ dtype=self.dtype,
648
+ )
649
+
650
+ def __call__(self, hidden_states):
651
+ cls_hidden_state = hidden_states[:, 0]
652
+ cls_hidden_state = self.dense(cls_hidden_state)
653
+ return nn.tanh(cls_hidden_state)
654
+
655
+
656
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaLMHead with Roberta->RobertaPreLayerNorm
657
+ class FlaxRobertaPreLayerNormLMHead(nn.Module):
658
+ config: RobertaPreLayerNormConfig
659
+ dtype: jnp.dtype = jnp.float32
660
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
661
+
662
+ def setup(self):
663
+ self.dense = nn.Dense(
664
+ self.config.hidden_size,
665
+ dtype=self.dtype,
666
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
667
+ )
668
+ self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
669
+ self.decoder = nn.Dense(
670
+ self.config.vocab_size,
671
+ dtype=self.dtype,
672
+ use_bias=False,
673
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
674
+ )
675
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
676
+
677
+ def __call__(self, hidden_states, shared_embedding=None):
678
+ hidden_states = self.dense(hidden_states)
679
+ hidden_states = ACT2FN["gelu"](hidden_states)
680
+ hidden_states = self.layer_norm(hidden_states)
681
+
682
+ if shared_embedding is not None:
683
+ hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
684
+ else:
685
+ hidden_states = self.decoder(hidden_states)
686
+
687
+ bias = jnp.asarray(self.bias, self.dtype)
688
+ hidden_states += bias
689
+ return hidden_states
690
+
691
+
692
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaClassificationHead with Roberta->RobertaPreLayerNorm
693
+ class FlaxRobertaPreLayerNormClassificationHead(nn.Module):
694
+ config: RobertaPreLayerNormConfig
695
+ dtype: jnp.dtype = jnp.float32
696
+
697
+ def setup(self):
698
+ self.dense = nn.Dense(
699
+ self.config.hidden_size,
700
+ dtype=self.dtype,
701
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
702
+ )
703
+ classifier_dropout = (
704
+ self.config.classifier_dropout
705
+ if self.config.classifier_dropout is not None
706
+ else self.config.hidden_dropout_prob
707
+ )
708
+ self.dropout = nn.Dropout(rate=classifier_dropout)
709
+ self.out_proj = nn.Dense(
710
+ self.config.num_labels,
711
+ dtype=self.dtype,
712
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
713
+ )
714
+
715
+ def __call__(self, hidden_states, deterministic=True):
716
+ hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
717
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
718
+ hidden_states = self.dense(hidden_states)
719
+ hidden_states = nn.tanh(hidden_states)
720
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
721
+ hidden_states = self.out_proj(hidden_states)
722
+ return hidden_states
723
+
724
+
725
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaPreTrainedModel with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
726
+ class FlaxRobertaPreLayerNormPreTrainedModel(FlaxPreTrainedModel):
727
+ """
728
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
729
+ models.
730
+ """
731
+
732
+ config_class = RobertaPreLayerNormConfig
733
+ base_model_prefix = "roberta_prelayernorm"
734
+
735
+ module_class: nn.Module = None
736
+
737
+ def __init__(
738
+ self,
739
+ config: RobertaPreLayerNormConfig,
740
+ input_shape: Tuple = (1, 1),
741
+ seed: int = 0,
742
+ dtype: jnp.dtype = jnp.float32,
743
+ _do_init: bool = True,
744
+ gradient_checkpointing: bool = False,
745
+ **kwargs,
746
+ ):
747
+ module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs)
748
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
749
+
750
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing
751
+ def enable_gradient_checkpointing(self):
752
+ self._module = self.module_class(
753
+ config=self.config,
754
+ dtype=self.dtype,
755
+ gradient_checkpointing=True,
756
+ )
757
+
758
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
759
+ # init input tensors
760
+ input_ids = jnp.zeros(input_shape, dtype="i4")
761
+ token_type_ids = jnp.ones_like(input_ids)
762
+ position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id)
763
+ attention_mask = jnp.ones_like(input_ids)
764
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
765
+
766
+ params_rng, dropout_rng = jax.random.split(rng)
767
+ rngs = {"params": params_rng, "dropout": dropout_rng}
768
+
769
+ if self.config.add_cross_attention:
770
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
771
+ encoder_attention_mask = attention_mask
772
+ module_init_outputs = self.module.init(
773
+ rngs,
774
+ input_ids,
775
+ attention_mask,
776
+ token_type_ids,
777
+ position_ids,
778
+ head_mask,
779
+ encoder_hidden_states,
780
+ encoder_attention_mask,
781
+ return_dict=False,
782
+ )
783
+ else:
784
+ module_init_outputs = self.module.init(
785
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
786
+ )
787
+
788
+ random_params = module_init_outputs["params"]
789
+
790
+ if params is not None:
791
+ random_params = flatten_dict(unfreeze(random_params))
792
+ params = flatten_dict(unfreeze(params))
793
+ for missing_key in self._missing_keys:
794
+ params[missing_key] = random_params[missing_key]
795
+ self._missing_keys = set()
796
+ return freeze(unflatten_dict(params))
797
+ else:
798
+ return random_params
799
+
800
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
801
+ def init_cache(self, batch_size, max_length):
802
+ r"""
803
+ Args:
804
+ batch_size (`int`):
805
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
806
+ max_length (`int`):
807
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
808
+ cache.
809
+ """
810
+ # init input variables to retrieve cache
811
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
812
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
813
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
814
+
815
+ init_variables = self.module.init(
816
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
817
+ )
818
+ return unfreeze(init_variables["cache"])
819
+
820
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
821
+ def __call__(
822
+ self,
823
+ input_ids,
824
+ attention_mask=None,
825
+ token_type_ids=None,
826
+ position_ids=None,
827
+ head_mask=None,
828
+ encoder_hidden_states=None,
829
+ encoder_attention_mask=None,
830
+ params: dict = None,
831
+ dropout_rng: jax.random.PRNGKey = None,
832
+ train: bool = False,
833
+ output_attentions: Optional[bool] = None,
834
+ output_hidden_states: Optional[bool] = None,
835
+ return_dict: Optional[bool] = None,
836
+ past_key_values: dict = None,
837
+ ):
838
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
839
+ output_hidden_states = (
840
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
841
+ )
842
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
843
+
844
+ # init input tensors if not passed
845
+ if token_type_ids is None:
846
+ token_type_ids = jnp.zeros_like(input_ids)
847
+
848
+ if position_ids is None:
849
+ position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id)
850
+
851
+ if attention_mask is None:
852
+ attention_mask = jnp.ones_like(input_ids)
853
+
854
+ if head_mask is None:
855
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
856
+
857
+ # Handle any PRNG if needed
858
+ rngs = {}
859
+ if dropout_rng is not None:
860
+ rngs["dropout"] = dropout_rng
861
+
862
+ inputs = {"params": params or self.params}
863
+
864
+ if self.config.add_cross_attention:
865
+ # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed
866
+ # down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be
867
+ # changed by FlaxRobertaPreLayerNormAttention module
868
+ if past_key_values:
869
+ inputs["cache"] = past_key_values
870
+ mutable = ["cache"]
871
+ else:
872
+ mutable = False
873
+
874
+ outputs = self.module.apply(
875
+ inputs,
876
+ jnp.array(input_ids, dtype="i4"),
877
+ jnp.array(attention_mask, dtype="i4"),
878
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
879
+ position_ids=jnp.array(position_ids, dtype="i4"),
880
+ head_mask=jnp.array(head_mask, dtype="i4"),
881
+ encoder_hidden_states=encoder_hidden_states,
882
+ encoder_attention_mask=encoder_attention_mask,
883
+ deterministic=not train,
884
+ output_attentions=output_attentions,
885
+ output_hidden_states=output_hidden_states,
886
+ return_dict=return_dict,
887
+ rngs=rngs,
888
+ mutable=mutable,
889
+ )
890
+
891
+ # add updated cache to model output
892
+ if past_key_values is not None and return_dict:
893
+ outputs, past_key_values = outputs
894
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
895
+ return outputs
896
+ elif past_key_values is not None and not return_dict:
897
+ outputs, past_key_values = outputs
898
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
899
+
900
+ else:
901
+ outputs = self.module.apply(
902
+ inputs,
903
+ jnp.array(input_ids, dtype="i4"),
904
+ jnp.array(attention_mask, dtype="i4"),
905
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
906
+ position_ids=jnp.array(position_ids, dtype="i4"),
907
+ head_mask=jnp.array(head_mask, dtype="i4"),
908
+ deterministic=not train,
909
+ output_attentions=output_attentions,
910
+ output_hidden_states=output_hidden_states,
911
+ return_dict=return_dict,
912
+ rngs=rngs,
913
+ )
914
+
915
+ return outputs
916
+
917
+
918
+ class FlaxRobertaPreLayerNormModule(nn.Module):
919
+ config: RobertaPreLayerNormConfig
920
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
921
+ add_pooling_layer: bool = True
922
+ gradient_checkpointing: bool = False
923
+
924
+ def setup(self):
925
+ self.embeddings = FlaxRobertaPreLayerNormEmbeddings(self.config, dtype=self.dtype)
926
+ self.encoder = FlaxRobertaPreLayerNormEncoder(
927
+ self.config,
928
+ dtype=self.dtype,
929
+ gradient_checkpointing=self.gradient_checkpointing,
930
+ )
931
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
932
+ self.pooler = FlaxRobertaPreLayerNormPooler(self.config, dtype=self.dtype)
933
+
934
+ def __call__(
935
+ self,
936
+ input_ids,
937
+ attention_mask,
938
+ token_type_ids: Optional[jnp.ndarray] = None,
939
+ position_ids: Optional[jnp.ndarray] = None,
940
+ head_mask: Optional[jnp.ndarray] = None,
941
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
942
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
943
+ init_cache: bool = False,
944
+ deterministic: bool = True,
945
+ output_attentions: bool = False,
946
+ output_hidden_states: bool = False,
947
+ return_dict: bool = True,
948
+ ):
949
+ # make sure `token_type_ids` is correctly initialized when not passed
950
+ if token_type_ids is None:
951
+ token_type_ids = jnp.zeros_like(input_ids)
952
+
953
+ # make sure `position_ids` is correctly initialized when not passed
954
+ if position_ids is None:
955
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
956
+
957
+ hidden_states = self.embeddings(
958
+ input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
959
+ )
960
+ outputs = self.encoder(
961
+ hidden_states,
962
+ attention_mask,
963
+ head_mask=head_mask,
964
+ deterministic=deterministic,
965
+ encoder_hidden_states=encoder_hidden_states,
966
+ encoder_attention_mask=encoder_attention_mask,
967
+ init_cache=init_cache,
968
+ output_attentions=output_attentions,
969
+ output_hidden_states=output_hidden_states,
970
+ return_dict=return_dict,
971
+ )
972
+ hidden_states = outputs[0]
973
+ hidden_states = self.LayerNorm(hidden_states)
974
+ pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
975
+
976
+ if not return_dict:
977
+ # if pooled is None, don't return it
978
+ if pooled is None:
979
+ return (hidden_states,) + outputs[1:]
980
+ return (hidden_states, pooled) + outputs[1:]
981
+
982
+ return FlaxBaseModelOutputWithPoolingAndCrossAttentions(
983
+ last_hidden_state=hidden_states,
984
+ pooler_output=pooled,
985
+ hidden_states=outputs.hidden_states,
986
+ attentions=outputs.attentions,
987
+ cross_attentions=outputs.cross_attentions,
988
+ )
989
+
990
+
991
+ @add_start_docstrings(
992
+ "The bare RoBERTa-PreLayerNorm Model transformer outputting raw hidden-states without any specific head on top.",
993
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
994
+ )
995
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaModel with Roberta->RobertaPreLayerNorm
996
+ class FlaxRobertaPreLayerNormModel(FlaxRobertaPreLayerNormPreTrainedModel):
997
+ module_class = FlaxRobertaPreLayerNormModule
998
+
999
+
1000
+ append_call_sample_docstring(
1001
+ FlaxRobertaPreLayerNormModel,
1002
+ _CHECKPOINT_FOR_DOC,
1003
+ FlaxBaseModelOutputWithPooling,
1004
+ _CONFIG_FOR_DOC,
1005
+ )
1006
+
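A minimal usage sketch for the bare Flax model registered above, assuming the checkpoint named in the sample docstring provides Flax weights (otherwise `from_pt=True` would be needed):

from transformers import AutoTokenizer, FlaxRobertaPreLayerNormModel

tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40")

inputs = tokenizer("Hello, world!", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)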
1007
+
1008
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMaskedLMModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1009
+ class FlaxRobertaPreLayerNormForMaskedLMModule(nn.Module):
1010
+ config: RobertaPreLayerNormConfig
1011
+ dtype: jnp.dtype = jnp.float32
1012
+ gradient_checkpointing: bool = False
1013
+
1014
+ def setup(self):
1015
+ self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule(
1016
+ config=self.config,
1017
+ add_pooling_layer=False,
1018
+ dtype=self.dtype,
1019
+ gradient_checkpointing=self.gradient_checkpointing,
1020
+ )
1021
+ self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype)
1022
+
1023
+ def __call__(
1024
+ self,
1025
+ input_ids,
1026
+ attention_mask,
1027
+ token_type_ids,
1028
+ position_ids,
1029
+ head_mask,
1030
+ deterministic: bool = True,
1031
+ output_attentions: bool = False,
1032
+ output_hidden_states: bool = False,
1033
+ return_dict: bool = True,
1034
+ ):
1035
+ # Model
1036
+ outputs = self.roberta_prelayernorm(
1037
+ input_ids,
1038
+ attention_mask,
1039
+ token_type_ids,
1040
+ position_ids,
1041
+ head_mask,
1042
+ deterministic=deterministic,
1043
+ output_attentions=output_attentions,
1044
+ output_hidden_states=output_hidden_states,
1045
+ return_dict=return_dict,
1046
+ )
1047
+
1048
+ hidden_states = outputs[0]
1049
+ if self.config.tie_word_embeddings:
1050
+ shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][
1051
+ "embedding"
1052
+ ]
1053
+ else:
1054
+ shared_embedding = None
1055
+
1056
+ # Compute the prediction scores
1057
+ logits = self.lm_head(hidden_states, shared_embedding=shared_embedding)
1058
+
1059
+ if not return_dict:
1060
+ return (logits,) + outputs[1:]
1061
+
1062
+ return FlaxMaskedLMOutput(
1063
+ logits=logits,
1064
+ hidden_states=outputs.hidden_states,
1065
+ attentions=outputs.attentions,
1066
+ )
1067
+
1068
+
1069
+ @add_start_docstrings(
1070
+ """RoBERTa-PreLayerNorm Model with a `language modeling` head on top.""", ROBERTA_PRELAYERNORM_START_DOCSTRING
1071
+ )
1072
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMaskedLM with Roberta->RobertaPreLayerNorm
1073
+ class FlaxRobertaPreLayerNormForMaskedLM(FlaxRobertaPreLayerNormPreTrainedModel):
1074
+ module_class = FlaxRobertaPreLayerNormForMaskedLMModule
1075
+
1076
+
1077
+ append_call_sample_docstring(
1078
+ FlaxRobertaPreLayerNormForMaskedLM,
1079
+ _CHECKPOINT_FOR_DOC,
1080
+ FlaxBaseModelOutputWithPooling,
1081
+ _CONFIG_FOR_DOC,
1082
+ mask="<mask>",
1083
+ )
1084
+
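A hedged fill-mask sketch for the masked-LM head above; the checkpoint name is the documentation checkpoint and Flax weights are assumed to be available:

from transformers import AutoTokenizer, FlaxRobertaPreLayerNormForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40")

inputs = tokenizer("The capital of France is <mask>.", return_tensors="np")
logits = model(**inputs).logits

mask_index = int((inputs["input_ids"][0] == tokenizer.mask_token_id).argmax())
predicted_id = int(logits[0, mask_index].argmax(-1))
print(tokenizer.decode([predicted_id]))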
1085
+
1086
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForSequenceClassificationModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1087
+ class FlaxRobertaPreLayerNormForSequenceClassificationModule(nn.Module):
1088
+ config: RobertaPreLayerNormConfig
1089
+ dtype: jnp.dtype = jnp.float32
1090
+ gradient_checkpointing: bool = False
1091
+
1092
+ def setup(self):
1093
+ self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule(
1094
+ config=self.config,
1095
+ dtype=self.dtype,
1096
+ add_pooling_layer=False,
1097
+ gradient_checkpointing=self.gradient_checkpointing,
1098
+ )
1099
+ self.classifier = FlaxRobertaPreLayerNormClassificationHead(config=self.config, dtype=self.dtype)
1100
+
1101
+ def __call__(
1102
+ self,
1103
+ input_ids,
1104
+ attention_mask,
1105
+ token_type_ids,
1106
+ position_ids,
1107
+ head_mask,
1108
+ deterministic: bool = True,
1109
+ output_attentions: bool = False,
1110
+ output_hidden_states: bool = False,
1111
+ return_dict: bool = True,
1112
+ ):
1113
+ # Model
1114
+ outputs = self.roberta_prelayernorm(
1115
+ input_ids,
1116
+ attention_mask,
1117
+ token_type_ids,
1118
+ position_ids,
1119
+ head_mask,
1120
+ deterministic=deterministic,
1121
+ output_attentions=output_attentions,
1122
+ output_hidden_states=output_hidden_states,
1123
+ return_dict=return_dict,
1124
+ )
1125
+
1126
+ sequence_output = outputs[0]
1127
+ logits = self.classifier(sequence_output, deterministic=deterministic)
1128
+
1129
+ if not return_dict:
1130
+ return (logits,) + outputs[1:]
1131
+
1132
+ return FlaxSequenceClassifierOutput(
1133
+ logits=logits,
1134
+ hidden_states=outputs.hidden_states,
1135
+ attentions=outputs.attentions,
1136
+ )
1137
+
1138
+
1139
+ @add_start_docstrings(
1140
+ """
1141
+ RobertaPreLayerNorm Model transformer with a sequence classification/regression head on top (a linear layer on top
1142
+ of the pooled output) e.g. for GLUE tasks.
1143
+ """,
1144
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1145
+ )
1146
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForSequenceClassification with Roberta->RobertaPreLayerNorm
1147
+ class FlaxRobertaPreLayerNormForSequenceClassification(FlaxRobertaPreLayerNormPreTrainedModel):
1148
+ module_class = FlaxRobertaPreLayerNormForSequenceClassificationModule
1149
+
1150
+
1151
+ append_call_sample_docstring(
1152
+ FlaxRobertaPreLayerNormForSequenceClassification,
1153
+ _CHECKPOINT_FOR_DOC,
1154
+ FlaxSequenceClassifierOutput,
1155
+ _CONFIG_FOR_DOC,
1156
+ )
1157
+
1158
+
1159
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForMultipleChoiceModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm
1160
+ class FlaxRobertaPreLayerNormForMultipleChoiceModule(nn.Module):
1161
+ config: RobertaPreLayerNormConfig
1162
+ dtype: jnp.dtype = jnp.float32
1163
+ gradient_checkpointing: bool = False
1164
+
1165
+ def setup(self):
1166
+ self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule(
1167
+ config=self.config,
1168
+ dtype=self.dtype,
1169
+ gradient_checkpointing=self.gradient_checkpointing,
1170
+ )
1171
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
1172
+ self.classifier = nn.Dense(1, dtype=self.dtype)
1173
+
1174
+ def __call__(
1175
+ self,
1176
+ input_ids,
1177
+ attention_mask,
1178
+ token_type_ids,
1179
+ position_ids,
1180
+ head_mask,
1181
+ deterministic: bool = True,
1182
+ output_attentions: bool = False,
1183
+ output_hidden_states: bool = False,
1184
+ return_dict: bool = True,
1185
+ ):
1186
+ num_choices = input_ids.shape[1]
1187
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
1188
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
1189
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
1190
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
1191
+
1192
+ # Model
1193
+ outputs = self.roberta_prelayernorm(
1194
+ input_ids,
1195
+ attention_mask,
1196
+ token_type_ids,
1197
+ position_ids,
1198
+ head_mask,
1199
+ deterministic=deterministic,
1200
+ output_attentions=output_attentions,
1201
+ output_hidden_states=output_hidden_states,
1202
+ return_dict=return_dict,
1203
+ )
1204
+
1205
+ pooled_output = outputs[1]
1206
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
1207
+ logits = self.classifier(pooled_output)
1208
+
1209
+ reshaped_logits = logits.reshape(-1, num_choices)
1210
+
1211
+ if not return_dict:
1212
+ return (reshaped_logits,) + outputs[2:]
1213
+
1214
+ return FlaxMultipleChoiceModelOutput(
1215
+ logits=reshaped_logits,
1216
+ hidden_states=outputs.hidden_states,
1217
+ attentions=outputs.attentions,
1218
+ )
1219
+
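The multiple-choice module above folds the choice dimension into the batch before the encoder and unfolds it again for the logits. A small self-contained sketch of that reshape trick, using toy shapes rather than real model outputs:

import jax.numpy as jnp

batch_size, num_choices, seq_len = 2, 4, 8
input_ids = jnp.ones((batch_size, num_choices, seq_len), dtype="i4")

flat_input_ids = input_ids.reshape(-1, input_ids.shape[-1])  # (8, 8): choices folded into the batch
flat_scores = jnp.zeros((flat_input_ids.shape[0], 1))        # stand-in for the per-sequence classifier output
reshaped_logits = flat_scores.reshape(-1, num_choices)       # (2, 4): one score per choice, softmax-ready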
1220
+
1221
+ @add_start_docstrings(
1222
+ """
1223
+ RobertaPreLayerNorm Model with a multiple choice classification head on top (a linear layer on top of the pooled
1224
+ output and a softmax) e.g. for RocStories/SWAG tasks.
1225
+ """,
1226
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1227
+ )
1228
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMultipleChoice with Roberta->RobertaPreLayerNorm
1229
+ class FlaxRobertaPreLayerNormForMultipleChoice(FlaxRobertaPreLayerNormPreTrainedModel):
1230
+ module_class = FlaxRobertaPreLayerNormForMultipleChoiceModule
1231
+
1232
+
1233
+ overwrite_call_docstring(
1234
+ FlaxRobertaPreLayerNormForMultipleChoice,
1235
+ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"),
1236
+ )
1237
+ append_call_sample_docstring(
1238
+ FlaxRobertaPreLayerNormForMultipleChoice,
1239
+ _CHECKPOINT_FOR_DOC,
1240
+ FlaxMultipleChoiceModelOutput,
1241
+ _CONFIG_FOR_DOC,
1242
+ )
1243
+
1244
+
1245
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForTokenClassificationModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm
1246
+ class FlaxRobertaPreLayerNormForTokenClassificationModule(nn.Module):
1247
+ config: RobertaPreLayerNormConfig
1248
+ dtype: jnp.dtype = jnp.float32
1249
+ gradient_checkpointing: bool = False
1250
+
1251
+ def setup(self):
1252
+ self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule(
1253
+ config=self.config,
1254
+ dtype=self.dtype,
1255
+ add_pooling_layer=False,
1256
+ gradient_checkpointing=self.gradient_checkpointing,
1257
+ )
1258
+ classifier_dropout = (
1259
+ self.config.classifier_dropout
1260
+ if self.config.classifier_dropout is not None
1261
+ else self.config.hidden_dropout_prob
1262
+ )
1263
+ self.dropout = nn.Dropout(rate=classifier_dropout)
1264
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
1265
+
1266
+ def __call__(
1267
+ self,
1268
+ input_ids,
1269
+ attention_mask,
1270
+ token_type_ids,
1271
+ position_ids,
1272
+ head_mask,
1273
+ deterministic: bool = True,
1274
+ output_attentions: bool = False,
1275
+ output_hidden_states: bool = False,
1276
+ return_dict: bool = True,
1277
+ ):
1278
+ # Model
1279
+ outputs = self.roberta_prelayernorm(
1280
+ input_ids,
1281
+ attention_mask,
1282
+ token_type_ids,
1283
+ position_ids,
1284
+ head_mask,
1285
+ deterministic=deterministic,
1286
+ output_attentions=output_attentions,
1287
+ output_hidden_states=output_hidden_states,
1288
+ return_dict=return_dict,
1289
+ )
1290
+
1291
+ hidden_states = outputs[0]
1292
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
1293
+ logits = self.classifier(hidden_states)
1294
+
1295
+ if not return_dict:
1296
+ return (logits,) + outputs[1:]
1297
+
1298
+ return FlaxTokenClassifierOutput(
1299
+ logits=logits,
1300
+ hidden_states=outputs.hidden_states,
1301
+ attentions=outputs.attentions,
1302
+ )
1303
+
1304
+
1305
+ @add_start_docstrings(
1306
+ """
1307
+ RobertaPreLayerNorm Model with a token classification head on top (a linear layer on top of the hidden-states
1308
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1309
+ """,
1310
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1311
+ )
1312
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForTokenClassification with Roberta->RobertaPreLayerNorm
1313
+ class FlaxRobertaPreLayerNormForTokenClassification(FlaxRobertaPreLayerNormPreTrainedModel):
1314
+ module_class = FlaxRobertaPreLayerNormForTokenClassificationModule
1315
+
1316
+
1317
+ append_call_sample_docstring(
1318
+ FlaxRobertaPreLayerNormForTokenClassification,
1319
+ _CHECKPOINT_FOR_DOC,
1320
+ FlaxTokenClassifierOutput,
1321
+ _CONFIG_FOR_DOC,
1322
+ )
1323
+
1324
+
1325
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForQuestionAnsweringModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm
1326
+ class FlaxRobertaPreLayerNormForQuestionAnsweringModule(nn.Module):
1327
+ config: RobertaPreLayerNormConfig
1328
+ dtype: jnp.dtype = jnp.float32
1329
+ gradient_checkpointing: bool = False
1330
+
1331
+ def setup(self):
1332
+ self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule(
1333
+ config=self.config,
1334
+ dtype=self.dtype,
1335
+ add_pooling_layer=False,
1336
+ gradient_checkpointing=self.gradient_checkpointing,
1337
+ )
1338
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
1339
+
1340
+ def __call__(
1341
+ self,
1342
+ input_ids,
1343
+ attention_mask,
1344
+ token_type_ids,
1345
+ position_ids,
1346
+ head_mask,
1347
+ deterministic: bool = True,
1348
+ output_attentions: bool = False,
1349
+ output_hidden_states: bool = False,
1350
+ return_dict: bool = True,
1351
+ ):
1352
+ # Model
1353
+ outputs = self.roberta_prelayernorm(
1354
+ input_ids,
1355
+ attention_mask,
1356
+ token_type_ids,
1357
+ position_ids,
1358
+ head_mask,
1359
+ deterministic=deterministic,
1360
+ output_attentions=output_attentions,
1361
+ output_hidden_states=output_hidden_states,
1362
+ return_dict=return_dict,
1363
+ )
1364
+
1365
+ hidden_states = outputs[0]
1366
+
1367
+ logits = self.qa_outputs(hidden_states)
1368
+ start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1)
1369
+ start_logits = start_logits.squeeze(-1)
1370
+ end_logits = end_logits.squeeze(-1)
1371
+
1372
+ if not return_dict:
1373
+ return (start_logits, end_logits) + outputs[1:]
1374
+
1375
+ return FlaxQuestionAnsweringModelOutput(
1376
+ start_logits=start_logits,
1377
+ end_logits=end_logits,
1378
+ hidden_states=outputs.hidden_states,
1379
+ attentions=outputs.attentions,
1380
+ )
1381
+
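For the question-answering head above, `config.num_labels` is 2, so `jnp.split` separates start and end logits along the last axis. A toy illustration with placeholder logits:

import jax.numpy as jnp

logits = jnp.zeros((1, 16, 2))                        # (batch, seq_len, num_labels=2)
start_logits, end_logits = jnp.split(logits, 2, axis=-1)
start_logits = start_logits.squeeze(-1)               # (1, 16)
end_logits = end_logits.squeeze(-1)                   # (1, 16)
start_index = int(start_logits.argmax(-1)[0])         # most likely answer-span start
end_index = int(end_logits.argmax(-1)[0])             # most likely answer-span end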
1382
+
1383
+ @add_start_docstrings(
1384
+ """
1385
+ RobertaPreLayerNorm Model with a span classification head on top for extractive question-answering tasks like SQuAD
1386
+ (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1387
+ """,
1388
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1389
+ )
1390
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForQuestionAnswering with Roberta->RobertaPreLayerNorm
1391
+ class FlaxRobertaPreLayerNormForQuestionAnswering(FlaxRobertaPreLayerNormPreTrainedModel):
1392
+ module_class = FlaxRobertaPreLayerNormForQuestionAnsweringModule
1393
+
1394
+
1395
+ append_call_sample_docstring(
1396
+ FlaxRobertaPreLayerNormForQuestionAnswering,
1397
+ _CHECKPOINT_FOR_DOC,
1398
+ FlaxQuestionAnsweringModelOutput,
1399
+ _CONFIG_FOR_DOC,
1400
+ )
1401
+
1402
+
1403
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLMModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1404
+ class FlaxRobertaPreLayerNormForCausalLMModule(nn.Module):
1405
+ config: RobertaPreLayerNormConfig
1406
+ dtype: jnp.dtype = jnp.float32
1407
+ gradient_checkpointing: bool = False
1408
+
1409
+ def setup(self):
1410
+ self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule(
1411
+ config=self.config,
1412
+ add_pooling_layer=False,
1413
+ dtype=self.dtype,
1414
+ gradient_checkpointing=self.gradient_checkpointing,
1415
+ )
1416
+ self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype)
1417
+
1418
+ def __call__(
1419
+ self,
1420
+ input_ids,
1421
+ attention_mask,
1422
+ position_ids,
1423
+ token_type_ids: Optional[jnp.ndarray] = None,
1424
+ head_mask: Optional[jnp.ndarray] = None,
1425
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
1426
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1427
+ init_cache: bool = False,
1428
+ deterministic: bool = True,
1429
+ output_attentions: bool = False,
1430
+ output_hidden_states: bool = False,
1431
+ return_dict: bool = True,
1432
+ ):
1433
+ # Model
1434
+ outputs = self.roberta_prelayernorm(
1435
+ input_ids,
1436
+ attention_mask,
1437
+ token_type_ids,
1438
+ position_ids,
1439
+ head_mask,
1440
+ encoder_hidden_states=encoder_hidden_states,
1441
+ encoder_attention_mask=encoder_attention_mask,
1442
+ init_cache=init_cache,
1443
+ deterministic=deterministic,
1444
+ output_attentions=output_attentions,
1445
+ output_hidden_states=output_hidden_states,
1446
+ return_dict=return_dict,
1447
+ )
1448
+
1449
+ hidden_states = outputs[0]
1450
+ if self.config.tie_word_embeddings:
1451
+ shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][
1452
+ "embedding"
1453
+ ]
1454
+ else:
1455
+ shared_embedding = None
1456
+
1457
+ # Compute the prediction scores
1458
+ logits = self.lm_head(hidden_states, shared_embedding=shared_embedding)
1459
+
1460
+ if not return_dict:
1461
+ return (logits,) + outputs[1:]
1462
+
1463
+ return FlaxCausalLMOutputWithCrossAttentions(
1464
+ logits=logits,
1465
+ hidden_states=outputs.hidden_states,
1466
+ attentions=outputs.attentions,
1467
+ cross_attentions=outputs.cross_attentions,
1468
+ )
1469
+
1470
+
1471
+ @add_start_docstrings(
1472
+ """
1473
+ RobertaPreLayerNorm Model with a language modeling head on top (a linear layer on top of the hidden-states output)
1474
+ e.g. for autoregressive tasks.
1475
+ """,
1476
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1477
+ )
1478
+ # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLM with Roberta->RobertaPreLayerNorm
1479
+ class FlaxRobertaPreLayerNormForCausalLM(FlaxRobertaPreLayerNormPreTrainedModel):
1480
+ module_class = FlaxRobertaPreLayerNormForCausalLMModule
1481
+
1482
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
1483
+ # initializing the cache
1484
+ batch_size, seq_length = input_ids.shape
1485
+
1486
+ past_key_values = self.init_cache(batch_size, max_length)
1487
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1488
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1489
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation
1490
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1491
+ if attention_mask is not None:
1492
+ position_ids = attention_mask.cumsum(axis=-1) - 1
1493
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
1494
+ else:
1495
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1496
+
1497
+ return {
1498
+ "past_key_values": past_key_values,
1499
+ "attention_mask": extended_attention_mask,
1500
+ "position_ids": position_ids,
1501
+ }
1502
+
1503
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1504
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1505
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
1506
+ return model_kwargs
1507
+
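prepare_inputs_for_generation above builds a static full-length attention mask and derives position ids from the cumulative sum of the real mask. The same arithmetic in isolation, with a 3-token prompt and a generation budget of 6 positions:

import jax.numpy as jnp
from jax import lax

batch_size, max_length = 1, 6
attention_mask = jnp.array([[1, 1, 1]], dtype="i4")                      # 3 real prompt tokens

extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
position_ids = attention_mask.cumsum(axis=-1) - 1                        # [[0, 1, 2]]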
1508
+
1509
+ append_call_sample_docstring(
1510
+ FlaxRobertaPreLayerNormForCausalLM,
1511
+ _CHECKPOINT_FOR_DOC,
1512
+ FlaxCausalLMOutputWithCrossAttentions,
1513
+ _CONFIG_FOR_DOC,
1514
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py ADDED
@@ -0,0 +1,1566 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch RoBERTa-PreLayerNorm model."""
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN, gelu
27
+ from ...modeling_outputs import (
28
+ BaseModelOutputWithPastAndCrossAttentions,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ CausalLMOutputWithCrossAttentions,
31
+ MaskedLMOutput,
32
+ MultipleChoiceModelOutput,
33
+ QuestionAnsweringModelOutput,
34
+ SequenceClassifierOutput,
35
+ TokenClassifierOutput,
36
+ )
37
+ from ...modeling_utils import PreTrainedModel
38
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
39
+ from ...utils import (
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from .configuration_roberta_prelayernorm import RobertaPreLayerNormConfig
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "andreasmadsen/efficient_mlm_m0.40"
52
+ _CONFIG_FOR_DOC = "RobertaPreLayerNormConfig"
53
+
54
+
55
+ from ..deprecated._archive_maps import ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
56
+
57
+
58
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->RobertaPreLayerNorm
59
+ class RobertaPreLayerNormEmbeddings(nn.Module):
60
+ """
61
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
62
+ """
63
+
64
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
65
+ def __init__(self, config):
66
+ super().__init__()
67
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
68
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
69
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
70
+
71
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
72
+ # any TensorFlow checkpoint file
73
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
74
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
75
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
76
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
77
+ self.register_buffer(
78
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
79
+ )
80
+ self.register_buffer(
81
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
82
+ )
83
+
84
+ # End copy
85
+ self.padding_idx = config.pad_token_id
86
+ self.position_embeddings = nn.Embedding(
87
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
88
+ )
89
+
90
+ def forward(
91
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
92
+ ):
93
+ if position_ids is None:
94
+ if input_ids is not None:
95
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
96
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
97
+ else:
98
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
99
+
100
+ if input_ids is not None:
101
+ input_shape = input_ids.size()
102
+ else:
103
+ input_shape = inputs_embeds.size()[:-1]
104
+
105
+ seq_length = input_shape[1]
106
+
107
+ # Set the token_type_ids to the registered buffer from the constructor, where it is all zeros. This usually occurs
108
+ # when it is auto-generated; the registered buffer helps users trace the model without passing token_type_ids and solves
109
+ # issue #5664
110
+ if token_type_ids is None:
111
+ if hasattr(self, "token_type_ids"):
112
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
113
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
114
+ token_type_ids = buffered_token_type_ids_expanded
115
+ else:
116
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
117
+
118
+ if inputs_embeds is None:
119
+ inputs_embeds = self.word_embeddings(input_ids)
120
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
121
+
122
+ embeddings = inputs_embeds + token_type_embeddings
123
+ if self.position_embedding_type == "absolute":
124
+ position_embeddings = self.position_embeddings(position_ids)
125
+ embeddings += position_embeddings
126
+ embeddings = self.LayerNorm(embeddings)
127
+ embeddings = self.dropout(embeddings)
128
+ return embeddings
129
+
130
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
131
+ """
132
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
133
+
134
+ Args:
135
+ inputs_embeds: torch.Tensor
136
+
137
+ Returns: torch.Tensor
138
+ """
139
+ input_shape = inputs_embeds.size()[:-1]
140
+ sequence_length = input_shape[1]
141
+
142
+ position_ids = torch.arange(
143
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
144
+ )
145
+ return position_ids.unsqueeze(0).expand(input_shape)
146
+
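create_position_ids_from_input_ids (used in forward above) assigns positions starting at padding_idx + 1 and keeps padded slots at the padding position. A sketch of the equivalent arithmetic on a toy batch:

import torch

padding_idx = 1
input_ids = torch.tensor([[0, 31414, 232, 2, padding_idx, padding_idx]])
mask = input_ids.ne(padding_idx).int()
position_ids = torch.cumsum(mask, dim=1) * mask + padding_idx
# tensor([[2, 3, 4, 5, 1, 1]]) -- real tokens count up from padding_idx + 1, pads stay at padding_idx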
147
+
148
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->RobertaPreLayerNorm
149
+ class RobertaPreLayerNormSelfAttention(nn.Module):
150
+ def __init__(self, config, position_embedding_type=None):
151
+ super().__init__()
152
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
153
+ raise ValueError(
154
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
155
+ f"heads ({config.num_attention_heads})"
156
+ )
157
+
158
+ self.num_attention_heads = config.num_attention_heads
159
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
160
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
161
+
162
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
163
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
164
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
165
+
166
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
167
+ self.position_embedding_type = position_embedding_type or getattr(
168
+ config, "position_embedding_type", "absolute"
169
+ )
170
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
171
+ self.max_position_embeddings = config.max_position_embeddings
172
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
173
+
174
+ self.is_decoder = config.is_decoder
175
+
176
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
177
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
178
+ x = x.view(new_x_shape)
179
+ return x.permute(0, 2, 1, 3)
180
+
181
+ def forward(
182
+ self,
183
+ hidden_states: torch.Tensor,
184
+ attention_mask: Optional[torch.FloatTensor] = None,
185
+ head_mask: Optional[torch.FloatTensor] = None,
186
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
187
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
188
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
189
+ output_attentions: Optional[bool] = False,
190
+ ) -> Tuple[torch.Tensor]:
191
+ mixed_query_layer = self.query(hidden_states)
192
+
193
+ # If this is instantiated as a cross-attention module, the keys
194
+ # and values come from an encoder; the attention mask needs to be
195
+ # such that the encoder's padding tokens are not attended to.
196
+ is_cross_attention = encoder_hidden_states is not None
197
+
198
+ if is_cross_attention and past_key_value is not None:
199
+ # reuse k,v, cross_attentions
200
+ key_layer = past_key_value[0]
201
+ value_layer = past_key_value[1]
202
+ attention_mask = encoder_attention_mask
203
+ elif is_cross_attention:
204
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
205
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
206
+ attention_mask = encoder_attention_mask
207
+ elif past_key_value is not None:
208
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
209
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
210
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
211
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
212
+ else:
213
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
214
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
215
+
216
+ query_layer = self.transpose_for_scores(mixed_query_layer)
217
+
218
+ use_cache = past_key_value is not None
219
+ if self.is_decoder:
220
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
221
+ # Further calls to cross_attention layer can then reuse all cross-attention
222
+ # key/value_states (first "if" case)
223
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
224
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
225
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
226
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
227
+ past_key_value = (key_layer, value_layer)
228
+
229
+ # Take the dot product between "query" and "key" to get the raw attention scores.
230
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
231
+
232
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
233
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
234
+ if use_cache:
235
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
236
+ -1, 1
237
+ )
238
+ else:
239
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
240
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
241
+ distance = position_ids_l - position_ids_r
242
+
243
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
244
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
245
+
246
+ if self.position_embedding_type == "relative_key":
247
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
248
+ attention_scores = attention_scores + relative_position_scores
249
+ elif self.position_embedding_type == "relative_key_query":
250
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
251
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
252
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
253
+
254
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
255
+ if attention_mask is not None:
256
+ # Apply the attention mask (precomputed for all layers in the RobertaPreLayerNormModel forward() function)
257
+ attention_scores = attention_scores + attention_mask
258
+
259
+ # Normalize the attention scores to probabilities.
260
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
261
+
262
+ # This is actually dropping out entire tokens to attend to, which might
263
+ # seem a bit unusual, but is taken from the original Transformer paper.
264
+ attention_probs = self.dropout(attention_probs)
265
+
266
+ # Mask heads if we want to
267
+ if head_mask is not None:
268
+ attention_probs = attention_probs * head_mask
269
+
270
+ context_layer = torch.matmul(attention_probs, value_layer)
271
+
272
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
273
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
274
+ context_layer = context_layer.view(new_context_layer_shape)
275
+
276
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
277
+
278
+ if self.is_decoder:
279
+ outputs = outputs + (past_key_value,)
280
+ return outputs
281
+
282
+
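The relative_key / relative_key_query branch above indexes a distance embedding table. A minimal sketch of how the signed distances are shifted into valid embedding indices (toy sizes, not the configured values):

import torch

max_position_embeddings, head_size = 512, 64
distance_embedding = torch.nn.Embedding(2 * max_position_embeddings - 1, head_size)

query_length = key_length = 4
position_ids_l = torch.arange(query_length, dtype=torch.long).view(-1, 1)
position_ids_r = torch.arange(key_length, dtype=torch.long).view(1, -1)
distance = position_ids_l - position_ids_r                                          # signed offsets
positional_embedding = distance_embedding(distance + max_position_embeddings - 1)   # shift to >= 0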
283
+ class RobertaPreLayerNormSelfOutput(nn.Module):
284
+ def __init__(self, config):
285
+ super().__init__()
286
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
287
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
288
+
289
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
290
+ hidden_states = self.dense(hidden_states)
291
+ hidden_states = self.dropout(hidden_states)
292
+ hidden_states = hidden_states + input_tensor
293
+ return hidden_states
294
+
295
+
296
+ class RobertaPreLayerNormAttention(nn.Module):
297
+ def __init__(self, config, position_embedding_type=None):
298
+ super().__init__()
299
+ self.self = RobertaPreLayerNormSelfAttention(config, position_embedding_type=position_embedding_type)
300
+ self.output = RobertaPreLayerNormSelfOutput(config)
301
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
302
+ self.pruned_heads = set()
303
+
304
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
305
+ def prune_heads(self, heads):
306
+ if len(heads) == 0:
307
+ return
308
+ heads, index = find_pruneable_heads_and_indices(
309
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
310
+ )
311
+
312
+ # Prune linear layers
313
+ self.self.query = prune_linear_layer(self.self.query, index)
314
+ self.self.key = prune_linear_layer(self.self.key, index)
315
+ self.self.value = prune_linear_layer(self.self.value, index)
316
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
317
+
318
+ # Update hyper params and store pruned heads
319
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
320
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
321
+ self.pruned_heads = self.pruned_heads.union(heads)
322
+
323
+ def forward(
324
+ self,
325
+ hidden_states: torch.Tensor,
326
+ attention_mask: Optional[torch.FloatTensor] = None,
327
+ head_mask: Optional[torch.FloatTensor] = None,
328
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
329
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
330
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
331
+ output_attentions: Optional[bool] = False,
332
+ ) -> Tuple[torch.Tensor]:
333
+ hidden_states_pre_layer_norm = self.LayerNorm(hidden_states)
334
+ self_outputs = self.self(
335
+ hidden_states_pre_layer_norm,
336
+ attention_mask,
337
+ head_mask,
338
+ encoder_hidden_states,
339
+ encoder_attention_mask,
340
+ past_key_value,
341
+ output_attentions,
342
+ )
343
+ attention_output = self.output(self_outputs[0], hidden_states)
344
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
345
+ return outputs
346
+
347
+
348
+ class RobertaPreLayerNormIntermediate(nn.Module):
349
+ def __init__(self, config):
350
+ super().__init__()
351
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
352
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
353
+ if isinstance(config.hidden_act, str):
354
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
355
+ else:
356
+ self.intermediate_act_fn = config.hidden_act
357
+
358
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
359
+ hidden_states = self.LayerNorm(hidden_states)
360
+ hidden_states = self.dense(hidden_states)
361
+ hidden_states = self.intermediate_act_fn(hidden_states)
362
+ return hidden_states
363
+
364
+
365
+ class RobertaPreLayerNormOutput(nn.Module):
366
+ def __init__(self, config):
367
+ super().__init__()
368
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
369
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
370
+
371
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
372
+ hidden_states = self.dense(hidden_states)
373
+ hidden_states = self.dropout(hidden_states)
374
+ hidden_states = hidden_states + input_tensor
375
+ return hidden_states
376
+
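The defining tweak of this architecture shows up in the attention and intermediate modules above: LayerNorm is applied to the sub-layer input, while the residual adds back the unnormalized activations. A minimal standalone sketch of that pre-LayerNorm residual pattern (illustrative only, not the library classes):

import torch
from torch import nn

hidden_size = 8
layer_norm = nn.LayerNorm(hidden_size)
sublayer = nn.Linear(hidden_size, hidden_size)   # stands in for attention or the feed-forward block

x = torch.randn(2, 5, hidden_size)
out = x + sublayer(layer_norm(x))                # pre-LN: normalize -> sub-layer -> residual add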
377
+
378
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->RobertaPreLayerNorm
379
+ class RobertaPreLayerNormLayer(nn.Module):
380
+ def __init__(self, config):
381
+ super().__init__()
382
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
383
+ self.seq_len_dim = 1
384
+ self.attention = RobertaPreLayerNormAttention(config)
385
+ self.is_decoder = config.is_decoder
386
+ self.add_cross_attention = config.add_cross_attention
387
+ if self.add_cross_attention:
388
+ if not self.is_decoder:
389
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
390
+ self.crossattention = RobertaPreLayerNormAttention(config, position_embedding_type="absolute")
391
+ self.intermediate = RobertaPreLayerNormIntermediate(config)
392
+ self.output = RobertaPreLayerNormOutput(config)
393
+
394
+ def forward(
395
+ self,
396
+ hidden_states: torch.Tensor,
397
+ attention_mask: Optional[torch.FloatTensor] = None,
398
+ head_mask: Optional[torch.FloatTensor] = None,
399
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
400
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
401
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
402
+ output_attentions: Optional[bool] = False,
403
+ ) -> Tuple[torch.Tensor]:
404
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
405
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
406
+ self_attention_outputs = self.attention(
407
+ hidden_states,
408
+ attention_mask,
409
+ head_mask,
410
+ output_attentions=output_attentions,
411
+ past_key_value=self_attn_past_key_value,
412
+ )
413
+ attention_output = self_attention_outputs[0]
414
+
415
+ # if decoder, the last output is tuple of self-attn cache
416
+ if self.is_decoder:
417
+ outputs = self_attention_outputs[1:-1]
418
+ present_key_value = self_attention_outputs[-1]
419
+ else:
420
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
421
+
422
+ cross_attn_present_key_value = None
423
+ if self.is_decoder and encoder_hidden_states is not None:
424
+ if not hasattr(self, "crossattention"):
425
+ raise ValueError(
426
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
427
+ " by setting `config.add_cross_attention=True`"
428
+ )
429
+
430
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
431
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
432
+ cross_attention_outputs = self.crossattention(
433
+ attention_output,
434
+ attention_mask,
435
+ head_mask,
436
+ encoder_hidden_states,
437
+ encoder_attention_mask,
438
+ cross_attn_past_key_value,
439
+ output_attentions,
440
+ )
441
+ attention_output = cross_attention_outputs[0]
442
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
443
+
444
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
445
+ cross_attn_present_key_value = cross_attention_outputs[-1]
446
+ present_key_value = present_key_value + cross_attn_present_key_value
447
+
448
+ layer_output = apply_chunking_to_forward(
449
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
450
+ )
451
+ outputs = (layer_output,) + outputs
452
+
453
+ # if decoder, return the attn key/values as the last output
454
+ if self.is_decoder:
455
+ outputs = outputs + (present_key_value,)
456
+
457
+ return outputs
458
+
459
+ def feed_forward_chunk(self, attention_output):
460
+ intermediate_output = self.intermediate(attention_output)
461
+ layer_output = self.output(intermediate_output, attention_output)
462
+ return layer_output
463
+
464
+
465
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->RobertaPreLayerNorm
466
+ class RobertaPreLayerNormEncoder(nn.Module):
467
+ def __init__(self, config):
468
+ super().__init__()
469
+ self.config = config
470
+ self.layer = nn.ModuleList([RobertaPreLayerNormLayer(config) for _ in range(config.num_hidden_layers)])
471
+ self.gradient_checkpointing = False
472
+
473
+ def forward(
474
+ self,
475
+ hidden_states: torch.Tensor,
476
+ attention_mask: Optional[torch.FloatTensor] = None,
477
+ head_mask: Optional[torch.FloatTensor] = None,
478
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
479
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
480
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
481
+ use_cache: Optional[bool] = None,
482
+ output_attentions: Optional[bool] = False,
483
+ output_hidden_states: Optional[bool] = False,
484
+ return_dict: Optional[bool] = True,
485
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
486
+ all_hidden_states = () if output_hidden_states else None
487
+ all_self_attentions = () if output_attentions else None
488
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
489
+
490
+ if self.gradient_checkpointing and self.training:
491
+ if use_cache:
492
+ logger.warning_once(
493
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
494
+ )
495
+ use_cache = False
496
+
497
+ next_decoder_cache = () if use_cache else None
498
+ for i, layer_module in enumerate(self.layer):
499
+ if output_hidden_states:
500
+ all_hidden_states = all_hidden_states + (hidden_states,)
501
+
502
+ layer_head_mask = head_mask[i] if head_mask is not None else None
503
+ past_key_value = past_key_values[i] if past_key_values is not None else None
504
+
505
+ if self.gradient_checkpointing and self.training:
506
+ layer_outputs = self._gradient_checkpointing_func(
507
+ layer_module.__call__,
508
+ hidden_states,
509
+ attention_mask,
510
+ layer_head_mask,
511
+ encoder_hidden_states,
512
+ encoder_attention_mask,
513
+ past_key_value,
514
+ output_attentions,
515
+ )
516
+ else:
517
+ layer_outputs = layer_module(
518
+ hidden_states,
519
+ attention_mask,
520
+ layer_head_mask,
521
+ encoder_hidden_states,
522
+ encoder_attention_mask,
523
+ past_key_value,
524
+ output_attentions,
525
+ )
526
+
527
+ hidden_states = layer_outputs[0]
528
+ if use_cache:
529
+ next_decoder_cache += (layer_outputs[-1],)
530
+ if output_attentions:
531
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
532
+ if self.config.add_cross_attention:
533
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
534
+
535
+ if output_hidden_states:
536
+ all_hidden_states = all_hidden_states + (hidden_states,)
537
+
538
+ if not return_dict:
539
+ return tuple(
540
+ v
541
+ for v in [
542
+ hidden_states,
543
+ next_decoder_cache,
544
+ all_hidden_states,
545
+ all_self_attentions,
546
+ all_cross_attentions,
547
+ ]
548
+ if v is not None
549
+ )
550
+ return BaseModelOutputWithPastAndCrossAttentions(
551
+ last_hidden_state=hidden_states,
552
+ past_key_values=next_decoder_cache,
553
+ hidden_states=all_hidden_states,
554
+ attentions=all_self_attentions,
555
+ cross_attentions=all_cross_attentions,
556
+ )
557
+
558
+
559
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
560
+ class RobertaPreLayerNormPooler(nn.Module):
561
+ def __init__(self, config):
562
+ super().__init__()
563
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
564
+ self.activation = nn.Tanh()
565
+
566
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
567
+ # We "pool" the model by simply taking the hidden state corresponding
568
+ # to the first token.
569
+ first_token_tensor = hidden_states[:, 0]
570
+ pooled_output = self.dense(first_token_tensor)
571
+ pooled_output = self.activation(pooled_output)
572
+ return pooled_output
573
+
574
+
575
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaPreTrainedModel with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
576
+ class RobertaPreLayerNormPreTrainedModel(PreTrainedModel):
577
+ """
578
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
579
+ models.
580
+ """
581
+
582
+ config_class = RobertaPreLayerNormConfig
583
+ base_model_prefix = "roberta_prelayernorm"
584
+ supports_gradient_checkpointing = True
585
+ _no_split_modules = ["RobertaPreLayerNormEmbeddings", "RobertaPreLayerNormSelfAttention"]
586
+
587
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
588
+ def _init_weights(self, module):
589
+ """Initialize the weights"""
590
+ if isinstance(module, nn.Linear):
591
+ # Slightly different from the TF version which uses truncated_normal for initialization
592
+ # cf https://github.com/pytorch/pytorch/pull/5617
593
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
594
+ if module.bias is not None:
595
+ module.bias.data.zero_()
596
+ elif isinstance(module, nn.Embedding):
597
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
598
+ if module.padding_idx is not None:
599
+ module.weight.data[module.padding_idx].zero_()
600
+ elif isinstance(module, nn.LayerNorm):
601
+ module.bias.data.zero_()
602
+ module.weight.data.fill_(1.0)
603
+
604
+
605
+ ROBERTA_PRELAYERNORM_START_DOCSTRING = r"""
606
+
607
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
608
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
609
+ etc.)
610
+
611
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
612
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
613
+ and behavior.
614
+
615
+ Parameters:
616
+ config ([`RobertaPreLayerNormConfig`]): Model configuration class with all the parameters of the
617
+ model. Initializing with a config file does not load the weights associated with the model, only the
618
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
619
+ """
620
+
621
+ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING = r"""
622
+ Args:
623
+ input_ids (`torch.LongTensor` of shape `({0})`):
624
+ Indices of input sequence tokens in the vocabulary.
625
+
626
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
627
+ [`PreTrainedTokenizer.__call__`] for details.
628
+
629
+ [What are input IDs?](../glossary#input-ids)
630
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
631
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
632
+
633
+ - 1 for tokens that are **not masked**,
634
+ - 0 for tokens that are **masked**.
635
+
636
+ [What are attention masks?](../glossary#attention-mask)
637
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
638
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
639
+
640
+ - 0 corresponds to a *sentence A* token,
641
+ - 1 corresponds to a *sentence B* token.
642
+ This parameter can only be used when the model is initialized with a `type_vocab_size` parameter of value
643
+ >= 2. All values in this tensor should always be < type_vocab_size.
644
+
645
+ [What are token type IDs?](../glossary#token-type-ids)
646
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
647
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
648
+ config.max_position_embeddings - 1]`.
649
+
650
+ [What are position IDs?](../glossary#position-ids)
651
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
652
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
653
+
654
+ - 1 indicates the head is **not masked**,
655
+ - 0 indicates the head is **masked**.
656
+
657
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
658
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
659
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
660
+ model's internal embedding lookup matrix.
661
+ output_attentions (`bool`, *optional*):
662
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
663
+ tensors for more detail.
664
+ output_hidden_states (`bool`, *optional*):
665
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
666
+ more detail.
667
+ return_dict (`bool`, *optional*):
668
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
669
+ """
670
+
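A hedged usage sketch of the inputs described above, using the documentation checkpoint with the PyTorch model defined next; padding the shorter sequence shows where attention_mask carries zeros:

import torch
from transformers import AutoTokenizer, RobertaPreLayerNormModel

tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
model = RobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40")

inputs = tokenizer(["Hello world", "Hi"], padding=True, return_tensors="pt")
print(inputs["attention_mask"])          # zeros mark the padded positions of the shorter sequence
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)   # (2, sequence_length, hidden_size)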
671
+
672
+ @add_start_docstrings(
673
+ "The bare RoBERTa-PreLayerNorm Model transformer outputting raw hidden-states without any specific head on top.",
674
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
675
+ )
676
+ class RobertaPreLayerNormModel(RobertaPreLayerNormPreTrainedModel):
677
+ """
678
+
679
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
680
+ cross-attention is added between the self-attention layers, following the architecture described in *Attention is
681
+ all you need* by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
682
+ Kaiser and Illia Polosukhin.
683
+
684
+ To behave as a decoder, the model needs to be initialized with the `is_decoder` argument of the configuration set
685
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
686
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
687
+
688
+ Reference: [Attention is all you need](https://arxiv.org/abs/1706.03762)
689
+
690
+ """
691
+
692
+ def __init__(self, config, add_pooling_layer=True):
693
+ super().__init__(config)
694
+ self.config = config
695
+
696
+ self.embeddings = RobertaPreLayerNormEmbeddings(config)
697
+ self.encoder = RobertaPreLayerNormEncoder(config)
698
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
699
+
700
+ self.pooler = RobertaPreLayerNormPooler(config) if add_pooling_layer else None
701
+
702
+ # Initialize weights and apply final processing
703
+ self.post_init()
704
+
705
+ def get_input_embeddings(self):
706
+ return self.embeddings.word_embeddings
707
+
708
+ def set_input_embeddings(self, value):
709
+ self.embeddings.word_embeddings = value
710
+
711
+ def _prune_heads(self, heads_to_prune):
712
+ """
713
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
714
+ class `PreTrainedModel`.
715
+ """
716
+ for layer, heads in heads_to_prune.items():
717
+ self.encoder.layer[layer].attention.prune_heads(heads)
718
+
719
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
720
+ @add_code_sample_docstrings(
721
+ checkpoint=_CHECKPOINT_FOR_DOC,
722
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
723
+ config_class=_CONFIG_FOR_DOC,
724
+ )
725
+ def forward(
726
+ self,
727
+ input_ids: Optional[torch.Tensor] = None,
728
+ attention_mask: Optional[torch.Tensor] = None,
729
+ token_type_ids: Optional[torch.Tensor] = None,
730
+ position_ids: Optional[torch.Tensor] = None,
731
+ head_mask: Optional[torch.Tensor] = None,
732
+ inputs_embeds: Optional[torch.Tensor] = None,
733
+ encoder_hidden_states: Optional[torch.Tensor] = None,
734
+ encoder_attention_mask: Optional[torch.Tensor] = None,
735
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
736
+ use_cache: Optional[bool] = None,
737
+ output_attentions: Optional[bool] = None,
738
+ output_hidden_states: Optional[bool] = None,
739
+ return_dict: Optional[bool] = None,
740
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
741
+ r"""
742
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
743
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
744
+ the model is configured as a decoder.
745
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
746
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
747
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
748
+
749
+ - 1 for tokens that are **not masked**,
750
+ - 0 for tokens that are **masked**.
751
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
752
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
753
+
754
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
755
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
756
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
757
+ use_cache (`bool`, *optional*):
758
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
759
+ `past_key_values`).
760
+ """
761
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
762
+ output_hidden_states = (
763
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
764
+ )
765
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
766
+
767
+ if self.config.is_decoder:
768
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
769
+ else:
770
+ use_cache = False
771
+
772
+ if input_ids is not None and inputs_embeds is not None:
773
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
774
+ elif input_ids is not None:
775
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
776
+ input_shape = input_ids.size()
777
+ elif inputs_embeds is not None:
778
+ input_shape = inputs_embeds.size()[:-1]
779
+ else:
780
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
781
+
782
+ batch_size, seq_length = input_shape
783
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
784
+
785
+ # past_key_values_length
786
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
787
+
788
+ if attention_mask is None:
789
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
790
+
791
+ if token_type_ids is None:
792
+ if hasattr(self.embeddings, "token_type_ids"):
793
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
794
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
795
+ token_type_ids = buffered_token_type_ids_expanded
796
+ else:
797
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
798
+
799
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
800
+ # ourselves in which case we just need to make it broadcastable to all heads.
801
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
802
+
803
+ # If a 2D or 3D attention mask is provided for the cross-attention
804
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
805
+ if self.config.is_decoder and encoder_hidden_states is not None:
806
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
807
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
808
+ if encoder_attention_mask is None:
809
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
810
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
811
+ else:
812
+ encoder_extended_attention_mask = None
813
+
814
+ # Prepare head mask if needed
815
+ # 1.0 in head_mask indicate we keep the head
816
+ # attention_probs has shape bsz x n_heads x N x N
817
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
818
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
819
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
820
+
821
+ embedding_output = self.embeddings(
822
+ input_ids=input_ids,
823
+ position_ids=position_ids,
824
+ token_type_ids=token_type_ids,
825
+ inputs_embeds=inputs_embeds,
826
+ past_key_values_length=past_key_values_length,
827
+ )
828
+ encoder_outputs = self.encoder(
829
+ embedding_output,
830
+ attention_mask=extended_attention_mask,
831
+ head_mask=head_mask,
832
+ encoder_hidden_states=encoder_hidden_states,
833
+ encoder_attention_mask=encoder_extended_attention_mask,
834
+ past_key_values=past_key_values,
835
+ use_cache=use_cache,
836
+ output_attentions=output_attentions,
837
+ output_hidden_states=output_hidden_states,
838
+ return_dict=return_dict,
839
+ )
840
+ sequence_output = encoder_outputs[0]
841
+ sequence_output = self.LayerNorm(sequence_output)
842
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
843
+
844
+ if not return_dict:
845
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
846
+
847
+ return BaseModelOutputWithPoolingAndCrossAttentions(
848
+ last_hidden_state=sequence_output,
849
+ pooler_output=pooled_output,
850
+ past_key_values=encoder_outputs.past_key_values,
851
+ hidden_states=encoder_outputs.hidden_states,
852
+ attentions=encoder_outputs.attentions,
853
+ cross_attentions=encoder_outputs.cross_attentions,
854
+ )
855
+
856
+
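Below is a minimal sketch of calling the bare `RobertaPreLayerNormModel` defined above. It assumes the `andreasmadsen/efficient_mlm_m0.40` checkpoint (the `_CHECKPOINT_FOR_DOC` used elsewhere in this module) can be downloaded, and that `torch` and `transformers` are installed.

```python
import torch
from transformers import AutoTokenizer, RobertaPreLayerNormModel

# Checkpoint name is an assumption taken from _CHECKPOINT_FOR_DOC in this module.
checkpoint = "andreasmadsen/efficient_mlm_m0.40"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = RobertaPreLayerNormModel.from_pretrained(checkpoint)

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# last_hidden_state: (batch_size, sequence_length, hidden_size)
# pooler_output:     (batch_size, hidden_size), present because add_pooling_layer defaults to True
print(outputs.last_hidden_state.shape, outputs.pooler_output.shape)
```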
857
+ @add_start_docstrings(
858
+ """RoBERTa-PreLayerNorm Model with a `language modeling` head on top for CLM fine-tuning.""",
859
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
860
+ )
861
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM with FacebookAI/roberta-base->andreasmadsen/efficient_mlm_m0.40,ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm, RobertaPreLayerNormTokenizer->RobertaTokenizer
862
+ class RobertaPreLayerNormForCausalLM(RobertaPreLayerNormPreTrainedModel):
863
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
864
+
865
+ def __init__(self, config):
866
+ super().__init__(config)
867
+
868
+ if not config.is_decoder:
869
+ logger.warning(
870
+ "If you want to use `RobertaPreLayerNormLMHeadModel` as a standalone, add `is_decoder=True.`"
871
+ )
872
+
873
+ self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False)
874
+ self.lm_head = RobertaPreLayerNormLMHead(config)
875
+
876
+ # Initialize weights and apply final processing
877
+ self.post_init()
878
+
879
+ def get_output_embeddings(self):
880
+ return self.lm_head.decoder
881
+
882
+ def set_output_embeddings(self, new_embeddings):
883
+ self.lm_head.decoder = new_embeddings
884
+
885
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
886
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
887
+ def forward(
888
+ self,
889
+ input_ids: Optional[torch.LongTensor] = None,
890
+ attention_mask: Optional[torch.FloatTensor] = None,
891
+ token_type_ids: Optional[torch.LongTensor] = None,
892
+ position_ids: Optional[torch.LongTensor] = None,
893
+ head_mask: Optional[torch.FloatTensor] = None,
894
+ inputs_embeds: Optional[torch.FloatTensor] = None,
895
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
896
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
897
+ labels: Optional[torch.LongTensor] = None,
898
+ past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
899
+ use_cache: Optional[bool] = None,
900
+ output_attentions: Optional[bool] = None,
901
+ output_hidden_states: Optional[bool] = None,
902
+ return_dict: Optional[bool] = None,
903
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
904
+ r"""
905
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
906
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
907
+ the model is configured as a decoder.
908
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
909
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
910
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
911
+
912
+ - 1 for tokens that are **not masked**,
913
+ - 0 for tokens that are **masked**.
914
+
915
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
916
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
917
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
918
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
919
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
920
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
921
+
922
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
923
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
924
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
925
+ use_cache (`bool`, *optional*):
926
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
927
+ `past_key_values`).
928
+
929
+ Returns:
930
+
931
+ Example:
932
+
933
+ ```python
934
+ >>> from transformers import AutoTokenizer, RobertaPreLayerNormForCausalLM, AutoConfig
935
+ >>> import torch
936
+
937
+ >>> tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
938
+ >>> config = AutoConfig.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
939
+ >>> config.is_decoder = True
940
+ >>> model = RobertaPreLayerNormForCausalLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", config=config)
941
+
942
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
943
+ >>> outputs = model(**inputs)
944
+
945
+ >>> prediction_logits = outputs.logits
946
+ ```"""
947
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
948
+ if labels is not None:
949
+ use_cache = False
950
+
951
+ outputs = self.roberta_prelayernorm(
952
+ input_ids,
953
+ attention_mask=attention_mask,
954
+ token_type_ids=token_type_ids,
955
+ position_ids=position_ids,
956
+ head_mask=head_mask,
957
+ inputs_embeds=inputs_embeds,
958
+ encoder_hidden_states=encoder_hidden_states,
959
+ encoder_attention_mask=encoder_attention_mask,
960
+ past_key_values=past_key_values,
961
+ use_cache=use_cache,
962
+ output_attentions=output_attentions,
963
+ output_hidden_states=output_hidden_states,
964
+ return_dict=return_dict,
965
+ )
966
+
967
+ sequence_output = outputs[0]
968
+ prediction_scores = self.lm_head(sequence_output)
969
+
970
+ lm_loss = None
971
+ if labels is not None:
972
+ # move labels to correct device to enable model parallelism
973
+ labels = labels.to(prediction_scores.device)
974
+ # we are doing next-token prediction; shift prediction scores and input ids by one
975
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
976
+ labels = labels[:, 1:].contiguous()
977
+ loss_fct = CrossEntropyLoss()
978
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
979
+
980
+ if not return_dict:
981
+ output = (prediction_scores,) + outputs[2:]
982
+ return ((lm_loss,) + output) if lm_loss is not None else output
983
+
984
+ return CausalLMOutputWithCrossAttentions(
985
+ loss=lm_loss,
986
+ logits=prediction_scores,
987
+ past_key_values=outputs.past_key_values,
988
+ hidden_states=outputs.hidden_states,
989
+ attentions=outputs.attentions,
990
+ cross_attentions=outputs.cross_attentions,
991
+ )
992
+
993
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
994
+ input_shape = input_ids.shape
995
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
996
+ if attention_mask is None:
997
+ attention_mask = input_ids.new_ones(input_shape)
998
+
999
+ # cut decoder_input_ids if past_key_values is used
1000
+ if past_key_values is not None:
1001
+ past_length = past_key_values[0][0].shape[2]
1002
+
1003
+ # Some generation methods already pass only the last input ID
1004
+ if input_ids.shape[1] > past_length:
1005
+ remove_prefix_length = past_length
1006
+ else:
1007
+ # Default to old behavior: keep only final ID
1008
+ remove_prefix_length = input_ids.shape[1] - 1
1009
+
1010
+ input_ids = input_ids[:, remove_prefix_length:]
1011
+
1012
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1013
+
1014
+ def _reorder_cache(self, past_key_values, beam_idx):
1015
+ reordered_past = ()
1016
+ for layer_past in past_key_values:
1017
+ reordered_past += (
1018
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1019
+ )
1020
+ return reordered_past
1021
+
1022
+
1023
+ @add_start_docstrings(
1024
+ """RoBERTa-PreLayerNorm Model with a `language modeling` head on top.""", ROBERTA_PRELAYERNORM_START_DOCSTRING
1025
+ )
1026
+ class RobertaPreLayerNormForMaskedLM(RobertaPreLayerNormPreTrainedModel):
1027
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
1028
+
1029
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM.__init__ with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1030
+ def __init__(self, config):
1031
+ super().__init__(config)
1032
+
1033
+ if config.is_decoder:
1034
+ logger.warning(
1035
+ "If you want to use `RobertaPreLayerNormForMaskedLM` make sure `config.is_decoder=False` for "
1036
+ "bi-directional self-attention."
1037
+ )
1038
+
1039
+ self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False)
1040
+ self.lm_head = RobertaPreLayerNormLMHead(config)
1041
+
1042
+ # Initialize weights and apply final processing
1043
+ self.post_init()
1044
+
1045
+ def get_output_embeddings(self):
1046
+ return self.lm_head.decoder
1047
+
1048
+ def set_output_embeddings(self, new_embeddings):
1049
+ self.lm_head.decoder = new_embeddings
1050
+
1051
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1052
+ @add_code_sample_docstrings(
1053
+ checkpoint=_CHECKPOINT_FOR_DOC,
1054
+ output_type=MaskedLMOutput,
1055
+ config_class=_CONFIG_FOR_DOC,
1056
+ mask="<mask>",
1057
+ expected_output="' Paris'",
1058
+ expected_loss=0.69,
1059
+ )
1060
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM.forward with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1061
+ def forward(
1062
+ self,
1063
+ input_ids: Optional[torch.LongTensor] = None,
1064
+ attention_mask: Optional[torch.FloatTensor] = None,
1065
+ token_type_ids: Optional[torch.LongTensor] = None,
1066
+ position_ids: Optional[torch.LongTensor] = None,
1067
+ head_mask: Optional[torch.FloatTensor] = None,
1068
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1069
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1070
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1071
+ labels: Optional[torch.LongTensor] = None,
1072
+ output_attentions: Optional[bool] = None,
1073
+ output_hidden_states: Optional[bool] = None,
1074
+ return_dict: Optional[bool] = None,
1075
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1076
+ r"""
1077
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1078
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1079
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1080
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1081
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1082
+ Used to hide legacy arguments that have been deprecated.
1083
+ """
1084
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1085
+
1086
+ outputs = self.roberta_prelayernorm(
1087
+ input_ids,
1088
+ attention_mask=attention_mask,
1089
+ token_type_ids=token_type_ids,
1090
+ position_ids=position_ids,
1091
+ head_mask=head_mask,
1092
+ inputs_embeds=inputs_embeds,
1093
+ encoder_hidden_states=encoder_hidden_states,
1094
+ encoder_attention_mask=encoder_attention_mask,
1095
+ output_attentions=output_attentions,
1096
+ output_hidden_states=output_hidden_states,
1097
+ return_dict=return_dict,
1098
+ )
1099
+ sequence_output = outputs[0]
1100
+ prediction_scores = self.lm_head(sequence_output)
1101
+
1102
+ masked_lm_loss = None
1103
+ if labels is not None:
1104
+ # move labels to correct device to enable model parallelism
1105
+ labels = labels.to(prediction_scores.device)
1106
+ loss_fct = CrossEntropyLoss()
1107
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1108
+
1109
+ if not return_dict:
1110
+ output = (prediction_scores,) + outputs[2:]
1111
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1112
+
1113
+ return MaskedLMOutput(
1114
+ loss=masked_lm_loss,
1115
+ logits=prediction_scores,
1116
+ hidden_states=outputs.hidden_states,
1117
+ attentions=outputs.attentions,
1118
+ )
1119
+
1120
+
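As a quick illustration of the masked-LM head above, here is a hedged sketch that fills a `<mask>` token. It assumes the same `andreasmadsen/efficient_mlm_m0.40` checkpoint; the `' Paris'` output only reflects the expected sample given in the docstring decorator.

```python
import torch
from transformers import AutoTokenizer, RobertaPreLayerNormForMaskedLM

checkpoint = "andreasmadsen/efficient_mlm_m0.40"  # assumed, as in _CHECKPOINT_FOR_DOC
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = RobertaPreLayerNormForMaskedLM.from_pretrained(checkpoint)

inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Locate the masked position and take the highest-scoring vocabulary entry there.
mask_positions = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))  # expected to be " Paris" per the docstring sample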
1121
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->RobertaPreLayerNorm
1122
+ class RobertaPreLayerNormLMHead(nn.Module):
1123
+ """RobertaPreLayerNorm Head for masked language modeling."""
1124
+
1125
+ def __init__(self, config):
1126
+ super().__init__()
1127
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1128
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1129
+
1130
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
1131
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1132
+ self.decoder.bias = self.bias
1133
+
1134
+ def forward(self, features, **kwargs):
1135
+ x = self.dense(features)
1136
+ x = gelu(x)
1137
+ x = self.layer_norm(x)
1138
+
1139
+ # project back to size of vocabulary with bias
1140
+ x = self.decoder(x)
1141
+
1142
+ return x
1143
+
1144
+ def _tie_weights(self):
1145
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
1146
+ # For accelerate compatibility and to not break backward compatibility
1147
+ if self.decoder.bias.device.type == "meta":
1148
+ self.decoder.bias = self.bias
1149
+ else:
1150
+ self.bias = self.decoder.bias
1151
+
1152
+
1153
+ @add_start_docstrings(
1154
+ """
1155
+ RoBERTa-PreLayerNorm Model transformer with a sequence classification/regression head on top (a linear layer on top
1156
+ of the pooled output) e.g. for GLUE tasks.
1157
+ """,
1158
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1159
+ )
1160
+ class RobertaPreLayerNormForSequenceClassification(RobertaPreLayerNormPreTrainedModel):
1161
+ def __init__(self, config):
1162
+ super().__init__(config)
1163
+ self.num_labels = config.num_labels
1164
+ self.config = config
1165
+
1166
+ self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False)
1167
+ self.classifier = RobertaPreLayerNormClassificationHead(config)
1168
+
1169
+ # Initialize weights and apply final processing
1170
+ self.post_init()
1171
+
1172
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1173
+ @add_code_sample_docstrings(
1174
+ checkpoint=_CHECKPOINT_FOR_DOC,
1175
+ output_type=SequenceClassifierOutput,
1176
+ config_class=_CONFIG_FOR_DOC,
1177
+ )
1178
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForSequenceClassification.forward with roberta->roberta_prelayernorm
1179
+ def forward(
1180
+ self,
1181
+ input_ids: Optional[torch.LongTensor] = None,
1182
+ attention_mask: Optional[torch.FloatTensor] = None,
1183
+ token_type_ids: Optional[torch.LongTensor] = None,
1184
+ position_ids: Optional[torch.LongTensor] = None,
1185
+ head_mask: Optional[torch.FloatTensor] = None,
1186
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1187
+ labels: Optional[torch.LongTensor] = None,
1188
+ output_attentions: Optional[bool] = None,
1189
+ output_hidden_states: Optional[bool] = None,
1190
+ return_dict: Optional[bool] = None,
1191
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1192
+ r"""
1193
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1194
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1195
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1196
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1197
+ """
1198
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1199
+
1200
+ outputs = self.roberta_prelayernorm(
1201
+ input_ids,
1202
+ attention_mask=attention_mask,
1203
+ token_type_ids=token_type_ids,
1204
+ position_ids=position_ids,
1205
+ head_mask=head_mask,
1206
+ inputs_embeds=inputs_embeds,
1207
+ output_attentions=output_attentions,
1208
+ output_hidden_states=output_hidden_states,
1209
+ return_dict=return_dict,
1210
+ )
1211
+ sequence_output = outputs[0]
1212
+ logits = self.classifier(sequence_output)
1213
+
1214
+ loss = None
1215
+ if labels is not None:
1216
+ # move labels to correct device to enable model parallelism
1217
+ labels = labels.to(logits.device)
1218
+ if self.config.problem_type is None:
1219
+ if self.num_labels == 1:
1220
+ self.config.problem_type = "regression"
1221
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1222
+ self.config.problem_type = "single_label_classification"
1223
+ else:
1224
+ self.config.problem_type = "multi_label_classification"
1225
+
1226
+ if self.config.problem_type == "regression":
1227
+ loss_fct = MSELoss()
1228
+ if self.num_labels == 1:
1229
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1230
+ else:
1231
+ loss = loss_fct(logits, labels)
1232
+ elif self.config.problem_type == "single_label_classification":
1233
+ loss_fct = CrossEntropyLoss()
1234
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1235
+ elif self.config.problem_type == "multi_label_classification":
1236
+ loss_fct = BCEWithLogitsLoss()
1237
+ loss = loss_fct(logits, labels)
1238
+
1239
+ if not return_dict:
1240
+ output = (logits,) + outputs[2:]
1241
+ return ((loss,) + output) if loss is not None else output
1242
+
1243
+ return SequenceClassifierOutput(
1244
+ loss=loss,
1245
+ logits=logits,
1246
+ hidden_states=outputs.hidden_states,
1247
+ attentions=outputs.attentions,
1248
+ )
1249
+
1250
+
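A short sketch of the classification head in use, assuming the same checkpoint as above. The MLM checkpoint ships without a fine-tuned classifier, so the newly initialized head gives an essentially random prediction; the snippet is only meant to show the input/output shapes and the single-label loss branch.

```python
import torch
from transformers import AutoTokenizer, RobertaPreLayerNormForSequenceClassification

checkpoint = "andreasmadsen/efficient_mlm_m0.40"  # assumed; the classifier weights are newly initialized
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = RobertaPreLayerNormForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

inputs = tokenizer("A readable, well-paced novel.", return_tensors="pt")
labels = torch.tensor([1])  # integer labels -> the single_label_classification branch above

outputs = model(**inputs, labels=labels)
print(outputs.loss.item(), outputs.logits.shape)  # logits: (batch_size, num_labels)
```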
1251
+ @add_start_docstrings(
1252
+ """
1253
+ RobertaPreLayerNorm Model with a multiple choice classification head on top (a linear layer on top of the pooled
1254
+ output and a softmax) e.g. for RocStories/SWAG tasks.
1255
+ """,
1256
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1257
+ )
1258
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForMultipleChoice with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1259
+ class RobertaPreLayerNormForMultipleChoice(RobertaPreLayerNormPreTrainedModel):
1260
+ def __init__(self, config):
1261
+ super().__init__(config)
1262
+
1263
+ self.roberta_prelayernorm = RobertaPreLayerNormModel(config)
1264
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1265
+ self.classifier = nn.Linear(config.hidden_size, 1)
1266
+
1267
+ # Initialize weights and apply final processing
1268
+ self.post_init()
1269
+
1270
+ @add_start_docstrings_to_model_forward(
1271
+ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1272
+ )
1273
+ @add_code_sample_docstrings(
1274
+ checkpoint=_CHECKPOINT_FOR_DOC,
1275
+ output_type=MultipleChoiceModelOutput,
1276
+ config_class=_CONFIG_FOR_DOC,
1277
+ )
1278
+ def forward(
1279
+ self,
1280
+ input_ids: Optional[torch.LongTensor] = None,
1281
+ token_type_ids: Optional[torch.LongTensor] = None,
1282
+ attention_mask: Optional[torch.FloatTensor] = None,
1283
+ labels: Optional[torch.LongTensor] = None,
1284
+ position_ids: Optional[torch.LongTensor] = None,
1285
+ head_mask: Optional[torch.FloatTensor] = None,
1286
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1287
+ output_attentions: Optional[bool] = None,
1288
+ output_hidden_states: Optional[bool] = None,
1289
+ return_dict: Optional[bool] = None,
1290
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1291
+ r"""
1292
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1293
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1294
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1295
+ `input_ids` above)
1296
+ """
1297
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1298
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1299
+
1300
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1301
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1302
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1303
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1304
+ flat_inputs_embeds = (
1305
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1306
+ if inputs_embeds is not None
1307
+ else None
1308
+ )
1309
+
1310
+ outputs = self.roberta_prelayernorm(
1311
+ flat_input_ids,
1312
+ position_ids=flat_position_ids,
1313
+ token_type_ids=flat_token_type_ids,
1314
+ attention_mask=flat_attention_mask,
1315
+ head_mask=head_mask,
1316
+ inputs_embeds=flat_inputs_embeds,
1317
+ output_attentions=output_attentions,
1318
+ output_hidden_states=output_hidden_states,
1319
+ return_dict=return_dict,
1320
+ )
1321
+ pooled_output = outputs[1]
1322
+
1323
+ pooled_output = self.dropout(pooled_output)
1324
+ logits = self.classifier(pooled_output)
1325
+ reshaped_logits = logits.view(-1, num_choices)
1326
+
1327
+ loss = None
1328
+ if labels is not None:
1329
+ # move labels to correct device to enable model parallelism
1330
+ labels = labels.to(reshaped_logits.device)
1331
+ loss_fct = CrossEntropyLoss()
1332
+ loss = loss_fct(reshaped_logits, labels)
1333
+
1334
+ if not return_dict:
1335
+ output = (reshaped_logits,) + outputs[2:]
1336
+ return ((loss,) + output) if loss is not None else output
1337
+
1338
+ return MultipleChoiceModelOutput(
1339
+ loss=loss,
1340
+ logits=reshaped_logits,
1341
+ hidden_states=outputs.hidden_states,
1342
+ attentions=outputs.attentions,
1343
+ )
1344
+
1345
+
1346
+ @add_start_docstrings(
1347
+ """
1348
+ RobertaPreLayerNorm Model with a token classification head on top (a linear layer on top of the hidden-states
1349
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1350
+ """,
1351
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1352
+ )
1353
+ class RobertaPreLayerNormForTokenClassification(RobertaPreLayerNormPreTrainedModel):
1354
+ def __init__(self, config):
1355
+ super().__init__(config)
1356
+ self.num_labels = config.num_labels
1357
+
1358
+ self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False)
1359
+ classifier_dropout = (
1360
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1361
+ )
1362
+ self.dropout = nn.Dropout(classifier_dropout)
1363
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1364
+
1365
+ # Initialize weights and apply final processing
1366
+ self.post_init()
1367
+
1368
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1369
+ @add_code_sample_docstrings(
1370
+ checkpoint=_CHECKPOINT_FOR_DOC,
1371
+ output_type=TokenClassifierOutput,
1372
+ config_class=_CONFIG_FOR_DOC,
1373
+ )
1374
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification.forward with roberta->roberta_prelayernorm
1375
+ def forward(
1376
+ self,
1377
+ input_ids: Optional[torch.LongTensor] = None,
1378
+ attention_mask: Optional[torch.FloatTensor] = None,
1379
+ token_type_ids: Optional[torch.LongTensor] = None,
1380
+ position_ids: Optional[torch.LongTensor] = None,
1381
+ head_mask: Optional[torch.FloatTensor] = None,
1382
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1383
+ labels: Optional[torch.LongTensor] = None,
1384
+ output_attentions: Optional[bool] = None,
1385
+ output_hidden_states: Optional[bool] = None,
1386
+ return_dict: Optional[bool] = None,
1387
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1388
+ r"""
1389
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1390
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1391
+ """
1392
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1393
+
1394
+ outputs = self.roberta_prelayernorm(
1395
+ input_ids,
1396
+ attention_mask=attention_mask,
1397
+ token_type_ids=token_type_ids,
1398
+ position_ids=position_ids,
1399
+ head_mask=head_mask,
1400
+ inputs_embeds=inputs_embeds,
1401
+ output_attentions=output_attentions,
1402
+ output_hidden_states=output_hidden_states,
1403
+ return_dict=return_dict,
1404
+ )
1405
+
1406
+ sequence_output = outputs[0]
1407
+
1408
+ sequence_output = self.dropout(sequence_output)
1409
+ logits = self.classifier(sequence_output)
1410
+
1411
+ loss = None
1412
+ if labels is not None:
1413
+ # move labels to correct device to enable model parallelism
1414
+ labels = labels.to(logits.device)
1415
+ loss_fct = CrossEntropyLoss()
1416
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1417
+
1418
+ if not return_dict:
1419
+ output = (logits,) + outputs[2:]
1420
+ return ((loss,) + output) if loss is not None else output
1421
+
1422
+ return TokenClassifierOutput(
1423
+ loss=loss,
1424
+ logits=logits,
1425
+ hidden_states=outputs.hidden_states,
1426
+ attentions=outputs.attentions,
1427
+ )
1428
+
1429
+
1430
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->RobertaPreLayerNorm
1431
+ class RobertaPreLayerNormClassificationHead(nn.Module):
1432
+ """Head for sentence-level classification tasks."""
1433
+
1434
+ def __init__(self, config):
1435
+ super().__init__()
1436
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1437
+ classifier_dropout = (
1438
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1439
+ )
1440
+ self.dropout = nn.Dropout(classifier_dropout)
1441
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1442
+
1443
+ def forward(self, features, **kwargs):
1444
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1445
+ x = self.dropout(x)
1446
+ x = self.dense(x)
1447
+ x = torch.tanh(x)
1448
+ x = self.dropout(x)
1449
+ x = self.out_proj(x)
1450
+ return x
1451
+
1452
+
1453
+ @add_start_docstrings(
1454
+ """
1455
+ RobertaPreLayerNorm Model with a span classification head on top for extractive question-answering tasks like SQuAD
1456
+ (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1457
+ """,
1458
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1459
+ )
1460
+ class RobertaPreLayerNormForQuestionAnswering(RobertaPreLayerNormPreTrainedModel):
1461
+ def __init__(self, config):
1462
+ super().__init__(config)
1463
+ self.num_labels = config.num_labels
1464
+
1465
+ self.roberta_prelayernorm = RobertaPreLayerNormModel(config, add_pooling_layer=False)
1466
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1467
+
1468
+ # Initialize weights and apply final processing
1469
+ self.post_init()
1470
+
1471
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1472
+ @add_code_sample_docstrings(
1473
+ checkpoint=_CHECKPOINT_FOR_DOC,
1474
+ output_type=QuestionAnsweringModelOutput,
1475
+ config_class=_CONFIG_FOR_DOC,
1476
+ )
1477
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering.forward with roberta->roberta_prelayernorm
1478
+ def forward(
1479
+ self,
1480
+ input_ids: Optional[torch.LongTensor] = None,
1481
+ attention_mask: Optional[torch.FloatTensor] = None,
1482
+ token_type_ids: Optional[torch.LongTensor] = None,
1483
+ position_ids: Optional[torch.LongTensor] = None,
1484
+ head_mask: Optional[torch.FloatTensor] = None,
1485
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1486
+ start_positions: Optional[torch.LongTensor] = None,
1487
+ end_positions: Optional[torch.LongTensor] = None,
1488
+ output_attentions: Optional[bool] = None,
1489
+ output_hidden_states: Optional[bool] = None,
1490
+ return_dict: Optional[bool] = None,
1491
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1492
+ r"""
1493
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1494
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1495
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1496
+ are not taken into account for computing the loss.
1497
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1498
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1499
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1500
+ are not taken into account for computing the loss.
1501
+ """
1502
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1503
+
1504
+ outputs = self.roberta_prelayernorm(
1505
+ input_ids,
1506
+ attention_mask=attention_mask,
1507
+ token_type_ids=token_type_ids,
1508
+ position_ids=position_ids,
1509
+ head_mask=head_mask,
1510
+ inputs_embeds=inputs_embeds,
1511
+ output_attentions=output_attentions,
1512
+ output_hidden_states=output_hidden_states,
1513
+ return_dict=return_dict,
1514
+ )
1515
+
1516
+ sequence_output = outputs[0]
1517
+
1518
+ logits = self.qa_outputs(sequence_output)
1519
+ start_logits, end_logits = logits.split(1, dim=-1)
1520
+ start_logits = start_logits.squeeze(-1).contiguous()
1521
+ end_logits = end_logits.squeeze(-1).contiguous()
1522
+
1523
+ total_loss = None
1524
+ if start_positions is not None and end_positions is not None:
1525
+ # If we are on multi-GPU, splitting adds a dimension; squeeze it
1526
+ if len(start_positions.size()) > 1:
1527
+ start_positions = start_positions.squeeze(-1)
1528
+ if len(end_positions.size()) > 1:
1529
+ end_positions = end_positions.squeeze(-1)
1530
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1531
+ ignored_index = start_logits.size(1)
1532
+ start_positions = start_positions.clamp(0, ignored_index)
1533
+ end_positions = end_positions.clamp(0, ignored_index)
1534
+
1535
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1536
+ start_loss = loss_fct(start_logits, start_positions)
1537
+ end_loss = loss_fct(end_logits, end_positions)
1538
+ total_loss = (start_loss + end_loss) / 2
1539
+
1540
+ if not return_dict:
1541
+ output = (start_logits, end_logits) + outputs[2:]
1542
+ return ((total_loss,) + output) if total_loss is not None else output
1543
+
1544
+ return QuestionAnsweringModelOutput(
1545
+ loss=total_loss,
1546
+ start_logits=start_logits,
1547
+ end_logits=end_logits,
1548
+ hidden_states=outputs.hidden_states,
1549
+ attentions=outputs.attentions,
1550
+ )
1551
+
1552
+
1553
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1554
+ """
1555
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1556
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1557
+
1558
+ Args:
1559
+ input_ids: torch.Tensor
1560
+
1561
+ Returns: torch.Tensor
1562
+ """
1563
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1564
+ mask = input_ids.ne(padding_idx).int()
1565
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1566
+ return incremental_indices.long() + padding_idx
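To make the helper above concrete, here is a small worked example with made-up token ids and `past_key_values_length` left at its default of 0, showing that padding tokens keep `padding_idx` as their position while real tokens count up from `padding_idx + 1`:

```python
import torch

padding_idx = 1  # RoBERTa-style padding index
input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # made-up ids with two trailing pads

mask = input_ids.ne(padding_idx).int()                                 # [[1, 1, 1, 1, 0, 0]]
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask   # [[1, 2, 3, 4, 0, 0]]
position_ids = incremental_indices.long() + padding_idx

print(position_ids)  # tensor([[2, 3, 4, 5, 1, 1]]) - pads stay at padding_idx
```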
llmeval-env/lib/python3.10/site-packages/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py ADDED
@@ -0,0 +1,1799 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 RoBERTa-PreLayerNorm model."""
17
+
18
+
19
+ from __future__ import annotations
20
+
21
+ import math
22
+ import warnings
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutputWithPastAndCrossAttentions,
31
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
32
+ TFCausalLMOutputWithCrossAttentions,
33
+ TFMaskedLMOutput,
34
+ TFMultipleChoiceModelOutput,
35
+ TFQuestionAnsweringModelOutput,
36
+ TFSequenceClassifierOutput,
37
+ TFTokenClassifierOutput,
38
+ )
39
+ from ...modeling_tf_utils import (
40
+ TFCausalLanguageModelingLoss,
41
+ TFMaskedLanguageModelingLoss,
42
+ TFModelInputType,
43
+ TFMultipleChoiceLoss,
44
+ TFPreTrainedModel,
45
+ TFQuestionAnsweringLoss,
46
+ TFSequenceClassificationLoss,
47
+ TFTokenClassificationLoss,
48
+ get_initializer,
49
+ keras,
50
+ keras_serializable,
51
+ unpack_inputs,
52
+ )
53
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
54
+ from ...utils import (
55
+ add_code_sample_docstrings,
56
+ add_start_docstrings,
57
+ add_start_docstrings_to_model_forward,
58
+ logging,
59
+ )
60
+ from .configuration_roberta_prelayernorm import RobertaPreLayerNormConfig
61
+
62
+
63
+ logger = logging.get_logger(__name__)
64
+
65
+ _CHECKPOINT_FOR_DOC = "andreasmadsen/efficient_mlm_m0.40"
66
+ _CONFIG_FOR_DOC = "RobertaPreLayerNormConfig"
67
+
68
+
69
+ from ..deprecated._archive_maps import TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
70
+
71
+
72
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaEmbeddings with Roberta->RobertaPreLayerNorm
73
+ class TFRobertaPreLayerNormEmbeddings(keras.layers.Layer):
74
+ """
75
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
76
+ """
77
+
78
+ def __init__(self, config, **kwargs):
79
+ super().__init__(**kwargs)
80
+
81
+ self.padding_idx = 1
82
+ self.config = config
83
+ self.hidden_size = config.hidden_size
84
+ self.max_position_embeddings = config.max_position_embeddings
85
+ self.initializer_range = config.initializer_range
86
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
87
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
88
+
89
+ def build(self, input_shape=None):
90
+ with tf.name_scope("word_embeddings"):
91
+ self.weight = self.add_weight(
92
+ name="weight",
93
+ shape=[self.config.vocab_size, self.hidden_size],
94
+ initializer=get_initializer(self.initializer_range),
95
+ )
96
+
97
+ with tf.name_scope("token_type_embeddings"):
98
+ self.token_type_embeddings = self.add_weight(
99
+ name="embeddings",
100
+ shape=[self.config.type_vocab_size, self.hidden_size],
101
+ initializer=get_initializer(self.initializer_range),
102
+ )
103
+
104
+ with tf.name_scope("position_embeddings"):
105
+ self.position_embeddings = self.add_weight(
106
+ name="embeddings",
107
+ shape=[self.max_position_embeddings, self.hidden_size],
108
+ initializer=get_initializer(self.initializer_range),
109
+ )
110
+
111
+ if self.built:
112
+ return
113
+ self.built = True
114
+ if getattr(self, "LayerNorm", None) is not None:
115
+ with tf.name_scope(self.LayerNorm.name):
116
+ self.LayerNorm.build([None, None, self.config.hidden_size])
117
+
118
+ def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
119
+ """
120
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
121
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
122
+
123
+ Args:
124
+ input_ids: tf.Tensor
125
+ Returns: tf.Tensor
126
+ """
127
+ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
128
+ incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
129
+
130
+ return incremental_indices + self.padding_idx
131
+
132
+ def call(
133
+ self,
134
+ input_ids=None,
135
+ position_ids=None,
136
+ token_type_ids=None,
137
+ inputs_embeds=None,
138
+ past_key_values_length=0,
139
+ training=False,
140
+ ):
141
+ """
142
+ Applies embedding based on inputs tensor.
143
+
144
+ Returns:
145
+ final_embeddings (`tf.Tensor`): output embedding tensor.
146
+ """
147
+ assert not (input_ids is None and inputs_embeds is None)
148
+
149
+ if input_ids is not None:
150
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
151
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
152
+
153
+ input_shape = shape_list(inputs_embeds)[:-1]
154
+
155
+ if token_type_ids is None:
156
+ token_type_ids = tf.fill(dims=input_shape, value=0)
157
+
158
+ if position_ids is None:
159
+ if input_ids is not None:
160
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
161
+ position_ids = self.create_position_ids_from_input_ids(
162
+ input_ids=input_ids, past_key_values_length=past_key_values_length
163
+ )
164
+ else:
165
+ position_ids = tf.expand_dims(
166
+ tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0
167
+ )
168
+
169
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
170
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
171
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
172
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
173
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
174
+
175
+ return final_embeddings
176
+
177
+
178
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->RobertaPreLayerNorm
179
+ class TFRobertaPreLayerNormPooler(keras.layers.Layer):
180
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
181
+ super().__init__(**kwargs)
182
+
183
+ self.dense = keras.layers.Dense(
184
+ units=config.hidden_size,
185
+ kernel_initializer=get_initializer(config.initializer_range),
186
+ activation="tanh",
187
+ name="dense",
188
+ )
189
+ self.config = config
190
+
191
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
192
+ # We "pool" the model by simply taking the hidden state corresponding
193
+ # to the first token.
194
+ first_token_tensor = hidden_states[:, 0]
195
+ pooled_output = self.dense(inputs=first_token_tensor)
196
+
197
+ return pooled_output
198
+
199
+ def build(self, input_shape=None):
200
+ if self.built:
201
+ return
202
+ self.built = True
203
+ if getattr(self, "dense", None) is not None:
204
+ with tf.name_scope(self.dense.name):
205
+ self.dense.build([None, None, self.config.hidden_size])
206
+
207
+
208
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->RobertaPreLayerNorm
209
+ class TFRobertaPreLayerNormSelfAttention(keras.layers.Layer):
210
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
211
+ super().__init__(**kwargs)
212
+
213
+ if config.hidden_size % config.num_attention_heads != 0:
214
+ raise ValueError(
215
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
216
+ f"of attention heads ({config.num_attention_heads})"
217
+ )
218
+
219
+ self.num_attention_heads = config.num_attention_heads
220
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
221
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
222
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
223
+
224
+ self.query = keras.layers.Dense(
225
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
226
+ )
227
+ self.key = keras.layers.Dense(
228
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
229
+ )
230
+ self.value = keras.layers.Dense(
231
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
232
+ )
233
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
234
+
235
+ self.is_decoder = config.is_decoder
236
+ self.config = config
237
+
238
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
239
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
240
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
241
+
242
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
243
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
244
+
245
+ def call(
246
+ self,
247
+ hidden_states: tf.Tensor,
248
+ attention_mask: tf.Tensor,
249
+ head_mask: tf.Tensor,
250
+ encoder_hidden_states: tf.Tensor,
251
+ encoder_attention_mask: tf.Tensor,
252
+ past_key_value: Tuple[tf.Tensor],
253
+ output_attentions: bool,
254
+ training: bool = False,
255
+ ) -> Tuple[tf.Tensor]:
256
+ batch_size = shape_list(hidden_states)[0]
257
+ mixed_query_layer = self.query(inputs=hidden_states)
258
+
259
+ # If this is instantiated as a cross-attention module, the keys
260
+ # and values come from an encoder; the attention mask needs to be
261
+ # such that the encoder's padding tokens are not attended to.
262
+ is_cross_attention = encoder_hidden_states is not None
263
+
264
+ if is_cross_attention and past_key_value is not None:
265
+ # reuse k,v, cross_attentions
266
+ key_layer = past_key_value[0]
267
+ value_layer = past_key_value[1]
268
+ attention_mask = encoder_attention_mask
269
+ elif is_cross_attention:
270
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
271
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
272
+ attention_mask = encoder_attention_mask
273
+ elif past_key_value is not None:
274
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
275
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
276
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
277
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
278
+ else:
279
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
280
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
281
+
282
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
283
+
284
+ if self.is_decoder:
285
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
286
+ # Further calls to cross_attention layer can then reuse all cross-attention
287
+ # key/value_states (first "if" case)
288
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
289
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
290
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
291
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
292
+ past_key_value = (key_layer, value_layer)
293
+
294
+ # Take the dot product between "query" and "key" to get the raw attention scores.
295
+ # (batch size, num_heads, seq_len_q, seq_len_k)
296
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
297
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
298
+ attention_scores = tf.divide(attention_scores, dk)
299
+
300
+ if attention_mask is not None:
301
+ # Apply the attention mask (precomputed for all layers in the TFRobertaPreLayerNormModel call() function)
302
+ attention_scores = tf.add(attention_scores, attention_mask)
303
+
304
+ # Normalize the attention scores to probabilities.
305
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
306
+
307
+ # This is actually dropping out entire tokens to attend to, which might
308
+ # seem a bit unusual, but is taken from the original Transformer paper.
309
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
310
+
311
+ # Mask heads if we want to
312
+ if head_mask is not None:
313
+ attention_probs = tf.multiply(attention_probs, head_mask)
314
+
315
+ attention_output = tf.matmul(attention_probs, value_layer)
316
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
317
+
318
+ # (batch_size, seq_len_q, all_head_size)
319
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
320
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
321
+
322
+ if self.is_decoder:
323
+ outputs = outputs + (past_key_value,)
324
+ return outputs
325
+
326
+ def build(self, input_shape=None):
327
+ if self.built:
328
+ return
329
+ self.built = True
330
+ if getattr(self, "query", None) is not None:
331
+ with tf.name_scope(self.query.name):
332
+ self.query.build([None, None, self.config.hidden_size])
333
+ if getattr(self, "key", None) is not None:
334
+ with tf.name_scope(self.key.name):
335
+ self.key.build([None, None, self.config.hidden_size])
336
+ if getattr(self, "value", None) is not None:
337
+ with tf.name_scope(self.value.name):
338
+ self.value.build([None, None, self.config.hidden_size])
339
+
340
+
341
+ class TFRobertaPreLayerNormSelfOutput(keras.layers.Layer):
342
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
343
+ super().__init__(**kwargs)
344
+
345
+ self.dense = keras.layers.Dense(
346
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
347
+ )
348
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
349
+ self.config = config
350
+
351
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
352
+ hidden_states = self.dense(inputs=hidden_states)
353
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
354
+ hidden_states = hidden_states + input_tensor
355
+
356
+ return hidden_states
357
+
358
+ def build(self, input_shape=None):
359
+ if self.built:
360
+ return
361
+ self.built = True
362
+ if getattr(self, "dense", None) is not None:
363
+ with tf.name_scope(self.dense.name):
364
+ self.dense.build([None, None, self.config.hidden_size])
365
+
366
+
367
+ class TFRobertaPreLayerNormAttention(keras.layers.Layer):
368
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
369
+ super().__init__(**kwargs)
370
+
371
+ self.self_attention = TFRobertaPreLayerNormSelfAttention(config, name="self")
372
+ self.dense_output = TFRobertaPreLayerNormSelfOutput(config, name="output")
373
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
374
+ self.config = config
375
+
376
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention.prune_heads
377
+ def prune_heads(self, heads):
378
+ raise NotImplementedError
379
+
380
+ def call(
381
+ self,
382
+ input_tensor: tf.Tensor,
383
+ attention_mask: tf.Tensor,
384
+ head_mask: tf.Tensor,
385
+ encoder_hidden_states: tf.Tensor,
386
+ encoder_attention_mask: tf.Tensor,
387
+ past_key_value: Tuple[tf.Tensor],
388
+ output_attentions: bool,
389
+ training: bool = False,
390
+ ) -> Tuple[tf.Tensor]:
391
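+ # pre-LayerNorm variant: normalize the input before self-attention; the residual in dense_output adds back the un-normalized input_tensor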
+ hidden_states_pre_layer_norm = self.LayerNorm(inputs=input_tensor)
392
+ self_outputs = self.self_attention(
393
+ hidden_states=hidden_states_pre_layer_norm,
394
+ attention_mask=attention_mask,
395
+ head_mask=head_mask,
396
+ encoder_hidden_states=encoder_hidden_states,
397
+ encoder_attention_mask=encoder_attention_mask,
398
+ past_key_value=past_key_value,
399
+ output_attentions=output_attentions,
400
+ training=training,
401
+ )
402
+ attention_output = self.dense_output(
403
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
404
+ )
405
+ # add attentions (possibly with past_key_value) if we output them
406
+ outputs = (attention_output,) + self_outputs[1:]
407
+
408
+ return outputs
409
+
410
+ def build(self, input_shape=None):
411
+ if self.built:
412
+ return
413
+ self.built = True
414
+ if getattr(self, "self_attention", None) is not None:
415
+ with tf.name_scope(self.self_attention.name):
416
+ self.self_attention.build(None)
417
+ if getattr(self, "dense_output", None) is not None:
418
+ with tf.name_scope(self.dense_output.name):
419
+ self.dense_output.build(None)
420
+ if getattr(self, "LayerNorm", None) is not None:
421
+ with tf.name_scope(self.LayerNorm.name):
422
+ self.LayerNorm.build([None, None, self.config.hidden_size])
423
+
424
+
425
+ class TFRobertaPreLayerNormIntermediate(keras.layers.Layer):
426
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
427
+ super().__init__(**kwargs)
428
+
429
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
430
+ self.dense = keras.layers.Dense(
431
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
432
+ )
433
+
434
+ if isinstance(config.hidden_act, str):
435
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
436
+ else:
437
+ self.intermediate_act_fn = config.hidden_act
438
+ self.config = config
439
+
440
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
441
+ hidden_states = self.LayerNorm(inputs=hidden_states)
442
+ hidden_states = self.dense(inputs=hidden_states)
443
+ hidden_states = self.intermediate_act_fn(hidden_states)
444
+
445
+ return hidden_states
446
+
447
+ def build(self, input_shape=None):
448
+ if self.built:
449
+ return
450
+ self.built = True
451
+ if getattr(self, "LayerNorm", None) is not None:
452
+ with tf.name_scope(self.LayerNorm.name):
453
+ self.LayerNorm.build([None, None, self.config.hidden_size])
454
+ if getattr(self, "dense", None) is not None:
455
+ with tf.name_scope(self.dense.name):
456
+ self.dense.build([None, None, self.config.hidden_size])
457
+
458
+
459
+ class TFRobertaPreLayerNormOutput(keras.layers.Layer):
460
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
461
+ super().__init__(**kwargs)
462
+
463
+ self.dense = keras.layers.Dense(
464
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
465
+ )
466
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
467
+ self.config = config
468
+
469
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
470
+ hidden_states = self.dense(inputs=hidden_states)
471
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
472
+ hidden_states = hidden_states + input_tensor
473
+
474
+ return hidden_states
475
+
476
+ def build(self, input_shape=None):
477
+ if self.built:
478
+ return
479
+ self.built = True
480
+ if getattr(self, "dense", None) is not None:
481
+ with tf.name_scope(self.dense.name):
482
+ self.dense.build([None, None, self.config.intermediate_size])
483
+
484
+
485
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->RobertaPreLayerNorm
486
+ class TFRobertaPreLayerNormLayer(keras.layers.Layer):
487
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
488
+ super().__init__(**kwargs)
489
+
490
+ self.attention = TFRobertaPreLayerNormAttention(config, name="attention")
491
+ self.is_decoder = config.is_decoder
492
+ self.add_cross_attention = config.add_cross_attention
493
+ if self.add_cross_attention:
494
+ if not self.is_decoder:
495
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
496
+ self.crossattention = TFRobertaPreLayerNormAttention(config, name="crossattention")
497
+ self.intermediate = TFRobertaPreLayerNormIntermediate(config, name="intermediate")
498
+ self.bert_output = TFRobertaPreLayerNormOutput(config, name="output")
499
+
500
+ def call(
501
+ self,
502
+ hidden_states: tf.Tensor,
503
+ attention_mask: tf.Tensor,
504
+ head_mask: tf.Tensor,
505
+ encoder_hidden_states: tf.Tensor | None,
506
+ encoder_attention_mask: tf.Tensor | None,
507
+ past_key_value: Tuple[tf.Tensor] | None,
508
+ output_attentions: bool,
509
+ training: bool = False,
510
+ ) -> Tuple[tf.Tensor]:
511
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
512
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
513
+ self_attention_outputs = self.attention(
514
+ input_tensor=hidden_states,
515
+ attention_mask=attention_mask,
516
+ head_mask=head_mask,
517
+ encoder_hidden_states=None,
518
+ encoder_attention_mask=None,
519
+ past_key_value=self_attn_past_key_value,
520
+ output_attentions=output_attentions,
521
+ training=training,
522
+ )
523
+ attention_output = self_attention_outputs[0]
524
+
525
+ # if decoder, the last output is a tuple of the self-attn cache
526
+ if self.is_decoder:
527
+ outputs = self_attention_outputs[1:-1]
528
+ present_key_value = self_attention_outputs[-1]
529
+ else:
530
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
531
+
532
+ cross_attn_present_key_value = None
533
+ if self.is_decoder and encoder_hidden_states is not None:
534
+ if not hasattr(self, "crossattention"):
535
+ raise ValueError(
536
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
537
+ " by setting `config.add_cross_attention=True`"
538
+ )
539
+
540
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
541
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
542
+ cross_attention_outputs = self.crossattention(
543
+ input_tensor=attention_output,
544
+ attention_mask=attention_mask,
545
+ head_mask=head_mask,
546
+ encoder_hidden_states=encoder_hidden_states,
547
+ encoder_attention_mask=encoder_attention_mask,
548
+ past_key_value=cross_attn_past_key_value,
549
+ output_attentions=output_attentions,
550
+ training=training,
551
+ )
552
+ attention_output = cross_attention_outputs[0]
553
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
554
+
555
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
556
+ cross_attn_present_key_value = cross_attention_outputs[-1]
557
+ present_key_value = present_key_value + cross_attn_present_key_value
558
+
559
+ intermediate_output = self.intermediate(hidden_states=attention_output)
560
+ layer_output = self.bert_output(
561
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
562
+ )
563
+ outputs = (layer_output,) + outputs # add attentions if we output them
564
+
565
+ # if decoder, return the attn key/values as the last output
566
+ if self.is_decoder:
567
+ outputs = outputs + (present_key_value,)
568
+
569
+ return outputs
570
+
571
+ def build(self, input_shape=None):
572
+ if self.built:
573
+ return
574
+ self.built = True
575
+ if getattr(self, "attention", None) is not None:
576
+ with tf.name_scope(self.attention.name):
577
+ self.attention.build(None)
578
+ if getattr(self, "intermediate", None) is not None:
579
+ with tf.name_scope(self.intermediate.name):
580
+ self.intermediate.build(None)
581
+ if getattr(self, "bert_output", None) is not None:
582
+ with tf.name_scope(self.bert_output.name):
583
+ self.bert_output.build(None)
584
+ if getattr(self, "crossattention", None) is not None:
585
+ with tf.name_scope(self.crossattention.name):
586
+ self.crossattention.build(None)
587
+
588
+
589
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->RobertaPreLayerNorm
590
+ class TFRobertaPreLayerNormEncoder(keras.layers.Layer):
591
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
592
+ super().__init__(**kwargs)
593
+ self.config = config
594
+ self.layer = [TFRobertaPreLayerNormLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
595
+
596
+ def call(
597
+ self,
598
+ hidden_states: tf.Tensor,
599
+ attention_mask: tf.Tensor,
600
+ head_mask: tf.Tensor,
601
+ encoder_hidden_states: tf.Tensor | None,
602
+ encoder_attention_mask: tf.Tensor | None,
603
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
604
+ use_cache: Optional[bool],
605
+ output_attentions: bool,
606
+ output_hidden_states: bool,
607
+ return_dict: bool,
608
+ training: bool = False,
609
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
610
+ all_hidden_states = () if output_hidden_states else None
611
+ all_attentions = () if output_attentions else None
612
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
613
+
614
+ next_decoder_cache = () if use_cache else None
615
+ for i, layer_module in enumerate(self.layer):
616
+ if output_hidden_states:
617
+ all_hidden_states = all_hidden_states + (hidden_states,)
618
+
619
+ past_key_value = past_key_values[i] if past_key_values is not None else None
620
+
621
+ layer_outputs = layer_module(
622
+ hidden_states=hidden_states,
623
+ attention_mask=attention_mask,
624
+ head_mask=head_mask[i],
625
+ encoder_hidden_states=encoder_hidden_states,
626
+ encoder_attention_mask=encoder_attention_mask,
627
+ past_key_value=past_key_value,
628
+ output_attentions=output_attentions,
629
+ training=training,
630
+ )
631
+ hidden_states = layer_outputs[0]
632
+
633
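+ # when configured as a decoder, each layer returns its (key, value) cache as the last element of layer_outputs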
+ if use_cache:
634
+ next_decoder_cache += (layer_outputs[-1],)
635
+
636
+ if output_attentions:
637
+ all_attentions = all_attentions + (layer_outputs[1],)
638
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
639
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
640
+
641
+ # Add last layer
642
+ if output_hidden_states:
643
+ all_hidden_states = all_hidden_states + (hidden_states,)
644
+
645
+ if not return_dict:
646
+ return tuple(
647
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
648
+ )
649
+
650
+ return TFBaseModelOutputWithPastAndCrossAttentions(
651
+ last_hidden_state=hidden_states,
652
+ past_key_values=next_decoder_cache,
653
+ hidden_states=all_hidden_states,
654
+ attentions=all_attentions,
655
+ cross_attentions=all_cross_attentions,
656
+ )
657
+
658
+ def build(self, input_shape=None):
659
+ if self.built:
660
+ return
661
+ self.built = True
662
+ if getattr(self, "layer", None) is not None:
663
+ for layer in self.layer:
664
+ with tf.name_scope(layer.name):
665
+ layer.build(None)
666
+
667
+
668
+ @keras_serializable
669
+ class TFRobertaPreLayerNormMainLayer(keras.layers.Layer):
670
+ config_class = RobertaPreLayerNormConfig
671
+
672
+ def __init__(self, config, add_pooling_layer=True, **kwargs):
673
+ super().__init__(**kwargs)
674
+
675
+ self.config = config
676
+ self.is_decoder = config.is_decoder
677
+
678
+ self.num_hidden_layers = config.num_hidden_layers
679
+ self.initializer_range = config.initializer_range
680
+ self.output_attentions = config.output_attentions
681
+ self.output_hidden_states = config.output_hidden_states
682
+ self.return_dict = config.use_return_dict
683
+ self.encoder = TFRobertaPreLayerNormEncoder(config, name="encoder")
684
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
685
+ self.pooler = TFRobertaPreLayerNormPooler(config, name="pooler") if add_pooling_layer else None
686
+ # The embeddings must be the last declaration so that the weight ordering is preserved
687
+ self.embeddings = TFRobertaPreLayerNormEmbeddings(config, name="embeddings")
688
+
689
+ def get_input_embeddings(self) -> keras.layers.Layer:
690
+ return self.embeddings
691
+
692
+ def set_input_embeddings(self, value: tf.Variable):
693
+ self.embeddings.weight = value
694
+ self.embeddings.vocab_size = shape_list(value)[0]
695
+
696
+ def _prune_heads(self, heads_to_prune):
697
+ """
698
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
699
+ class PreTrainedModel
700
+ """
701
+ raise NotImplementedError
702
+
703
+ @unpack_inputs
704
+ def call(
705
+ self,
706
+ input_ids: TFModelInputType | None = None,
707
+ attention_mask: np.ndarray | tf.Tensor | None = None,
708
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
709
+ position_ids: np.ndarray | tf.Tensor | None = None,
710
+ head_mask: np.ndarray | tf.Tensor | None = None,
711
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
712
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
713
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
714
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
715
+ use_cache: Optional[bool] = None,
716
+ output_attentions: Optional[bool] = None,
717
+ output_hidden_states: Optional[bool] = None,
718
+ return_dict: Optional[bool] = None,
719
+ training: bool = False,
720
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
721
+ if not self.config.is_decoder:
722
+ use_cache = False
723
+
724
+ if input_ids is not None and inputs_embeds is not None:
725
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
726
+ elif input_ids is not None:
727
+ input_shape = shape_list(input_ids)
728
+ elif inputs_embeds is not None:
729
+ input_shape = shape_list(inputs_embeds)[:-1]
730
+ else:
731
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
732
+
733
+ batch_size, seq_length = input_shape
734
+
735
+ if past_key_values is None:
736
+ past_key_values_length = 0
737
+ past_key_values = [None] * len(self.encoder.layer)
738
+ else:
739
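+ # number of tokens already cached, read from the sequence axis of the first layer's key tensor (batch, num_heads, past_seq_len, head_dim)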
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
740
+
741
+ if attention_mask is None:
742
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
743
+
744
+ if token_type_ids is None:
745
+ token_type_ids = tf.fill(dims=input_shape, value=0)
746
+
747
+ embedding_output = self.embeddings(
748
+ input_ids=input_ids,
749
+ position_ids=position_ids,
750
+ token_type_ids=token_type_ids,
751
+ inputs_embeds=inputs_embeds,
752
+ past_key_values_length=past_key_values_length,
753
+ training=training,
754
+ )
755
+
756
+ # We create a 4D attention mask from a 2D tensor mask.
757
+ # Sizes are [batch_size, 1, 1, to_seq_length]
758
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
759
+ # this attention mask is simpler than the triangular masking of causal attention
+ # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
761
+ attention_mask_shape = shape_list(attention_mask)
762
+
763
+ mask_seq_length = seq_length + past_key_values_length
764
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
765
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
766
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
767
+ if self.is_decoder:
768
+ seq_ids = tf.range(mask_seq_length)
769
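+ # build a lower-triangular boolean mask of shape (batch_size, mask_seq_length, mask_seq_length): key position j is visible to query position i only if j <= i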
+ causal_mask = tf.less_equal(
770
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
771
+ seq_ids[None, :, None],
772
+ )
773
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
774
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
775
+ attention_mask_shape = shape_list(extended_attention_mask)
776
+ extended_attention_mask = tf.reshape(
777
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
778
+ )
779
+ if past_key_values[0] is not None:
780
+ # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]`
781
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
782
+ else:
783
+ extended_attention_mask = tf.reshape(
784
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
785
+ )
786
+
787
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
788
+ # masked positions, this operation will create a tensor which is 0.0 for
789
+ # positions we want to attend and -10000.0 for masked positions.
790
+ # Since we are adding it to the raw scores before the softmax, this is
791
+ # effectively the same as removing these entirely.
792
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
793
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
794
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
795
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
796
+
797
+ if self.is_decoder and encoder_attention_mask is not None:
798
+ # If a 2D or 3D attention mask is provided for the cross-attention,
+ # we need to make it broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
801
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
802
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
803
+ if num_dims_encoder_attention_mask == 3:
804
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
805
+ if num_dims_encoder_attention_mask == 2:
806
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
807
+
808
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
809
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
810
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
811
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
812
+
813
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
814
+ else:
815
+ encoder_extended_attention_mask = None
816
+
817
+ # Prepare head mask if needed
818
+ # 1.0 in head_mask indicate we keep the head
819
+ # attention_probs has shape bsz x n_heads x N x N
820
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
821
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
822
+ if head_mask is not None:
823
+ raise NotImplementedError
824
+ else:
825
+ head_mask = [None] * self.config.num_hidden_layers
826
+
827
+ encoder_outputs = self.encoder(
828
+ hidden_states=embedding_output,
829
+ attention_mask=extended_attention_mask,
830
+ head_mask=head_mask,
831
+ encoder_hidden_states=encoder_hidden_states,
832
+ encoder_attention_mask=encoder_extended_attention_mask,
833
+ past_key_values=past_key_values,
834
+ use_cache=use_cache,
835
+ output_attentions=output_attentions,
836
+ output_hidden_states=output_hidden_states,
837
+ return_dict=return_dict,
838
+ training=training,
839
+ )
840
+
841
+ sequence_output = encoder_outputs[0]
842
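+ # final LayerNorm: in the pre-LayerNorm architecture the residual stream is only normalized once here, at the output of the encoder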
+ sequence_output = self.LayerNorm(inputs=sequence_output)
843
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
844
+
845
+ if not return_dict:
846
+ return (
847
+ sequence_output,
848
+ pooled_output,
849
+ ) + encoder_outputs[1:]
850
+
851
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
852
+ last_hidden_state=sequence_output,
853
+ pooler_output=pooled_output,
854
+ past_key_values=encoder_outputs.past_key_values,
855
+ hidden_states=encoder_outputs.hidden_states,
856
+ attentions=encoder_outputs.attentions,
857
+ cross_attentions=encoder_outputs.cross_attentions,
858
+ )
859
+
860
+ def build(self, input_shape=None):
861
+ if self.built:
862
+ return
863
+ self.built = True
864
+ if getattr(self, "encoder", None) is not None:
865
+ with tf.name_scope(self.encoder.name):
866
+ self.encoder.build(None)
867
+ if getattr(self, "LayerNorm", None) is not None:
868
+ with tf.name_scope(self.LayerNorm.name):
869
+ self.LayerNorm.build([None, None, self.config.hidden_size])
870
+ if getattr(self, "pooler", None) is not None:
871
+ with tf.name_scope(self.pooler.name):
872
+ self.pooler.build(None)
873
+ if getattr(self, "embeddings", None) is not None:
874
+ with tf.name_scope(self.embeddings.name):
875
+ self.embeddings.build(None)
876
+
877
+
878
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaPreTrainedModel with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
879
+ class TFRobertaPreLayerNormPreTrainedModel(TFPreTrainedModel):
880
+ """
881
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
882
+ models.
883
+ """
884
+
885
+ config_class = RobertaPreLayerNormConfig
886
+ base_model_prefix = "roberta_prelayernorm"
887
+
888
+
889
+ ROBERTA_PRELAYERNORM_START_DOCSTRING = r"""
890
+
891
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
892
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
893
+ etc.)
894
+
895
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
896
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
897
+ behavior.
898
+
899
+ <Tip>
900
+
901
+ TensorFlow models and layers in `transformers` accept two formats as input:
902
+
903
+ - having all inputs as keyword arguments (like PyTorch models), or
904
+ - having all inputs as a list, tuple or dict in the first positional argument.
905
+
906
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
907
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
908
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
909
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
910
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
911
+ positional argument:
912
+
913
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
914
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
915
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
916
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
917
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
918
+
919
+ Note that when creating models and layers with
920
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
921
+ about any of this, as you can just pass inputs like you would to any other Python function!
922
+
923
+ </Tip>
924
+
925
+ Parameters:
926
+ config ([`RobertaPreLayerNormConfig`]): Model configuration class with all the parameters of the
927
+ model. Initializing with a config file does not load the weights associated with the model, only the
928
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
929
+ """
930
+
931
+ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING = r"""
932
+ Args:
933
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
934
+ Indices of input sequence tokens in the vocabulary.
935
+
936
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
937
+ [`PreTrainedTokenizer.encode`] for details.
938
+
939
+ [What are input IDs?](../glossary#input-ids)
940
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
941
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
942
+
943
+ - 1 for tokens that are **not masked**,
944
+ - 0 for tokens that are **masked**.
945
+
946
+ [What are attention masks?](../glossary#attention-mask)
947
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
948
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
949
+ 1]`:
950
+
951
+ - 0 corresponds to a *sentence A* token,
952
+ - 1 corresponds to a *sentence B* token.
953
+
954
+ [What are token type IDs?](../glossary#token-type-ids)
955
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
956
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
957
+ config.max_position_embeddings - 1]`.
958
+
959
+ [What are position IDs?](../glossary#position-ids)
960
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
961
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
962
+
963
+ - 1 indicates the head is **not masked**,
964
+ - 0 indicates the head is **masked**.
965
+
966
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
967
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
968
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
969
+ model's internal embedding lookup matrix.
970
+ output_attentions (`bool`, *optional*):
971
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
972
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
973
+ config will be used instead.
974
+ output_hidden_states (`bool`, *optional*):
975
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
976
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
977
+ used instead.
978
+ return_dict (`bool`, *optional*):
979
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
980
+ eager mode, in graph mode the value will always be set to True.
981
+ training (`bool`, *optional*, defaults to `False`):
982
+ Whether or not to use the model in training mode (some modules like dropout modules have different
983
+ behaviors between training and evaluation).
984
+ """
985
+
986
+
987
+ @add_start_docstrings(
988
+ "The bare RoBERTa-PreLayerNorm Model transformer outputting raw hidden-states without any specific head on top.",
989
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
990
+ )
991
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaModel with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
992
+ class TFRobertaPreLayerNormModel(TFRobertaPreLayerNormPreTrainedModel):
993
+ def __init__(self, config, *inputs, **kwargs):
994
+ super().__init__(config, *inputs, **kwargs)
995
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(config, name="roberta_prelayernorm")
996
+
997
+ @unpack_inputs
998
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
999
+ @add_code_sample_docstrings(
1000
+ checkpoint=_CHECKPOINT_FOR_DOC,
1001
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
1002
+ config_class=_CONFIG_FOR_DOC,
1003
+ )
1004
+ def call(
1005
+ self,
1006
+ input_ids: TFModelInputType | None = None,
1007
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1008
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1009
+ position_ids: np.ndarray | tf.Tensor | None = None,
1010
+ head_mask: np.ndarray | tf.Tensor | None = None,
1011
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1012
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1013
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1014
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1015
+ use_cache: Optional[bool] = None,
1016
+ output_attentions: Optional[bool] = None,
1017
+ output_hidden_states: Optional[bool] = None,
1018
+ return_dict: Optional[bool] = None,
1019
+ training: Optional[bool] = False,
1020
+ ) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]:
1021
+ r"""
1022
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1023
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1024
+ the model is configured as a decoder.
1025
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1026
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1027
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1028
+
1029
+ - 1 for tokens that are **not masked**,
1030
+ - 0 for tokens that are **masked**.
1031
+
1032
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1033
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1034
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1035
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1036
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1037
+ use_cache (`bool`, *optional*, defaults to `True`):
1038
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1039
+ `past_key_values`). Set to `False` during training, `True` during generation
1040
+ """
1041
+ outputs = self.roberta_prelayernorm(
1042
+ input_ids=input_ids,
1043
+ attention_mask=attention_mask,
1044
+ token_type_ids=token_type_ids,
1045
+ position_ids=position_ids,
1046
+ head_mask=head_mask,
1047
+ inputs_embeds=inputs_embeds,
1048
+ encoder_hidden_states=encoder_hidden_states,
1049
+ encoder_attention_mask=encoder_attention_mask,
1050
+ past_key_values=past_key_values,
1051
+ use_cache=use_cache,
1052
+ output_attentions=output_attentions,
1053
+ output_hidden_states=output_hidden_states,
1054
+ return_dict=return_dict,
1055
+ training=training,
1056
+ )
1057
+
1058
+ return outputs
1059
+
1060
+ def build(self, input_shape=None):
1061
+ if self.built:
1062
+ return
1063
+ self.built = True
1064
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1065
+ with tf.name_scope(self.roberta_prelayernorm.name):
1066
+ self.roberta_prelayernorm.build(None)
1067
+
1068
+
1069
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead with Roberta->RobertaPreLayerNorm
1070
+ class TFRobertaPreLayerNormLMHead(keras.layers.Layer):
1071
+ """RobertaPreLayerNorm Head for masked language modeling."""
1072
+
1073
+ def __init__(self, config, input_embeddings, **kwargs):
1074
+ super().__init__(**kwargs)
1075
+
1076
+ self.config = config
1077
+ self.hidden_size = config.hidden_size
1078
+ self.dense = keras.layers.Dense(
1079
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
1080
+ )
1081
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
1082
+ self.act = get_tf_activation("gelu")
1083
+
1084
+ # The output weights are the same as the input embeddings, but there is
1085
+ # an output-only bias for each token.
1086
+ self.decoder = input_embeddings
1087
+
1088
+ def build(self, input_shape=None):
1089
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1090
+
1091
+ if self.built:
1092
+ return
1093
+ self.built = True
1094
+ if getattr(self, "dense", None) is not None:
1095
+ with tf.name_scope(self.dense.name):
1096
+ self.dense.build([None, None, self.config.hidden_size])
1097
+ if getattr(self, "layer_norm", None) is not None:
1098
+ with tf.name_scope(self.layer_norm.name):
1099
+ self.layer_norm.build([None, None, self.config.hidden_size])
1100
+
1101
+ def get_output_embeddings(self):
1102
+ return self.decoder
1103
+
1104
+ def set_output_embeddings(self, value):
1105
+ self.decoder.weight = value
1106
+ self.decoder.vocab_size = shape_list(value)[0]
1107
+
1108
+ def get_bias(self):
1109
+ return {"bias": self.bias}
1110
+
1111
+ def set_bias(self, value):
1112
+ self.bias = value["bias"]
1113
+ self.config.vocab_size = shape_list(value["bias"])[0]
1114
+
1115
+ def call(self, hidden_states):
1116
+ hidden_states = self.dense(hidden_states)
1117
+ hidden_states = self.act(hidden_states)
1118
+ hidden_states = self.layer_norm(hidden_states)
1119
+
1120
+ # project back to size of vocabulary with bias
1121
+ seq_length = shape_list(tensor=hidden_states)[1]
1122
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
1123
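+ # self.decoder is the shared input embedding layer, so transpose_b=True multiplies by the tied embedding matrix of shape (vocab_size, hidden_size)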
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
1124
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1125
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1126
+
1127
+ return hidden_states
1128
+
1129
+
1130
+ @add_start_docstrings(
1131
+ """RoBERTa-PreLayerNorm Model with a `language modeling` head on top.""", ROBERTA_PRELAYERNORM_START_DOCSTRING
1132
+ )
1133
+ class TFRobertaPreLayerNormForMaskedLM(TFRobertaPreLayerNormPreTrainedModel, TFMaskedLanguageModelingLoss):
1134
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1135
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
1136
+
1137
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM.__init__ with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1138
+ def __init__(self, config, *inputs, **kwargs):
1139
+ super().__init__(config, *inputs, **kwargs)
1140
+
1141
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1142
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1143
+ )
1144
+ self.lm_head = TFRobertaPreLayerNormLMHead(config, self.roberta_prelayernorm.embeddings, name="lm_head")
1145
+
1146
+ def get_lm_head(self):
1147
+ return self.lm_head
1148
+
1149
+ def get_prefix_bias_name(self):
1150
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1151
+ return self.name + "/" + self.lm_head.name
1152
+
1153
+ @unpack_inputs
1154
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1155
+ @add_code_sample_docstrings(
1156
+ checkpoint=_CHECKPOINT_FOR_DOC,
1157
+ output_type=TFMaskedLMOutput,
1158
+ config_class=_CONFIG_FOR_DOC,
1159
+ mask="<mask>",
1160
+ expected_output="' Paris'",
1161
+ expected_loss=0.69,
1162
+ )
1163
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM.call with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1164
+ def call(
1165
+ self,
1166
+ input_ids: TFModelInputType | None = None,
1167
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1168
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1169
+ position_ids: np.ndarray | tf.Tensor | None = None,
1170
+ head_mask: np.ndarray | tf.Tensor | None = None,
1171
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1172
+ output_attentions: Optional[bool] = None,
1173
+ output_hidden_states: Optional[bool] = None,
1174
+ return_dict: Optional[bool] = None,
1175
+ labels: np.ndarray | tf.Tensor | None = None,
1176
+ training: Optional[bool] = False,
1177
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1178
+ r"""
1179
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1180
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1181
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1182
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1183
+ """
1184
+ outputs = self.roberta_prelayernorm(
1185
+ input_ids,
1186
+ attention_mask=attention_mask,
1187
+ token_type_ids=token_type_ids,
1188
+ position_ids=position_ids,
1189
+ head_mask=head_mask,
1190
+ inputs_embeds=inputs_embeds,
1191
+ output_attentions=output_attentions,
1192
+ output_hidden_states=output_hidden_states,
1193
+ return_dict=return_dict,
1194
+ training=training,
1195
+ )
1196
+
1197
+ sequence_output = outputs[0]
1198
+ prediction_scores = self.lm_head(sequence_output)
1199
+
1200
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1201
+
1202
+ if not return_dict:
1203
+ output = (prediction_scores,) + outputs[2:]
1204
+ return ((loss,) + output) if loss is not None else output
1205
+
1206
+ return TFMaskedLMOutput(
1207
+ loss=loss,
1208
+ logits=prediction_scores,
1209
+ hidden_states=outputs.hidden_states,
1210
+ attentions=outputs.attentions,
1211
+ )
1212
+
1213
+ def build(self, input_shape=None):
1214
+ if self.built:
1215
+ return
1216
+ self.built = True
1217
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1218
+ with tf.name_scope(self.roberta_prelayernorm.name):
1219
+ self.roberta_prelayernorm.build(None)
1220
+ if getattr(self, "lm_head", None) is not None:
1221
+ with tf.name_scope(self.lm_head.name):
1222
+ self.lm_head.build(None)
1223
+
1224
+
1225
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForCausalLM with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1226
+ class TFRobertaPreLayerNormForCausalLM(TFRobertaPreLayerNormPreTrainedModel, TFCausalLanguageModelingLoss):
1227
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1228
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
1229
+
1230
+ def __init__(self, config: RobertaPreLayerNormConfig, *inputs, **kwargs):
1231
+ super().__init__(config, *inputs, **kwargs)
1232
+
1233
+ if not config.is_decoder:
1234
+ logger.warning(
1235
+ "If you want to use `TFRobertaPreLayerNormLMHeadModel` as a standalone, add `is_decoder=True.`"
1236
+ )
1237
+
1238
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1239
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1240
+ )
1241
+ self.lm_head = TFRobertaPreLayerNormLMHead(
1242
+ config, input_embeddings=self.roberta_prelayernorm.embeddings, name="lm_head"
1243
+ )
1244
+
1245
+ def get_lm_head(self):
1246
+ return self.lm_head
1247
+
1248
+ def get_prefix_bias_name(self):
1249
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1250
+ return self.name + "/" + self.lm_head.name
1251
+
1252
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation
1253
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1254
+ input_shape = input_ids.shape
1255
+ # if the model is used as a decoder in an encoder-decoder model, the decoder attention mask is created on the fly
1256
+ if attention_mask is None:
1257
+ attention_mask = tf.ones(input_shape)
1258
+
1259
+ # cut decoder_input_ids if past is used
1260
+ if past_key_values is not None:
1261
+ input_ids = input_ids[:, -1:]
1262
+
1263
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1264
+
1265
+ @unpack_inputs
1266
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1267
+ @add_code_sample_docstrings(
1268
+ checkpoint=_CHECKPOINT_FOR_DOC,
1269
+ output_type=TFCausalLMOutputWithCrossAttentions,
1270
+ config_class=_CONFIG_FOR_DOC,
1271
+ )
1272
+ def call(
1273
+ self,
1274
+ input_ids: TFModelInputType | None = None,
1275
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1276
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1277
+ position_ids: np.ndarray | tf.Tensor | None = None,
1278
+ head_mask: np.ndarray | tf.Tensor | None = None,
1279
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1280
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1281
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1282
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1283
+ use_cache: Optional[bool] = None,
1284
+ output_attentions: Optional[bool] = None,
1285
+ output_hidden_states: Optional[bool] = None,
1286
+ return_dict: Optional[bool] = None,
1287
+ labels: np.ndarray | tf.Tensor | None = None,
1288
+ training: Optional[bool] = False,
1289
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
1290
+ r"""
1291
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1292
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1293
+ the model is configured as a decoder.
1294
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1295
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1296
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1297
+
1298
+ - 1 for tokens that are **not masked**,
1299
+ - 0 for tokens that are **masked**.
1300
+
1301
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1302
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1303
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1304
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1305
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1306
+ use_cache (`bool`, *optional*, defaults to `True`):
1307
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1308
+ `past_key_values`). Set to `False` during training, `True` during generation
1309
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1310
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
1311
+ config.vocab_size - 1]`.
1312
+ """
1313
+ outputs = self.roberta_prelayernorm(
1314
+ input_ids=input_ids,
1315
+ attention_mask=attention_mask,
1316
+ token_type_ids=token_type_ids,
1317
+ position_ids=position_ids,
1318
+ head_mask=head_mask,
1319
+ inputs_embeds=inputs_embeds,
1320
+ encoder_hidden_states=encoder_hidden_states,
1321
+ encoder_attention_mask=encoder_attention_mask,
1322
+ past_key_values=past_key_values,
1323
+ use_cache=use_cache,
1324
+ output_attentions=output_attentions,
1325
+ output_hidden_states=output_hidden_states,
1326
+ return_dict=return_dict,
1327
+ training=training,
1328
+ )
1329
+
1330
+ sequence_output = outputs[0]
1331
+ logits = self.lm_head(hidden_states=sequence_output, training=training)
1332
+ loss = None
1333
+
1334
+ if labels is not None:
1335
+ # shift labels to the left and cut last logit token
1336
+ shifted_logits = logits[:, :-1]
1337
+ labels = labels[:, 1:]
1338
+ loss = self.hf_compute_loss(labels=labels, logits=shifted_logits)
1339
+
1340
+ if not return_dict:
1341
+ output = (logits,) + outputs[2:]
1342
+ return ((loss,) + output) if loss is not None else output
1343
+
1344
+ return TFCausalLMOutputWithCrossAttentions(
1345
+ loss=loss,
1346
+ logits=logits,
1347
+ past_key_values=outputs.past_key_values,
1348
+ hidden_states=outputs.hidden_states,
1349
+ attentions=outputs.attentions,
1350
+ cross_attentions=outputs.cross_attentions,
1351
+ )
1352
+
1353
+ def build(self, input_shape=None):
1354
+ if self.built:
1355
+ return
1356
+ self.built = True
1357
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1358
+ with tf.name_scope(self.roberta_prelayernorm.name):
1359
+ self.roberta_prelayernorm.build(None)
1360
+ if getattr(self, "lm_head", None) is not None:
1361
+ with tf.name_scope(self.lm_head.name):
1362
+ self.lm_head.build(None)
1363
+
1364
+
1365
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaClassificationHead with Roberta->RobertaPreLayerNorm
1366
+ class TFRobertaPreLayerNormClassificationHead(keras.layers.Layer):
1367
+ """Head for sentence-level classification tasks."""
1368
+
1369
+ def __init__(self, config, **kwargs):
1370
+ super().__init__(**kwargs)
1371
+ self.dense = keras.layers.Dense(
1372
+ config.hidden_size,
1373
+ kernel_initializer=get_initializer(config.initializer_range),
1374
+ activation="tanh",
1375
+ name="dense",
1376
+ )
1377
+ classifier_dropout = (
1378
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1379
+ )
1380
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1381
+ self.out_proj = keras.layers.Dense(
1382
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
1383
+ )
1384
+ self.config = config
1385
+
1386
+ def call(self, features, training=False):
1387
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1388
+ x = self.dropout(x, training=training)
1389
+ x = self.dense(x)
1390
+ x = self.dropout(x, training=training)
1391
+ x = self.out_proj(x)
1392
+ return x
1393
+
1394
+ def build(self, input_shape=None):
1395
+ if self.built:
1396
+ return
1397
+ self.built = True
1398
+ if getattr(self, "dense", None) is not None:
1399
+ with tf.name_scope(self.dense.name):
1400
+ self.dense.build([None, None, self.config.hidden_size])
1401
+ if getattr(self, "out_proj", None) is not None:
1402
+ with tf.name_scope(self.out_proj.name):
1403
+ self.out_proj.build([None, None, self.config.hidden_size])
1404
+
1405
+
1406
+ @add_start_docstrings(
1407
+ """
1408
+ RoBERTa-PreLayerNorm Model transformer with a sequence classification/regression head on top (a linear layer on top
1409
+ of the pooled output) e.g. for GLUE tasks.
1410
+ """,
1411
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1412
+ )
1413
+ class TFRobertaPreLayerNormForSequenceClassification(
1414
+ TFRobertaPreLayerNormPreTrainedModel, TFSequenceClassificationLoss
1415
+ ):
1416
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1417
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
1418
+
1419
+ def __init__(self, config, *inputs, **kwargs):
1420
+ super().__init__(config, *inputs, **kwargs)
1421
+ self.num_labels = config.num_labels
1422
+
1423
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1424
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1425
+ )
1426
+ self.classifier = TFRobertaPreLayerNormClassificationHead(config, name="classifier")
1427
+
1428
+ @unpack_inputs
1429
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1430
+ @add_code_sample_docstrings(
1431
+ checkpoint=_CHECKPOINT_FOR_DOC,
1432
+ output_type=TFSequenceClassifierOutput,
1433
+ config_class=_CONFIG_FOR_DOC,
1434
+ )
1435
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForSequenceClassification.call with roberta->roberta_prelayernorm
1436
+ def call(
1437
+ self,
1438
+ input_ids: TFModelInputType | None = None,
1439
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1440
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1441
+ position_ids: np.ndarray | tf.Tensor | None = None,
1442
+ head_mask: np.ndarray | tf.Tensor | None = None,
1443
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1444
+ output_attentions: Optional[bool] = None,
1445
+ output_hidden_states: Optional[bool] = None,
1446
+ return_dict: Optional[bool] = None,
1447
+ labels: np.ndarray | tf.Tensor | None = None,
1448
+ training: Optional[bool] = False,
1449
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1450
+ r"""
1451
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1452
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1453
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1454
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1455
+ """
1456
+ outputs = self.roberta_prelayernorm(
1457
+ input_ids,
1458
+ attention_mask=attention_mask,
1459
+ token_type_ids=token_type_ids,
1460
+ position_ids=position_ids,
1461
+ head_mask=head_mask,
1462
+ inputs_embeds=inputs_embeds,
1463
+ output_attentions=output_attentions,
1464
+ output_hidden_states=output_hidden_states,
1465
+ return_dict=return_dict,
1466
+ training=training,
1467
+ )
1468
+ sequence_output = outputs[0]
1469
+ logits = self.classifier(sequence_output, training=training)
1470
+
1471
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1472
+
1473
+ if not return_dict:
1474
+ output = (logits,) + outputs[2:]
1475
+ return ((loss,) + output) if loss is not None else output
1476
+
1477
+ return TFSequenceClassifierOutput(
1478
+ loss=loss,
1479
+ logits=logits,
1480
+ hidden_states=outputs.hidden_states,
1481
+ attentions=outputs.attentions,
1482
+ )
1483
+
1484
+ def build(self, input_shape=None):
1485
+ if self.built:
1486
+ return
1487
+ self.built = True
1488
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1489
+ with tf.name_scope(self.roberta_prelayernorm.name):
1490
+ self.roberta_prelayernorm.build(None)
1491
+ if getattr(self, "classifier", None) is not None:
1492
+ with tf.name_scope(self.classifier.name):
1493
+ self.classifier.build(None)
1494
+
1495
+
1496
+ @add_start_docstrings(
1497
+ """
1498
+ RobertaPreLayerNorm Model with a multiple choice classification head on top (a linear layer on top of the pooled
1499
+ output and a softmax) e.g. for RocStories/SWAG tasks.
1500
+ """,
1501
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1502
+ )
1503
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMultipleChoice with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1504
+ class TFRobertaPreLayerNormForMultipleChoice(TFRobertaPreLayerNormPreTrainedModel, TFMultipleChoiceLoss):
1505
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1506
+ _keys_to_ignore_on_load_unexpected = [r"lm_head"]
1507
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1508
+
1509
+ def __init__(self, config, *inputs, **kwargs):
1510
+ super().__init__(config, *inputs, **kwargs)
1511
+
1512
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(config, name="roberta_prelayernorm")
1513
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1514
+ self.classifier = keras.layers.Dense(
1515
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1516
+ )
1517
+ self.config = config
1518
+
1519
+ @unpack_inputs
1520
+ @add_start_docstrings_to_model_forward(
1521
+ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1522
+ )
1523
+ @add_code_sample_docstrings(
1524
+ checkpoint=_CHECKPOINT_FOR_DOC,
1525
+ output_type=TFMultipleChoiceModelOutput,
1526
+ config_class=_CONFIG_FOR_DOC,
1527
+ )
1528
+ def call(
1529
+ self,
1530
+ input_ids: TFModelInputType | None = None,
1531
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1532
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1533
+ position_ids: np.ndarray | tf.Tensor | None = None,
1534
+ head_mask: np.ndarray | tf.Tensor | None = None,
1535
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1536
+ output_attentions: Optional[bool] = None,
1537
+ output_hidden_states: Optional[bool] = None,
1538
+ return_dict: Optional[bool] = None,
1539
+ labels: np.ndarray | tf.Tensor | None = None,
1540
+ training: Optional[bool] = False,
1541
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1542
+ r"""
1543
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1544
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1545
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1546
+ """
1547
+
1548
+ if input_ids is not None:
1549
+ num_choices = shape_list(input_ids)[1]
1550
+ seq_length = shape_list(input_ids)[2]
1551
+ else:
1552
+ num_choices = shape_list(inputs_embeds)[1]
1553
+ seq_length = shape_list(inputs_embeds)[2]
1554
+
1555
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1556
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1557
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1558
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1559
+ outputs = self.roberta_prelayernorm(
1560
+ flat_input_ids,
1561
+ flat_attention_mask,
1562
+ flat_token_type_ids,
1563
+ flat_position_ids,
1564
+ head_mask,
1565
+ inputs_embeds,
1566
+ output_attentions,
1567
+ output_hidden_states,
1568
+ return_dict=return_dict,
1569
+ training=training,
1570
+ )
1571
+ pooled_output = outputs[1]
1572
+ pooled_output = self.dropout(pooled_output, training=training)
1573
+ logits = self.classifier(pooled_output)
1574
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1575
+
1576
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1577
+
1578
+ if not return_dict:
1579
+ output = (reshaped_logits,) + outputs[2:]
1580
+ return ((loss,) + output) if loss is not None else output
1581
+
1582
+ return TFMultipleChoiceModelOutput(
1583
+ loss=loss,
1584
+ logits=reshaped_logits,
1585
+ hidden_states=outputs.hidden_states,
1586
+ attentions=outputs.attentions,
1587
+ )
1588
+
1589
+ def build(self, input_shape=None):
1590
+ if self.built:
1591
+ return
1592
+ self.built = True
1593
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1594
+ with tf.name_scope(self.roberta_prelayernorm.name):
1595
+ self.roberta_prelayernorm.build(None)
1596
+ if getattr(self, "classifier", None) is not None:
1597
+ with tf.name_scope(self.classifier.name):
1598
+ self.classifier.build([None, None, self.config.hidden_size])
1599
+
1600
+
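The multiple-choice head above flattens the `(batch_size, num_choices, seq_length)` inputs before the encoder and reshapes the per-choice scores afterwards. A minimal usage sketch, not part of the upstream diff; the checkpoint name is an assumed example and `from_pt=True` is only needed when no TF weights are published:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRobertaPreLayerNormForMultipleChoice

checkpoint = "andreasmadsen/efficient_mlm_m0.40"  # assumed example checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFRobertaPreLayerNormForMultipleChoice.from_pretrained(checkpoint, from_pt=True)

prompt = "The glass fell off the table"
choices = ["and it broke.", "and it flew away."]
# tokenize each (prompt, choice) pair, then add the num_choices axis expected by the head
enc = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)
inputs = {k: tf.expand_dims(v, 0) for k, v in enc.items()}  # shapes become (1, 2, seq_len)
logits = model(**inputs).logits
print(logits.shape)  # (1, 2): one score per choice
```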
1601
+ @add_start_docstrings(
1602
+ """
1603
+ RoBERTa-PreLayerNorm Model with a token classification head on top (a linear layer on top of the hidden-states
1604
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1605
+ """,
1606
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1607
+ )
1608
+ class TFRobertaPreLayerNormForTokenClassification(TFRobertaPreLayerNormPreTrainedModel, TFTokenClassificationLoss):
1609
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1610
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
1611
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1612
+
1613
+ def __init__(self, config, *inputs, **kwargs):
1614
+ super().__init__(config, *inputs, **kwargs)
1615
+ self.num_labels = config.num_labels
1616
+
1617
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1618
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1619
+ )
1620
+ classifier_dropout = (
1621
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1622
+ )
1623
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1624
+ self.classifier = keras.layers.Dense(
1625
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1626
+ )
1627
+ self.config = config
1628
+
1629
+ @unpack_inputs
1630
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1631
+ @add_code_sample_docstrings(
1632
+ checkpoint=_CHECKPOINT_FOR_DOC,
1633
+ output_type=TFTokenClassifierOutput,
1634
+ config_class=_CONFIG_FOR_DOC,
1635
+ )
1636
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForTokenClassification.call with roberta->roberta_prelayernorm
1637
+ def call(
1638
+ self,
1639
+ input_ids: TFModelInputType | None = None,
1640
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1641
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1642
+ position_ids: np.ndarray | tf.Tensor | None = None,
1643
+ head_mask: np.ndarray | tf.Tensor | None = None,
1644
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1645
+ output_attentions: Optional[bool] = None,
1646
+ output_hidden_states: Optional[bool] = None,
1647
+ return_dict: Optional[bool] = None,
1648
+ labels: np.ndarray | tf.Tensor | None = None,
1649
+ training: Optional[bool] = False,
1650
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1651
+ r"""
1652
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1653
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1654
+ """
1655
+ outputs = self.roberta_prelayernorm(
1656
+ input_ids,
1657
+ attention_mask=attention_mask,
1658
+ token_type_ids=token_type_ids,
1659
+ position_ids=position_ids,
1660
+ head_mask=head_mask,
1661
+ inputs_embeds=inputs_embeds,
1662
+ output_attentions=output_attentions,
1663
+ output_hidden_states=output_hidden_states,
1664
+ return_dict=return_dict,
1665
+ training=training,
1666
+ )
1667
+ sequence_output = outputs[0]
1668
+
1669
+ sequence_output = self.dropout(sequence_output, training=training)
1670
+ logits = self.classifier(sequence_output)
1671
+
1672
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1673
+
1674
+ if not return_dict:
1675
+ output = (logits,) + outputs[2:]
1676
+ return ((loss,) + output) if loss is not None else output
1677
+
1678
+ return TFTokenClassifierOutput(
1679
+ loss=loss,
1680
+ logits=logits,
1681
+ hidden_states=outputs.hidden_states,
1682
+ attentions=outputs.attentions,
1683
+ )
1684
+
1685
+ def build(self, input_shape=None):
1686
+ if self.built:
1687
+ return
1688
+ self.built = True
1689
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1690
+ with tf.name_scope(self.roberta_prelayernorm.name):
1691
+ self.roberta_prelayernorm.build(None)
1692
+ if getattr(self, "classifier", None) is not None:
1693
+ with tf.name_scope(self.classifier.name):
1694
+ self.classifier.build([None, None, self.config.hidden_size])
1695
+
1696
+
1697
+ @add_start_docstrings(
1698
+ """
1699
+ RoBERTa-PreLayerNorm Model with a span classification head on top for extractive question-answering tasks like
1700
+ SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1701
+ """,
1702
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1703
+ )
1704
+ class TFRobertaPreLayerNormForQuestionAnswering(TFRobertaPreLayerNormPreTrainedModel, TFQuestionAnsweringLoss):
1705
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1706
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
1707
+
1708
+ def __init__(self, config, *inputs, **kwargs):
1709
+ super().__init__(config, *inputs, **kwargs)
1710
+ self.num_labels = config.num_labels
1711
+
1712
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1713
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1714
+ )
1715
+ self.qa_outputs = keras.layers.Dense(
1716
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1717
+ )
1718
+ self.config = config
1719
+
1720
+ @unpack_inputs
1721
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1722
+ @add_code_sample_docstrings(
1723
+ checkpoint=_CHECKPOINT_FOR_DOC,
1724
+ output_type=TFQuestionAnsweringModelOutput,
1725
+ config_class=_CONFIG_FOR_DOC,
1726
+ )
1727
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForQuestionAnswering.call with roberta->roberta_prelayernorm
1728
+ def call(
1729
+ self,
1730
+ input_ids: TFModelInputType | None = None,
1731
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1732
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1733
+ position_ids: np.ndarray | tf.Tensor | None = None,
1734
+ head_mask: np.ndarray | tf.Tensor | None = None,
1735
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1736
+ output_attentions: Optional[bool] = None,
1737
+ output_hidden_states: Optional[bool] = None,
1738
+ return_dict: Optional[bool] = None,
1739
+ start_positions: np.ndarray | tf.Tensor | None = None,
1740
+ end_positions: np.ndarray | tf.Tensor | None = None,
1741
+ training: Optional[bool] = False,
1742
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1743
+ r"""
1744
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1745
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1746
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1747
+ are not taken into account for computing the loss.
1748
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1749
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1750
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1751
+ are not taken into account for computing the loss.
1752
+ """
1753
+ outputs = self.roberta_prelayernorm(
1754
+ input_ids,
1755
+ attention_mask=attention_mask,
1756
+ token_type_ids=token_type_ids,
1757
+ position_ids=position_ids,
1758
+ head_mask=head_mask,
1759
+ inputs_embeds=inputs_embeds,
1760
+ output_attentions=output_attentions,
1761
+ output_hidden_states=output_hidden_states,
1762
+ return_dict=return_dict,
1763
+ training=training,
1764
+ )
1765
+ sequence_output = outputs[0]
1766
+
1767
+ logits = self.qa_outputs(sequence_output)
1768
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1769
+ start_logits = tf.squeeze(start_logits, axis=-1)
1770
+ end_logits = tf.squeeze(end_logits, axis=-1)
1771
+
1772
+ loss = None
1773
+ if start_positions is not None and end_positions is not None:
1774
+ labels = {"start_position": start_positions}
1775
+ labels["end_position"] = end_positions
1776
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1777
+
1778
+ if not return_dict:
1779
+ output = (start_logits, end_logits) + outputs[2:]
1780
+ return ((loss,) + output) if loss is not None else output
1781
+
1782
+ return TFQuestionAnsweringModelOutput(
1783
+ loss=loss,
1784
+ start_logits=start_logits,
1785
+ end_logits=end_logits,
1786
+ hidden_states=outputs.hidden_states,
1787
+ attentions=outputs.attentions,
1788
+ )
1789
+
1790
+ def build(self, input_shape=None):
1791
+ if self.built:
1792
+ return
1793
+ self.built = True
1794
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1795
+ with tf.name_scope(self.roberta_prelayernorm.name):
1796
+ self.roberta_prelayernorm.build(None)
1797
+ if getattr(self, "qa_outputs", None) is not None:
1798
+ with tf.name_scope(self.qa_outputs.name):
1799
+ self.qa_outputs.build([None, None, self.config.hidden_size])
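For reference, a short, hedged sketch of running the question-answering head defined above. The checkpoint name is an assumed example, `from_pt=True` is only needed when no TF weights exist, and the QA head is randomly initialized on top of a base checkpoint, so the extracted span is only illustrative:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRobertaPreLayerNormForQuestionAnswering

checkpoint = "andreasmadsen/efficient_mlm_m0.40"  # assumed example checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFRobertaPreLayerNormForQuestionAnswering.from_pretrained(checkpoint, from_pt=True)

question, context = "Who wrote the note?", "The note was written by Ada."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))  # untrained head: answer is arbitrary
```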
llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__init__.py ADDED
@@ -0,0 +1,56 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
20
+
21
+ try:
22
+ if not is_torch_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["modeling_sew"] = [
28
+ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
29
+ "SEWForCTC",
30
+ "SEWForSequenceClassification",
31
+ "SEWModel",
32
+ "SEWPreTrainedModel",
33
+ ]
34
+
35
+ if TYPE_CHECKING:
36
+ from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ from .modeling_sew import (
45
+ SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
46
+ SEWForCTC,
47
+ SEWForSequenceClassification,
48
+ SEWModel,
49
+ SEWPreTrainedModel,
50
+ )
51
+
52
+
53
+ else:
54
+ import sys
55
+
56
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
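The `__init__.py` above wires SEW into the library's lazy-import machinery: `configuration_sew` is always registered, the modeling classes are only registered when `torch` is available, and everything is loaded on first attribute access. A small sketch of what that enables (assumes a working `torch` install):

```python
import transformers.models.sew as sew  # cheap: _LazyModule defers the real imports

config = sew.SEWConfig()         # resolving the attribute loads configuration_sew
model = sew.SEWModel(config)     # resolving SEWModel pulls in modeling_sew (and torch)
print(model.config.hidden_size)  # 768 by default
```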
llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (920 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__pycache__/configuration_sew.cpython-310.pyc ADDED
Binary file (12.5 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__pycache__/convert_sew_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (8.01 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sew/__pycache__/modeling_sew.cpython-310.pyc ADDED
Binary file (33.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sew/configuration_sew.py ADDED
@@ -0,0 +1,256 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 ASAPP Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ SEW model configuration"""
16
+
17
+ import functools
18
+ import operator
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class SEWConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`SEWModel`]. It is used to instantiate a SEW model
33
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
34
+ defaults will yield a similar configuration to that of the SEW
35
+ [asapp/sew-tiny-100k](https://huggingface.co/asapp/sew-tiny-100k) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 32):
43
+ Vocabulary size of the SEW model. Defines the number of different tokens that can be represented by the
44
+ `inputs_ids` passed when calling [`SEW`].
45
+ hidden_size (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 12):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ intermediate_size (`int`, *optional*, defaults to 3072):
52
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
53
+ squeeze_factor (`int`, *optional*, defaults to 2):
54
+ Sequence length downsampling factor after the encoder and upsampling factor after the transformer.
55
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
58
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ activation_dropout (`float`, *optional*, defaults to 0.1):
61
+ The dropout ratio for activations inside the fully connected layer.
62
+ attention_dropout (`float`, *optional*, defaults to 0.1):
63
+ The dropout ratio for the attention probabilities.
64
+ final_dropout (`float`, *optional*, defaults to 0.1):
65
+ The dropout probability for the final projection layer of [`SEWForCTC`].
66
+ layerdrop (`float`, *optional*, defaults to 0.1):
67
+ The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more
68
+ details.
69
+ initializer_range (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
72
+ The epsilon used by the layer normalization layers.
73
+ feat_extract_norm (`str`, *optional*, defaults to `"group"`):
74
+ The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
75
+ normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
76
+ convolutional layers.
77
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
78
+ The dropout probability for output of the feature encoder.
79
+ feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
80
+ The non-linear activation function (function or string) in the 1D convolutional layers of the feature
81
+ extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
82
+ conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)`):
83
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
84
+ feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
85
+ conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)`):
86
+ A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
87
+ of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
88
+ conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)`):
89
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
90
+ length of *conv_kernel* defines the number of convolutional layers and has to match the length of
91
+ *conv_dim*.
92
+ conv_bias (`bool`, *optional*, defaults to `False`):
93
+ Whether the 1D convolutional layers have a bias.
94
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
95
+ Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
96
+ embeddings layer.
97
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
98
+ Number of groups of 1D convolutional positional embeddings layer.
99
+ apply_spec_augment (`bool`, *optional*, defaults to `True`):
100
+ Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
101
+ [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
102
+ Recognition](https://arxiv.org/abs/1904.08779).
103
+ mask_time_prob (`float`, *optional*, defaults to 0.05):
104
+ Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
105
+ procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
106
+ reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
107
+ masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
108
+ actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
109
+ mask_time_length (`int`, *optional*, defaults to 10):
110
+ Length of vector span along the time axis.
111
+ mask_time_min_masks (`int`, *optional*, defaults to 2):
112
+ The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
113
+ irrespective of `mask_time_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
114
+ mask_time_min_masks''
115
+ mask_feature_prob (`float`, *optional*, defaults to 0.0):
116
+ Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
117
+ masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_feature_length'' independent masks over
118
+ the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
119
+ span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
120
+ may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
121
+ True`.
122
+ mask_feature_length (`int`, *optional*, defaults to 10):
123
+ Length of vector span along the feature axis.
124
+ mask_feature_min_masks (`int`, *optional*, defaults to 0):
125
+ The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
126
+ step, irrespectively of `mask_feature_prob`. Only relevant if
127
+ ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
128
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
129
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
130
+ instance of [`SEWForCTC`].
131
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
132
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
133
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
134
+ of [`SEWForCTC`].
135
+ use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
136
+ Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
137
+ instance of [`Wav2Vec2ForSequenceClassification`].
138
+ classifier_proj_size (`int`, *optional*, defaults to 256):
139
+ Dimensionality of the projection before token mean-pooling for classification.
140
+
141
+ Example:
142
+
143
+ ```python
144
+ >>> from transformers import SEWConfig, SEWModel
145
+
146
+ >>> # Initializing a SEW asapp/sew-tiny-100k style configuration
147
+ >>> configuration = SEWConfig()
148
+
149
+ >>> # Initializing a model (with random weights) from the asapp/sew-tiny-100k style configuration
150
+ >>> model = SEWModel(configuration)
151
+
152
+ >>> # Accessing the model configuration
153
+ >>> configuration = model.config
154
+ ```"""
155
+
156
+ model_type = "sew"
157
+
158
+ def __init__(
159
+ self,
160
+ vocab_size=32,
161
+ hidden_size=768,
162
+ num_hidden_layers=12,
163
+ num_attention_heads=12,
164
+ intermediate_size=3072,
165
+ squeeze_factor=2,
166
+ hidden_act="gelu",
167
+ hidden_dropout=0.1,
168
+ activation_dropout=0.1,
169
+ attention_dropout=0.1,
170
+ feat_proj_dropout=0.0,
171
+ final_dropout=0.1,
172
+ layerdrop=0.1,
173
+ initializer_range=0.02,
174
+ layer_norm_eps=1e-5,
175
+ feat_extract_norm="group",
176
+ feat_extract_activation="gelu",
177
+ conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
178
+ conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
179
+ conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
180
+ conv_bias=False,
181
+ num_conv_pos_embeddings=128,
182
+ num_conv_pos_embedding_groups=16,
183
+ apply_spec_augment=True,
184
+ mask_time_prob=0.05,
185
+ mask_time_length=10,
186
+ mask_time_min_masks=2,
187
+ mask_feature_prob=0.0,
188
+ mask_feature_length=10,
189
+ mask_feature_min_masks=0,
190
+ ctc_loss_reduction="mean",
191
+ ctc_zero_infinity=False,
192
+ use_weighted_layer_sum=False,
193
+ classifier_proj_size=256,
194
+ pad_token_id=0,
195
+ bos_token_id=1,
196
+ eos_token_id=2,
197
+ **kwargs,
198
+ ):
199
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
200
+ self.hidden_size = hidden_size
201
+ self.feat_extract_norm = feat_extract_norm
202
+ self.feat_extract_activation = feat_extract_activation
203
+ self.conv_dim = list(conv_dim)
204
+ self.conv_stride = list(conv_stride)
205
+ self.conv_kernel = list(conv_kernel)
206
+ self.conv_bias = conv_bias
207
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
208
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
209
+ self.num_feat_extract_layers = len(self.conv_dim)
210
+ self.num_hidden_layers = num_hidden_layers
211
+ self.intermediate_size = intermediate_size
212
+ self.squeeze_factor = squeeze_factor
213
+ self.hidden_act = hidden_act
214
+ self.num_attention_heads = num_attention_heads
215
+ self.hidden_dropout = hidden_dropout
216
+ self.attention_dropout = attention_dropout
217
+ self.activation_dropout = activation_dropout
218
+ self.feat_proj_dropout = feat_proj_dropout
219
+ self.final_dropout = final_dropout
220
+ self.layerdrop = layerdrop
221
+ self.layer_norm_eps = layer_norm_eps
222
+ self.initializer_range = initializer_range
223
+ self.vocab_size = vocab_size
224
+
225
+ if (
226
+ (len(self.conv_stride) != self.num_feat_extract_layers)
227
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
228
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
229
+ ):
230
+ raise ValueError(
231
+ "Configuration for convolutional layers is incorrect. "
232
+ "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
233
+ f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
234
+ f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
235
+ )
236
+
237
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
238
+ self.apply_spec_augment = apply_spec_augment
239
+ self.mask_time_prob = mask_time_prob
240
+ self.mask_time_length = mask_time_length
241
+ self.mask_time_min_masks = mask_time_min_masks
242
+ self.mask_feature_prob = mask_feature_prob
243
+ self.mask_feature_length = mask_feature_length
244
+ self.mask_feature_min_masks = mask_feature_min_masks
245
+
246
+ # ctc loss
247
+ self.ctc_loss_reduction = ctc_loss_reduction
248
+ self.ctc_zero_infinity = ctc_zero_infinity
249
+
250
+ # sequence classification
251
+ self.use_weighted_layer_sum = use_weighted_layer_sum
252
+ self.classifier_proj_size = classifier_proj_size
253
+
254
+ @property
255
+ def inputs_to_logits_ratio(self):
256
+ return functools.reduce(operator.mul, self.conv_stride, 1)
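`inputs_to_logits_ratio` above is simply the product of the feature-encoder strides, i.e. how many raw audio samples collapse into one encoder frame. With the default `conv_stride` of `(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)` that product is 5 * 2**6 = 320, so 16 kHz audio yields about 50 frames per second. A quick check:

```python
from transformers import SEWConfig

config = SEWConfig()  # default conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(config.inputs_to_logits_ratio)           # 320 input samples per encoder frame
print(16_000 / config.inputs_to_logits_ratio)  # 50.0 frames per second at 16 kHz
```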
llmeval-env/lib/python3.10/site-packages/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,306 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert SEW checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ import os
21
+
22
+ import fairseq
23
+ import torch
24
+ from fairseq.data import Dictionary
25
+
26
+ # Register SEW's fairseq modules
27
+ from sew_asapp import tasks # noqa: F401
28
+
29
+ from transformers import (
30
+ SEWConfig,
31
+ SEWForCTC,
32
+ SEWModel,
33
+ Wav2Vec2CTCTokenizer,
34
+ Wav2Vec2FeatureExtractor,
35
+ Wav2Vec2Processor,
36
+ logging,
37
+ )
38
+
39
+
40
+ logging.set_verbosity_info()
41
+ logger = logging.get_logger(__name__)
42
+
43
+ MAPPING = {
44
+ "post_extract_proj": "feature_projection",
45
+ "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
46
+ "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
47
+ "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
48
+ "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
49
+ "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
50
+ "self_attn_layer_norm": "encoder.layers.*.layer_norm",
51
+ "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
52
+ "fc2": "encoder.layers.*.feed_forward.output_dense",
53
+ "final_layer_norm": "encoder.layers.*.final_layer_norm",
54
+ "encoder.upsample.0": "encoder.upsample.projection",
55
+ "encoder.layer_norm": "encoder.layer_norm",
56
+ "w2v_model.layer_norm": "layer_norm",
57
+ "w2v_encoder.proj": "lm_head",
58
+ "mask_emb": "masked_spec_embed",
59
+ }
60
+
61
+
62
+ def set_recursively(hf_pointer, key, value, full_name, weight_type):
63
+ for attribute in key.split("."):
64
+ hf_pointer = getattr(hf_pointer, attribute)
65
+
66
+ if weight_type is not None:
67
+ hf_shape = getattr(hf_pointer, weight_type).shape
68
+ else:
69
+ hf_shape = hf_pointer.shape
70
+
71
+ assert hf_shape == value.shape, (
72
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
73
+ f" {value.shape} for {full_name}"
74
+ )
75
+
76
+ if weight_type == "weight":
77
+ hf_pointer.weight.data = value
78
+ elif weight_type == "weight_g":
79
+ hf_pointer.weight_g.data = value
80
+ elif weight_type == "weight_v":
81
+ hf_pointer.weight_v.data = value
82
+ elif weight_type == "bias":
83
+ hf_pointer.bias.data = value
84
+ else:
85
+ hf_pointer.data = value
86
+
87
+ logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
88
+
89
+
90
+ def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
91
+ unused_weights = []
92
+ fairseq_dict = fairseq_model.state_dict()
93
+
94
+ feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
95
+
96
+ for name, value in fairseq_dict.items():
97
+ is_used = False
98
+ if "conv_layers" in name:
99
+ load_conv_layer(
100
+ name,
101
+ value,
102
+ feature_extractor,
103
+ unused_weights,
104
+ hf_model.config.feat_extract_norm == "group",
105
+ )
106
+ is_used = True
107
+ else:
108
+ for key, mapped_key in MAPPING.items():
109
+ mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
110
+
111
+ if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
112
+ is_used = True
113
+ if "*" in mapped_key:
114
+ layer_index = name.split(key)[0].split(".")[-2]
115
+ mapped_key = mapped_key.replace("*", layer_index)
116
+ if "weight_g" in name:
117
+ weight_type = "weight_g"
118
+ elif "weight_v" in name:
119
+ weight_type = "weight_v"
120
+ elif "weight" in name:
121
+ weight_type = "weight"
122
+ elif "bias" in name:
123
+ weight_type = "bias"
124
+ else:
125
+ weight_type = None
126
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
127
+ continue
128
+ if not is_used:
129
+ unused_weights.append(name)
130
+
131
+ logger.warning(f"Unused weights: {unused_weights}")
132
+
133
+
134
+ def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
135
+ name = full_name.split("conv_layers.")[-1]
136
+ items = name.split(".")
137
+ layer_id = int(items[0])
138
+ type_id = int(items[1])
139
+
140
+ if type_id == 0:
141
+ if "bias" in name:
142
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
143
+ f"{full_name} has size {value.shape}, but"
144
+ f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
145
+ )
146
+ feature_extractor.conv_layers[layer_id].conv.bias.data = value
147
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
148
+ elif "weight" in name:
149
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
150
+ f"{full_name} has size {value.shape}, but"
151
+ f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
152
+ )
153
+ feature_extractor.conv_layers[layer_id].conv.weight.data = value
154
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
155
+ elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
156
+ if "bias" in name:
157
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
158
+ f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
159
+ " found."
160
+ )
161
+ feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
162
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
163
+ elif "weight" in name:
164
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
165
+ f"{full_name} has size {value.shape}, but"
166
+ f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
167
+ )
168
+ feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
169
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
170
+ else:
171
+ unused_weights.append(full_name)
172
+
173
+
174
+ def convert_config(model, is_finetuned):
175
+ config = SEWConfig()
176
+ if is_finetuned:
177
+ fs_config = model.w2v_encoder.w2v_model.cfg
178
+ else:
179
+ fs_config = model.cfg
180
+
181
+ config.conv_bias = fs_config.conv_bias
182
+ conv_layers = eval(fs_config.conv_feature_layers)
183
+ config.conv_dim = [x[0] for x in conv_layers]
184
+ config.conv_kernel = [x[1] for x in conv_layers]
185
+ config.conv_stride = [x[2] for x in conv_layers]
186
+ config.feat_extract_activation = "gelu"
187
+ config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
188
+ config.final_dropout = 0.0
189
+ config.hidden_act = fs_config.activation_fn.name
190
+ config.hidden_size = fs_config.encoder_embed_dim
191
+ config.initializer_range = 0.02
192
+ config.intermediate_size = fs_config.encoder_ffn_embed_dim
193
+ config.layer_norm_eps = 1e-5
194
+ config.layerdrop = fs_config.encoder_layerdrop
195
+ config.num_attention_heads = fs_config.encoder_attention_heads
196
+ config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
197
+ config.num_conv_pos_embeddings = fs_config.conv_pos
198
+ config.num_feat_extract_layers = len(conv_layers)
199
+ config.num_hidden_layers = fs_config.encoder_layers
200
+ config.squeeze_factor = fs_config.squeeze_factor
201
+
202
+ # take care of any params that are overridden by the Wav2VecCtc model
203
+ if is_finetuned:
204
+ fs_config = model.cfg
205
+ config.final_dropout = fs_config.final_dropout
206
+ config.layerdrop = fs_config.layerdrop
207
+ config.activation_dropout = fs_config.activation_dropout
208
+ config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
209
+ config.attention_dropout = fs_config.attention_dropout
210
+ config.feat_proj_dropout = fs_config.dropout_input
211
+ config.hidden_dropout = fs_config.dropout
212
+ config.mask_feature_length = fs_config.mask_channel_length
213
+ config.mask_feature_prob = fs_config.mask_channel_prob
214
+ config.mask_time_length = fs_config.mask_length
215
+ config.mask_time_prob = fs_config.mask_prob
216
+
217
+ config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
218
+ config.tokenizer_class = "Wav2Vec2CTCTokenizer"
219
+
220
+ return config
221
+
222
+
223
+ @torch.no_grad()
224
+ def convert_sew_checkpoint(
225
+ checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
226
+ ):
227
+ """
228
+ Copy/paste/tweak model's weights to transformers design.
229
+ """
230
+
231
+ if is_finetuned:
232
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
233
+ [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
234
+ )
235
+ else:
236
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
237
+
238
+ if config_path is not None:
239
+ config = SEWConfig.from_pretrained(config_path)
240
+ else:
241
+ config = convert_config(model[0], is_finetuned)
242
+ model = model[0].eval()
243
+
244
+ return_attention_mask = True if config.feat_extract_norm == "layer" else False
245
+ feature_extractor = Wav2Vec2FeatureExtractor(
246
+ feature_size=1,
247
+ sampling_rate=16000,
248
+ padding_value=0,
249
+ do_normalize=True,
250
+ return_attention_mask=return_attention_mask,
251
+ )
252
+
253
+ if is_finetuned:
254
+ if dict_path:
255
+ target_dict = Dictionary.load(dict_path)
256
+
257
+ # important change bos & pad token id since CTC symbol is <pad> and
258
+ # not <s> as in fairseq
259
+ target_dict.indices[target_dict.bos_word] = target_dict.pad_index
260
+ target_dict.indices[target_dict.pad_word] = target_dict.bos_index
261
+ config.bos_token_id = target_dict.pad_index
262
+ config.pad_token_id = target_dict.bos_index
263
+ config.eos_token_id = target_dict.eos_index
264
+ config.vocab_size = len(target_dict.symbols)
265
+ vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
266
+ if not os.path.isdir(pytorch_dump_folder_path):
267
+ logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
268
+ return
269
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
270
+ with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
271
+ json.dump(target_dict.indices, vocab_handle)
272
+ tokenizer = Wav2Vec2CTCTokenizer(
273
+ vocab_path,
274
+ unk_token=target_dict.unk_word,
275
+ pad_token=target_dict.pad_word,
276
+ bos_token=target_dict.bos_word,
277
+ eos_token=target_dict.eos_word,
278
+ word_delimiter_token="|",
279
+ do_lower_case=False,
280
+ )
281
+ processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
282
+ processor.save_pretrained(pytorch_dump_folder_path)
283
+
284
+ hf_model = SEWForCTC(config)
285
+ else:
286
+ hf_model = SEWModel(config)
287
+ feature_extractor.save_pretrained(pytorch_dump_folder_path)
288
+
289
+ recursively_load_weights(model, hf_model, is_finetuned)
290
+
291
+ hf_model.save_pretrained(pytorch_dump_folder_path)
292
+
293
+
294
+ if __name__ == "__main__":
295
+ parser = argparse.ArgumentParser()
296
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
297
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
298
+ parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
299
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
300
+ parser.add_argument(
301
+ "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
302
+ )
303
+ args = parser.parse_args()
304
+ convert_sew_checkpoint(
305
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
306
+ )
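The converter above is meant to be driven from the command line through its `argparse` block, but it can also be called directly. A hedged sketch with placeholder paths; it requires `fairseq` and the `sew_asapp` package to be importable:

```python
from transformers.models.sew.convert_sew_original_pytorch_checkpoint_to_pytorch import (
    convert_sew_checkpoint,
)

convert_sew_checkpoint(
    checkpoint_path="/path/to/sew_checkpoint.pt",    # placeholder fairseq checkpoint
    pytorch_dump_folder_path="/path/to/output_dir",  # placeholder output directory
    config_path=None,                                # derive the SEWConfig from the checkpoint
    dict_path="/path/to/dict.ltr.txt",               # placeholder; used to build the CTC vocab
    is_finetuned=True,                               # wraps the encoder in SEWForCTC
)
```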
llmeval-env/lib/python3.10/site-packages/transformers/models/sew/modeling_sew.py ADDED
@@ -0,0 +1,1226 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 ASAPP Inc. and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch SEW model."""
16
+
17
+ import math
18
+ import warnings
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
29
+ from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
32
+ from .configuration_sew import SEWConfig
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+
38
+ _HIDDEN_STATES_START_POSITION = 1
39
+
40
+ # General docstring
41
+ _CONFIG_FOR_DOC = "SEWConfig"
42
+
43
+ # Base docstring
44
+ _CHECKPOINT_FOR_DOC = "asapp/sew-tiny-100k-ft-ls100h"
45
+ _EXPECTED_OUTPUT_SHAPE = [1, 292, 512]
46
+
47
+ # CTC docstring
48
+ _CTC_EXPECTED_OUTPUT = (
49
+ "'MISTER QUILTER IS THE APPOSTILE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPOLLE'"
50
+ )
51
+ _CTC_EXPECTED_LOSS = 0.42
52
+
53
+ # Audio class docstring
54
+ _SEQ_CLASS_CHECKPOINT = "anton-l/sew-mid-100k-ft-keyword-spotting"
55
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'"
56
+ _SEQ_CLASS_EXPECTED_LOSS = 9.52
57
+
58
+
59
+ from ..deprecated._archive_maps import SEW_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
60
+
61
+
62
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
63
+ def _compute_mask_indices(
64
+ shape: Tuple[int, int],
65
+ mask_prob: float,
66
+ mask_length: int,
67
+ attention_mask: Optional[torch.LongTensor] = None,
68
+ min_masks: int = 0,
69
+ ) -> np.ndarray:
70
+ """
71
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
72
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
73
+ CPU as part of the preprocessing during training.
74
+
75
+ Args:
76
+ shape: The shape for which to compute masks. This should be of a tuple of size 2 where
77
+ the first element is the batch size and the second element is the length of the axis to span.
78
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
79
+ independently generated mask spans of length `mask_length` is computed by
80
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
81
+ actual percentage will be smaller.
82
+ mask_length: size of the mask
83
+ min_masks: minimum number of masked spans
84
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
85
+ each batch dimension.
86
+ """
87
+ batch_size, sequence_length = shape
88
+
89
+ if mask_length < 1:
90
+ raise ValueError("`mask_length` has to be bigger than 0.")
91
+
92
+ if mask_length > sequence_length:
93
+ raise ValueError(
94
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
95
+ f" and `sequence_length`: {sequence_length}`"
96
+ )
97
+
98
+ # epsilon is used for probabilistic rounding
99
+ epsilon = np.random.rand(1).item()
100
+
101
+ def compute_num_masked_span(input_length):
102
+ """Given input length, compute how many spans should be masked"""
103
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
104
+ num_masked_span = max(num_masked_span, min_masks)
105
+
106
+ # make sure num masked span <= sequence_length
107
+ if num_masked_span * mask_length > sequence_length:
108
+ num_masked_span = sequence_length // mask_length
109
+
110
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
111
+ if input_length - (mask_length - 1) < num_masked_span:
112
+ num_masked_span = max(input_length - (mask_length - 1), 0)
113
+
114
+ return num_masked_span
115
+
116
+ # compute number of masked spans in batch
117
+ input_lengths = (
118
+ attention_mask.sum(-1).detach().tolist()
119
+ if attention_mask is not None
120
+ else [sequence_length for _ in range(batch_size)]
121
+ )
122
+
123
+ # SpecAugment mask to fill
124
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
125
+ spec_aug_mask_idxs = []
126
+
127
+ max_num_masked_span = compute_num_masked_span(sequence_length)
128
+
129
+ if max_num_masked_span == 0:
130
+ return spec_aug_mask
131
+
132
+ for input_length in input_lengths:
133
+ # compute num of masked spans for this input
134
+ num_masked_span = compute_num_masked_span(input_length)
135
+
136
+ # get random indices to mask
137
+ spec_aug_mask_idx = np.random.choice(
138
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
139
+ )
140
+
141
+ # pick first sampled index that will serve as a dummy index to pad vector
142
+ # to ensure same dimension for all batches due to probabilistic rounding
143
+ # Picking first sample just pads those vectors twice.
144
+ if len(spec_aug_mask_idx) == 0:
145
+ # this case can only happen if `input_length` is strictly smaller than
146
+ # `sequence_length` in which case the last token has to be a padding
147
+ # token which we can use as a dummy mask id
148
+ dummy_mask_idx = sequence_length - 1
149
+ else:
150
+ dummy_mask_idx = spec_aug_mask_idx[0]
151
+
152
+ spec_aug_mask_idx = np.concatenate(
153
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
154
+ )
155
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
156
+
157
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
158
+
159
+ # expand masked indices to masked spans
160
+ spec_aug_mask_idxs = np.broadcast_to(
161
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
162
+ )
163
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
164
+
165
+ # add offset to the starting indexes so that indexes now create a span
166
+ offsets = np.arange(mask_length)[None, None, :]
167
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
168
+ batch_size, max_num_masked_span * mask_length
169
+ )
170
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
171
+
172
+ # ensure that we cannot have indices larger than sequence_length
173
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
174
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
175
+
176
+ # scatter indices to mask
177
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
178
+
179
+ return spec_aug_mask
180
+
181
+
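`_compute_mask_indices` above samples the SpecAugment spans on CPU with NumPy. A tiny, illustrative invocation (the numbers are arbitrary and importing `modeling_sew` requires `torch`):

```python
from transformers.models.sew.modeling_sew import _compute_mask_indices

# mask roughly 5% of 100 frames in spans of length 10, with at least 2 spans per row
mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.05, mask_length=10, min_masks=2)
print(mask.shape)         # (2, 100) boolean array
print(mask.sum(axis=-1))  # masked time steps per row (up to 2 spans of 10; overlap may reduce it)
```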
182
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->SEW
183
+ class SEWNoLayerNormConvLayer(nn.Module):
184
+ def __init__(self, config, layer_id=0):
185
+ super().__init__()
186
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
187
+ self.out_conv_dim = config.conv_dim[layer_id]
188
+
189
+ self.conv = nn.Conv1d(
190
+ self.in_conv_dim,
191
+ self.out_conv_dim,
192
+ kernel_size=config.conv_kernel[layer_id],
193
+ stride=config.conv_stride[layer_id],
194
+ bias=config.conv_bias,
195
+ )
196
+ self.activation = ACT2FN[config.feat_extract_activation]
197
+
198
+ def forward(self, hidden_states):
199
+ hidden_states = self.conv(hidden_states)
200
+ hidden_states = self.activation(hidden_states)
201
+ return hidden_states
202
+
203
+
204
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->SEW
205
+ class SEWLayerNormConvLayer(nn.Module):
206
+ def __init__(self, config, layer_id=0):
207
+ super().__init__()
208
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
209
+ self.out_conv_dim = config.conv_dim[layer_id]
210
+
211
+ self.conv = nn.Conv1d(
212
+ self.in_conv_dim,
213
+ self.out_conv_dim,
214
+ kernel_size=config.conv_kernel[layer_id],
215
+ stride=config.conv_stride[layer_id],
216
+ bias=config.conv_bias,
217
+ )
218
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
219
+ self.activation = ACT2FN[config.feat_extract_activation]
220
+
221
+ def forward(self, hidden_states):
222
+ hidden_states = self.conv(hidden_states)
223
+
224
+ hidden_states = hidden_states.transpose(-2, -1)
225
+ hidden_states = self.layer_norm(hidden_states)
226
+ hidden_states = hidden_states.transpose(-2, -1)
227
+
228
+ hidden_states = self.activation(hidden_states)
229
+ return hidden_states
230
+
231
+
232
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->SEW
233
+ class SEWGroupNormConvLayer(nn.Module):
234
+ def __init__(self, config, layer_id=0):
235
+ super().__init__()
236
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
237
+ self.out_conv_dim = config.conv_dim[layer_id]
238
+
239
+ self.conv = nn.Conv1d(
240
+ self.in_conv_dim,
241
+ self.out_conv_dim,
242
+ kernel_size=config.conv_kernel[layer_id],
243
+ stride=config.conv_stride[layer_id],
244
+ bias=config.conv_bias,
245
+ )
246
+ self.activation = ACT2FN[config.feat_extract_activation]
247
+
248
+ self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
249
+
250
+ def forward(self, hidden_states):
251
+ hidden_states = self.conv(hidden_states)
252
+ hidden_states = self.layer_norm(hidden_states)
253
+ hidden_states = self.activation(hidden_states)
254
+ return hidden_states
255
+
256
+
257
+ class SEWPositionalConvEmbedding(nn.Module):
258
+ def __init__(self, config):
259
+ super().__init__()
260
+ self.conv = nn.Conv1d(
261
+ config.hidden_size,
262
+ config.hidden_size,
263
+ kernel_size=config.num_conv_pos_embeddings,
264
+ padding=config.num_conv_pos_embeddings // 2,
265
+ groups=config.num_conv_pos_embedding_groups,
266
+ stride=config.squeeze_factor,
267
+ )
268
+
269
+ if is_deepspeed_zero3_enabled():
270
+ import deepspeed
271
+
272
+ with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
273
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
274
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
275
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
276
+ else:
277
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
278
+
279
+ self.padding = SEWSamePadLayer(config.num_conv_pos_embeddings)
280
+ self.activation = ACT2FN[config.feat_extract_activation]
281
+
282
+ def forward(self, hidden_states):
283
+ hidden_states = self.conv(hidden_states)
284
+ hidden_states = self.padding(hidden_states)
285
+ hidden_states = self.activation(hidden_states)
286
+
287
+ return hidden_states
288
+
289
+
290
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->SEW
291
+ class SEWSamePadLayer(nn.Module):
292
+ def __init__(self, num_conv_pos_embeddings):
293
+ super().__init__()
294
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
295
+
296
+ def forward(self, hidden_states):
297
+ if self.num_pad_remove > 0:
298
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
299
+ return hidden_states
300
+
301
+
302
+ class SEWUpsampling(nn.Module):
303
+ def __init__(self, config):
304
+ super().__init__()
305
+ self.projection = nn.Linear(config.hidden_size, config.hidden_size * config.squeeze_factor)
306
+ self.activation = ACT2FN[config.feat_extract_activation]
307
+ self.squeeze_factor = config.squeeze_factor
308
+
309
+ def forward(self, hidden_states):
310
+ hidden_states = self.projection(hidden_states)
311
+ hidden_states = self.activation(hidden_states)
312
+
313
+ if self.squeeze_factor > 1:
314
+ # transform embedding channels to sequence length
315
+ bsz, src_len, src_embed_dim = hidden_states.size()
316
+ tgt_len = src_len * self.squeeze_factor
317
+ tgt_embed_dim = src_embed_dim // self.squeeze_factor
318
+ hidden_states = hidden_states.reshape(bsz, src_len, self.squeeze_factor, tgt_embed_dim)
319
+ hidden_states = hidden_states.reshape(bsz, tgt_len, tgt_embed_dim)
320
+
321
+ return hidden_states
322
+
323
+
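A minimal sketch of the reshape that `SEWUpsampling.forward` performs, using toy dimensions as assumptions (`squeeze_factor=2`, `hidden_size=4`): the projection widens each frame by `squeeze_factor`, and the two `reshape` calls trade that extra width back for sequence length.

    import torch

    batch, seq_len, hidden, squeeze_factor = 1, 5, 4, 2
    projected = torch.randn(batch, seq_len, hidden * squeeze_factor)   # output of the linear projection
    upsampled = projected.reshape(batch, seq_len, squeeze_factor, hidden)
    upsampled = upsampled.reshape(batch, seq_len * squeeze_factor, hidden)
    assert upsampled.shape == (1, 10, 4)   # sequence length doubled, hidden size restored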
324
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->SEW
325
+ class SEWFeatureEncoder(nn.Module):
326
+ """Construct the features from raw audio waveform"""
327
+
328
+ def __init__(self, config):
329
+ super().__init__()
330
+
331
+ if config.feat_extract_norm == "group":
332
+ conv_layers = [SEWGroupNormConvLayer(config, layer_id=0)] + [
333
+ SEWNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
334
+ ]
335
+ elif config.feat_extract_norm == "layer":
336
+ conv_layers = [SEWLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
337
+ else:
338
+ raise ValueError(
339
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
340
+ )
341
+ self.conv_layers = nn.ModuleList(conv_layers)
342
+ self.gradient_checkpointing = False
343
+ self._requires_grad = True
344
+
345
+ def _freeze_parameters(self):
346
+ for param in self.parameters():
347
+ param.requires_grad = False
348
+ self._requires_grad = False
349
+
350
+ def forward(self, input_values):
351
+ hidden_states = input_values[:, None]
352
+
353
+ # make sure hidden_states require grad for gradient_checkpointing
354
+ if self._requires_grad and self.training:
355
+ hidden_states.requires_grad = True
356
+
357
+ for conv_layer in self.conv_layers:
358
+ if self._requires_grad and self.gradient_checkpointing and self.training:
359
+ hidden_states = self._gradient_checkpointing_func(
360
+ conv_layer.__call__,
361
+ hidden_states,
362
+ )
363
+ else:
364
+ hidden_states = conv_layer(hidden_states)
365
+
366
+ return hidden_states
367
+
368
+
369
+ class SEWFeatureExtractor(SEWFeatureEncoder):
370
+ def __init__(self, config):
371
+ super().__init__(config)
372
+ warnings.warn(
373
+ f"The class `{self.__class__.__name__}` has been depreciated "
374
+ "and will be removed in Transformers v5. "
375
+ f"Use `{self.__class__.__bases__[0].__name__}` instead.",
376
+ FutureWarning,
377
+ )
378
+
379
+
380
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->SEW
381
+ class SEWAttention(nn.Module):
382
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
383
+
384
+ def __init__(
385
+ self,
386
+ embed_dim: int,
387
+ num_heads: int,
388
+ dropout: float = 0.0,
389
+ is_decoder: bool = False,
390
+ bias: bool = True,
391
+ is_causal: bool = False,
392
+ config: Optional[SEWConfig] = None,
393
+ ):
394
+ super().__init__()
395
+ self.embed_dim = embed_dim
396
+ self.num_heads = num_heads
397
+ self.dropout = dropout
398
+ self.head_dim = embed_dim // num_heads
399
+ self.config = config
400
+
401
+ if (self.head_dim * num_heads) != self.embed_dim:
402
+ raise ValueError(
403
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
404
+ f" and `num_heads`: {num_heads})."
405
+ )
406
+ self.scaling = self.head_dim**-0.5
407
+ self.is_decoder = is_decoder
408
+ self.is_causal = is_causal
409
+
410
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
411
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
412
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
413
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
414
+
415
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
416
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
417
+
418
+ def forward(
419
+ self,
420
+ hidden_states: torch.Tensor,
421
+ key_value_states: Optional[torch.Tensor] = None,
422
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
423
+ attention_mask: Optional[torch.Tensor] = None,
424
+ layer_head_mask: Optional[torch.Tensor] = None,
425
+ output_attentions: bool = False,
426
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
427
+ """Input shape: Batch x Time x Channel"""
428
+
429
+ # if key_value_states are provided this layer is used as a cross-attention layer
430
+ # for the decoder
431
+ is_cross_attention = key_value_states is not None
432
+
433
+ bsz, tgt_len, _ = hidden_states.size()
434
+
435
+ # get query proj
436
+ query_states = self.q_proj(hidden_states) * self.scaling
437
+ # get key, value proj
438
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
439
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
440
+ # the provided `key_value_states` to support prefix tuning
441
+ if (
442
+ is_cross_attention
443
+ and past_key_value is not None
444
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
445
+ ):
446
+ # reuse k,v, cross_attentions
447
+ key_states = past_key_value[0]
448
+ value_states = past_key_value[1]
449
+ elif is_cross_attention:
450
+ # cross_attentions
451
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
452
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
453
+ elif past_key_value is not None:
454
+ # reuse k, v, self_attention
455
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
456
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
457
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
458
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
459
+ else:
460
+ # self_attention
461
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
462
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
463
+
464
+ if self.is_decoder:
465
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
466
+ # Further calls to cross_attention layer can then reuse all cross-attention
467
+ # key/value_states (first "if" case)
468
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
469
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
470
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
471
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
472
+ past_key_value = (key_states, value_states)
473
+
474
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
475
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
476
+ key_states = key_states.reshape(*proj_shape)
477
+ value_states = value_states.reshape(*proj_shape)
478
+
479
+ src_len = key_states.size(1)
480
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
481
+
482
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
483
+ raise ValueError(
484
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
485
+ f" {attn_weights.size()}"
486
+ )
487
+
488
+ if attention_mask is not None:
489
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
490
+ raise ValueError(
491
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
492
+ )
493
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
494
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
495
+
496
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
497
+
498
+ if layer_head_mask is not None:
499
+ if layer_head_mask.size() != (self.num_heads,):
500
+ raise ValueError(
501
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
502
+ f" {layer_head_mask.size()}"
503
+ )
504
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
505
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
506
+
507
+ if output_attentions:
508
+ # this operation is a bit awkward, but it's required to
509
+ # make sure that attn_weights keeps its gradient.
510
+ # In order to do so, attn_weights have to be reshaped
511
+ # twice and have to be reused in the following
512
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
513
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
514
+ else:
515
+ attn_weights_reshaped = None
516
+
517
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
518
+
519
+ attn_output = torch.bmm(attn_probs, value_states)
520
+
521
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
522
+ raise ValueError(
523
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
524
+ f" {attn_output.size()}"
525
+ )
526
+
527
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
528
+ attn_output = attn_output.transpose(1, 2)
529
+
530
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
531
+ # partitioned across GPUs when using tensor-parallelism.
532
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
533
+
534
+ attn_output = self.out_proj(attn_output)
535
+
536
+ return attn_output, attn_weights_reshaped, past_key_value
537
+
538
+
539
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->SEW
540
+ class SEWFeedForward(nn.Module):
541
+ def __init__(self, config):
542
+ super().__init__()
543
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
544
+
545
+ self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
546
+ if isinstance(config.hidden_act, str):
547
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
548
+ else:
549
+ self.intermediate_act_fn = config.hidden_act
550
+
551
+ self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
552
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
553
+
554
+ def forward(self, hidden_states):
555
+ hidden_states = self.intermediate_dense(hidden_states)
556
+ hidden_states = self.intermediate_act_fn(hidden_states)
557
+ hidden_states = self.intermediate_dropout(hidden_states)
558
+
559
+ hidden_states = self.output_dense(hidden_states)
560
+ hidden_states = self.output_dropout(hidden_states)
561
+ return hidden_states
562
+
563
+
564
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->SEW
565
+ class SEWEncoderLayer(nn.Module):
566
+ def __init__(self, config):
567
+ super().__init__()
568
+ self.attention = SEWAttention(
569
+ embed_dim=config.hidden_size,
570
+ num_heads=config.num_attention_heads,
571
+ dropout=config.attention_dropout,
572
+ is_decoder=False,
573
+ )
574
+ self.dropout = nn.Dropout(config.hidden_dropout)
575
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
576
+ self.feed_forward = SEWFeedForward(config)
577
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
578
+
579
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
580
+ attn_residual = hidden_states
581
+ hidden_states, attn_weights, _ = self.attention(
582
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
583
+ )
584
+ hidden_states = self.dropout(hidden_states)
585
+ hidden_states = attn_residual + hidden_states
586
+
587
+ hidden_states = self.layer_norm(hidden_states)
588
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
589
+ hidden_states = self.final_layer_norm(hidden_states)
590
+
591
+ outputs = (hidden_states,)
592
+
593
+ if output_attentions:
594
+ outputs += (attn_weights,)
595
+
596
+ return outputs
597
+
598
+
599
+ class SEWEncoder(nn.Module):
600
+ def __init__(self, config):
601
+ super().__init__()
602
+ self.config = config
603
+ self.pos_conv_embed = SEWPositionalConvEmbedding(config)
604
+ self.pool = nn.AvgPool1d(config.squeeze_factor, config.squeeze_factor)
605
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
606
+ self.dropout = nn.Dropout(config.hidden_dropout)
607
+ self.layers = nn.ModuleList([SEWEncoderLayer(config) for _ in range(config.num_hidden_layers)])
608
+ self.upsample = SEWUpsampling(config)
609
+ self.gradient_checkpointing = False
610
+
611
+ def forward(
612
+ self,
613
+ hidden_states,
614
+ attention_mask=None,
615
+ output_attentions=False,
616
+ output_hidden_states=False,
617
+ return_dict=True,
618
+ ):
619
+ all_hidden_states = () if output_hidden_states else None
620
+ all_self_attentions = () if output_attentions else None
621
+
622
+ if attention_mask is not None:
623
+ # make sure padded tokens output 0
624
+ hidden_states[~attention_mask] = 0.0
625
+
626
+ input_lengths = (attention_mask.long()).sum(-1)
627
+ # apply pooling formula to get real output_lengths
628
+ output_lengths = input_lengths // self.config.squeeze_factor
629
+ max_encoder_length = hidden_states.shape[1] // self.config.squeeze_factor
630
+ attention_ids = (
631
+ torch.arange(0, max_encoder_length, device=output_lengths.device)
632
+ .view(1, -1)
633
+ .expand(output_lengths.shape[0], -1)
634
+ )
635
+ attention_mask = (attention_ids < output_lengths.view(-1, 1)).long()
636
+
637
+ # extend attention_mask
638
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
639
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
640
+ attention_mask = attention_mask.expand(
641
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
642
+ )
643
+
644
+ n_input_timesteps = hidden_states.shape[1]
645
+
646
+ hidden_states = hidden_states.transpose(1, 2)
647
+ position_embeddings = self.pos_conv_embed(hidden_states)
648
+ pooled_hidden_states = self.pool(hidden_states)
649
+ min_length = min(position_embeddings.size(-1), pooled_hidden_states.size(-1))
650
+ hidden_states = pooled_hidden_states[..., :min_length] + position_embeddings[..., :min_length]
651
+ hidden_states = hidden_states.transpose(1, 2)
652
+
653
+ hidden_states = self.layer_norm(hidden_states)
654
+ hidden_states = self.dropout(hidden_states)
655
+
656
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
657
+
658
+ for layer in self.layers:
659
+ if output_hidden_states:
660
+ all_hidden_states = all_hidden_states + (hidden_states,)
661
+
662
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
663
+ dropout_probability = torch.rand([])
664
+
665
+ skip_the_layer = self.training and (dropout_probability < self.config.layerdrop)
666
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
667
+ # under deepspeed zero3 all gpus must run in sync
668
+ if self.gradient_checkpointing and self.training:
669
+ layer_outputs = self._gradient_checkpointing_func(
670
+ layer.__call__,
671
+ hidden_states,
672
+ attention_mask,
673
+ output_attentions,
674
+ )
675
+ else:
676
+ layer_outputs = layer(
677
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
678
+ )
679
+ hidden_states = layer_outputs[0]
680
+
681
+ if skip_the_layer:
682
+ layer_outputs = (None, None)
683
+
684
+ if output_attentions:
685
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
686
+
687
+ if output_hidden_states:
688
+ all_hidden_states = all_hidden_states + (hidden_states,)
689
+
690
+ hidden_states = self.upsample(hidden_states)
691
+ if hidden_states.shape[1] < n_input_timesteps:
692
+ hidden_states = nn.functional.pad(hidden_states, (0, 0, 0, n_input_timesteps - hidden_states.shape[1]))
693
+
694
+ if not return_dict:
695
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
696
+ return BaseModelOutput(
697
+ last_hidden_state=hidden_states,
698
+ hidden_states=all_hidden_states,
699
+ attentions=all_self_attentions,
700
+ )
701
+
702
+
703
+ class SEWPreTrainedModel(PreTrainedModel):
704
+ """
705
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
706
+ models.
707
+ """
708
+
709
+ config_class = SEWConfig
710
+ base_model_prefix = "sew"
711
+ main_input_name = "input_values"
712
+ supports_gradient_checkpointing = True
713
+
714
+ def _init_weights(self, module):
715
+ """Initialize the weights"""
716
+ if isinstance(module, SEWPositionalConvEmbedding):
717
+ nn.init.normal_(
718
+ module.conv.weight,
719
+ mean=0,
720
+ std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
721
+ )
722
+ nn.init.constant_(module.conv.bias, 0)
723
+ elif isinstance(module, nn.Linear):
724
+ # Slightly different from the TF version which uses truncated_normal for initialization
725
+ # cf https://github.com/pytorch/pytorch/pull/5617
726
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
727
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
728
+ module.bias.data.zero_()
729
+ module.weight.data.fill_(1.0)
730
+ elif isinstance(module, nn.Conv1d):
731
+ if is_deepspeed_zero3_enabled():
732
+ import deepspeed
733
+
734
+ if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
735
+ with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
736
+ nn.init.kaiming_normal_(module.weight.data)
737
+ else:
738
+ with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
739
+ nn.init.kaiming_normal_(module.weight.data)
740
+ else:
741
+ nn.init.kaiming_normal_(module.weight.data)
742
+
743
+ if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
744
+ module.bias.data.zero_()
745
+
746
+ def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
747
+ """
748
+ Computes the output length of the convolutional layers
749
+ """
750
+
751
+ def _conv_out_length(input_length, kernel_size, stride):
752
+ # 1D convolutional layer output length formula taken
753
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
754
+ return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
755
+
756
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
757
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
758
+
759
+ return input_lengths
760
+
761
+ def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
762
+ output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
763
+ batch_size = attention_mask.shape[0]
764
+
765
+ attention_mask = torch.zeros(
766
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
767
+ )
768
+ # these two operations make sure that all values before the output length indices are attended to
769
+ attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
770
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
771
+ return attention_mask
772
+
773
+
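A short sketch of the `_conv_out_length` recurrence above, using wav2vec2-style kernels and strides purely as assumed example values (the real ones come from `config.conv_kernel` and `config.conv_stride`):

    def conv_out_length(input_length, kernel_size, stride):
        # same formula as above: floor((L - kernel) / stride) + 1
        return (input_length - kernel_size) // stride + 1

    length = 16000  # one second of 16 kHz audio
    for kernel, stride in zip((10, 3, 3, 3, 3, 2, 2), (5, 2, 2, 2, 2, 2, 2)):
        length = conv_out_length(length, kernel, stride)
    print(length)  # 49 feature frames, roughly one every 20 ms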
774
+ SEW_START_DOCSTRING = r"""
775
+ SEW was proposed in [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech
776
+ Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger,
777
+ Yoav Artzi.
778
+
779
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
780
+ library implements for all its models (such as downloading or saving, etc.).
781
+
782
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
783
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
784
+ behavior.
785
+
786
+ Parameters:
787
+ config ([`SEWConfig`]): Model configuration class with all the parameters of the model.
788
+ Initializing with a config file does not load the weights associated with the model, only the
789
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
790
+ """
791
+
792
+
793
+ SEW_INPUTS_DOCSTRING = r"""
794
+ Args:
795
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
796
+ Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
797
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
798
+ soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
799
+ conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
800
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
801
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
802
+ 1]`:
803
+
804
+ - 1 for tokens that are **not masked**,
805
+ - 0 for tokens that are **masked**.
806
+
807
+ [What are attention masks?](../glossary#attention-mask)
808
+
809
+ output_attentions (`bool`, *optional*):
810
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
811
+ tensors for more detail.
812
+ output_hidden_states (`bool`, *optional*):
813
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
814
+ more detail.
815
+ return_dict (`bool`, *optional*):
816
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
817
+ """
818
+
819
+
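A hedged usage sketch for the inputs described above; the checkpoint name is an assumption used only for illustration, and any SEW checkpoint with a preprocessor config should work the same way.

    import torch
    from transformers import AutoFeatureExtractor, SEWModel

    feature_extractor = AutoFeatureExtractor.from_pretrained("asapp/sew-tiny-100k")  # assumed checkpoint
    model = SEWModel.from_pretrained("asapp/sew-tiny-100k")

    raw_speech = [0.0] * 16000  # one second of silent 16 kHz audio as List[float]
    inputs = feature_extractor(raw_speech, sampling_rate=16000, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # (batch, frames, hidden_size)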
820
+ @add_start_docstrings(
821
+ "The bare SEW Model transformer outputting raw hidden-states without any specific head on top.",
822
+ SEW_START_DOCSTRING,
823
+ )
824
+ class SEWModel(SEWPreTrainedModel):
825
+ def __init__(self, config: SEWConfig):
826
+ super().__init__(config)
827
+ self.config = config
828
+ self.feature_extractor = SEWFeatureEncoder(config)
829
+ self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
830
+
831
+ self.project_features = config.conv_dim[-1] != config.hidden_size
832
+ if self.project_features:
833
+ self.feature_projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
834
+ self.feature_dropout = nn.Dropout(config.feat_proj_dropout)
835
+
836
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
837
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
838
+
839
+ self.encoder = SEWEncoder(config)
840
+
841
+ # Initialize weights and apply final processing
842
+ self.post_init()
843
+
844
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
845
+ def _mask_hidden_states(
846
+ self,
847
+ hidden_states: torch.FloatTensor,
848
+ mask_time_indices: Optional[torch.FloatTensor] = None,
849
+ attention_mask: Optional[torch.LongTensor] = None,
850
+ ):
851
+ """
852
+ Masks extracted features along time axis and/or along feature axis according to
853
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
854
+ """
855
+
856
+ # `config.apply_spec_augment` can set masking to False
857
+ if not getattr(self.config, "apply_spec_augment", True):
858
+ return hidden_states
859
+
860
+ # generate indices & apply SpecAugment along time axis
861
+ batch_size, sequence_length, hidden_size = hidden_states.size()
862
+
863
+ if mask_time_indices is not None:
864
+ # apply SpecAugment along time axis with given mask_time_indices
865
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
866
+ elif self.config.mask_time_prob > 0 and self.training:
867
+ mask_time_indices = _compute_mask_indices(
868
+ (batch_size, sequence_length),
869
+ mask_prob=self.config.mask_time_prob,
870
+ mask_length=self.config.mask_time_length,
871
+ attention_mask=attention_mask,
872
+ min_masks=self.config.mask_time_min_masks,
873
+ )
874
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
875
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
876
+
877
+ if self.config.mask_feature_prob > 0 and self.training:
878
+ # generate indices & apply SpecAugment along feature axis
879
+ mask_feature_indices = _compute_mask_indices(
880
+ (batch_size, hidden_size),
881
+ mask_prob=self.config.mask_feature_prob,
882
+ mask_length=self.config.mask_feature_length,
883
+ min_masks=self.config.mask_feature_min_masks,
884
+ )
885
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
886
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
887
+ hidden_states[mask_feature_indices] = 0
888
+
889
+ return hidden_states
890
+
891
+ @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING)
892
+ @add_code_sample_docstrings(
893
+ checkpoint=_CHECKPOINT_FOR_DOC,
894
+ output_type=BaseModelOutput,
895
+ config_class=_CONFIG_FOR_DOC,
896
+ modality="audio",
897
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
898
+ )
899
+ def forward(
900
+ self,
901
+ input_values: Optional[torch.Tensor],
902
+ attention_mask: Optional[torch.Tensor] = None,
903
+ mask_time_indices: Optional[torch.FloatTensor] = None,
904
+ output_attentions: Optional[bool] = None,
905
+ output_hidden_states: Optional[bool] = None,
906
+ return_dict: Optional[bool] = None,
907
+ ) -> Union[Tuple, BaseModelOutput]:
908
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
909
+ output_hidden_states = (
910
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
911
+ )
912
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
913
+
914
+ extract_features = self.feature_extractor(input_values)
915
+ extract_features = extract_features.transpose(1, 2)
916
+ extract_features = self.layer_norm(extract_features)
917
+
918
+ if self.project_features:
919
+ extract_features = self.feature_projection(extract_features)
920
+ hidden_states = self.feature_dropout(extract_features)
921
+
922
+ if attention_mask is not None:
923
+ # compute reduced attention_mask corresponding to feature vectors
924
+ attention_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
925
+
926
+ hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
927
+
928
+ encoder_outputs = self.encoder(
929
+ hidden_states,
930
+ attention_mask=attention_mask,
931
+ output_attentions=output_attentions,
932
+ output_hidden_states=output_hidden_states,
933
+ return_dict=return_dict,
934
+ )
935
+
936
+ hidden_states = encoder_outputs[0]
937
+
938
+ if not return_dict:
939
+ return (hidden_states,) + encoder_outputs[1:]
940
+
941
+ return BaseModelOutput(
942
+ last_hidden_state=hidden_states,
943
+ hidden_states=encoder_outputs.hidden_states,
944
+ attentions=encoder_outputs.attentions,
945
+ )
946
+
947
+
948
+ @add_start_docstrings(
949
+ """SEW Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
950
+ SEW_START_DOCSTRING,
951
+ )
952
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->SEW, wav2vec2->sew, WAV_2_VEC_2->SEW
953
+ class SEWForCTC(SEWPreTrainedModel):
954
+ def __init__(self, config, target_lang: Optional[str] = None):
955
+ super().__init__(config)
956
+
957
+ self.sew = SEWModel(config)
958
+ self.dropout = nn.Dropout(config.final_dropout)
959
+
960
+ self.target_lang = target_lang
961
+
962
+ if config.vocab_size is None:
963
+ raise ValueError(
964
+ f"You are trying to instantiate {self.__class__} with a configuration that "
965
+ "does not define the vocabulary size of the language model head. Please "
966
+ "instantiate the model as follows: `SEWForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
967
+ "or define `vocab_size` of your model's configuration."
968
+ )
969
+ output_hidden_size = (
970
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
971
+ )
972
+ self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
973
+
974
+ # Initialize weights and apply final processing
975
+ self.post_init()
976
+
977
+ def tie_weights(self):
978
+ """
979
+ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
980
+ passing `target_lang=...` to `from_pretrained(...)`.
981
+
982
+ This method is **not** supposed to be called by the user and is prone to be changed in the future.
983
+ """
984
+
985
+ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
986
+ # correctly load adapter layers for SEW so that we do not have to introduce a new API to
987
+ # [`PreTrainedModel`]. While slightly hacky, SEW never has to tie input and output embeddings, so that it is
988
+ # ok to repurpose this function here.
989
+ target_lang = self.target_lang
990
+
991
+ if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
992
+ raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
993
+ elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
994
+ logger.info("By default `target_lang` is set to 'eng'.")
995
+ elif target_lang is not None:
996
+ self.load_adapter(target_lang, force_load=True)
997
+
998
+ def freeze_feature_extractor(self):
999
+ """
1000
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1001
+ not be updated during training.
1002
+ """
1003
+ warnings.warn(
1004
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1005
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1006
+ FutureWarning,
1007
+ )
1008
+ self.freeze_feature_encoder()
1009
+
1010
+ def freeze_feature_encoder(self):
1011
+ """
1012
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1013
+ not be updated during training.
1014
+ """
1015
+ self.sew.feature_extractor._freeze_parameters()
1016
+
1017
+ def freeze_base_model(self):
1018
+ """
1019
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1020
+ be updated during training. Only the classification head will be updated.
1021
+ """
1022
+ for param in self.sew.parameters():
1023
+ param.requires_grad = False
1024
+
1025
+ @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING)
1026
+ @add_code_sample_docstrings(
1027
+ checkpoint=_CHECKPOINT_FOR_DOC,
1028
+ output_type=CausalLMOutput,
1029
+ config_class=_CONFIG_FOR_DOC,
1030
+ expected_output=_CTC_EXPECTED_OUTPUT,
1031
+ expected_loss=_CTC_EXPECTED_LOSS,
1032
+ )
1033
+ def forward(
1034
+ self,
1035
+ input_values: Optional[torch.Tensor],
1036
+ attention_mask: Optional[torch.Tensor] = None,
1037
+ output_attentions: Optional[bool] = None,
1038
+ output_hidden_states: Optional[bool] = None,
1039
+ return_dict: Optional[bool] = None,
1040
+ labels: Optional[torch.Tensor] = None,
1041
+ ) -> Union[Tuple, CausalLMOutput]:
1042
+ r"""
1043
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
1044
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
1045
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
1046
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
1047
+ config.vocab_size - 1]`.
1048
+ """
1049
+
1050
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1051
+
1052
+ outputs = self.sew(
1053
+ input_values,
1054
+ attention_mask=attention_mask,
1055
+ output_attentions=output_attentions,
1056
+ output_hidden_states=output_hidden_states,
1057
+ return_dict=return_dict,
1058
+ )
1059
+
1060
+ hidden_states = outputs[0]
1061
+ hidden_states = self.dropout(hidden_states)
1062
+
1063
+ logits = self.lm_head(hidden_states)
1064
+
1065
+ loss = None
1066
+ if labels is not None:
1067
+ if labels.max() >= self.config.vocab_size:
1068
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
1069
+
1070
+ # retrieve loss input_lengths from attention_mask
1071
+ attention_mask = (
1072
+ attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
1073
+ )
1074
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
1075
+
1076
+ # assuming that padded tokens are filled with -100
1077
+ # when not being attended to
1078
+ labels_mask = labels >= 0
1079
+ target_lengths = labels_mask.sum(-1)
1080
+ flattened_targets = labels.masked_select(labels_mask)
1081
+
1082
+ # ctc_loss doesn't support fp16
1083
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
1084
+
1085
+ with torch.backends.cudnn.flags(enabled=False):
1086
+ loss = nn.functional.ctc_loss(
1087
+ log_probs,
1088
+ flattened_targets,
1089
+ input_lengths,
1090
+ target_lengths,
1091
+ blank=self.config.pad_token_id,
1092
+ reduction=self.config.ctc_loss_reduction,
1093
+ zero_infinity=self.config.ctc_zero_infinity,
1094
+ )
1095
+
1096
+ if not return_dict:
1097
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1098
+ return ((loss,) + output) if loss is not None else output
1099
+
1100
+ return CausalLMOutput(
1101
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1102
+ )
1103
+
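A minimal sketch of driving the CTC head with labels, using a randomly initialized model and made-up sizes as assumptions:

    import torch
    from transformers import SEWConfig, SEWForCTC

    config = SEWConfig(vocab_size=32)        # vocab_size must be defined for the CTC head
    model = SEWForCTC(config).eval()         # eval() turns off SpecAugment masking

    input_values = torch.randn(2, 16000)     # two fake one-second clips
    labels = torch.randint(1, 32, (2, 20))   # short label sequences; -100 would mark padding
    with torch.no_grad():
        outputs = model(input_values, labels=labels)
    print(outputs.loss, outputs.logits.shape)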
1104
+
1105
+ @add_start_docstrings(
1106
+ """
1107
+ SEW Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB
1108
+ Keyword Spotting.
1109
+ """,
1110
+ SEW_START_DOCSTRING,
1111
+ )
1112
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->SEW, wav2vec2->sew, WAV_2_VEC_2->SEW
1113
+ class SEWForSequenceClassification(SEWPreTrainedModel):
1114
+ def __init__(self, config):
1115
+ super().__init__(config)
1116
+
1117
+ if hasattr(config, "add_adapter") and config.add_adapter:
1118
+ raise ValueError(
1119
+ "Sequence classification does not support the use of SEW adapters (config.add_adapter=True)"
1120
+ )
1121
+ self.sew = SEWModel(config)
1122
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1123
+ if config.use_weighted_layer_sum:
1124
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1125
+ self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
1126
+ self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
1127
+
1128
+ # Initialize weights and apply final processing
1129
+ self.post_init()
1130
+
1131
+ def freeze_feature_extractor(self):
1132
+ """
1133
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1134
+ not be updated during training.
1135
+ """
1136
+ warnings.warn(
1137
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1138
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1139
+ FutureWarning,
1140
+ )
1141
+ self.freeze_feature_encoder()
1142
+
1143
+ def freeze_feature_encoder(self):
1144
+ """
1145
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1146
+ not be updated during training.
1147
+ """
1148
+ self.sew.feature_extractor._freeze_parameters()
1149
+
1150
+ def freeze_base_model(self):
1151
+ """
1152
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1153
+ be updated during training. Only the classification head will be updated.
1154
+ """
1155
+ for param in self.sew.parameters():
1156
+ param.requires_grad = False
1157
+
1158
+ @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING)
1159
+ @add_code_sample_docstrings(
1160
+ checkpoint=_SEQ_CLASS_CHECKPOINT,
1161
+ output_type=SequenceClassifierOutput,
1162
+ config_class=_CONFIG_FOR_DOC,
1163
+ modality="audio",
1164
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1165
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1166
+ )
1167
+ def forward(
1168
+ self,
1169
+ input_values: Optional[torch.Tensor],
1170
+ attention_mask: Optional[torch.Tensor] = None,
1171
+ output_attentions: Optional[bool] = None,
1172
+ output_hidden_states: Optional[bool] = None,
1173
+ return_dict: Optional[bool] = None,
1174
+ labels: Optional[torch.Tensor] = None,
1175
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1176
+ r"""
1177
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1178
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1179
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1180
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1181
+ """
1182
+
1183
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1184
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1185
+
1186
+ outputs = self.sew(
1187
+ input_values,
1188
+ attention_mask=attention_mask,
1189
+ output_attentions=output_attentions,
1190
+ output_hidden_states=output_hidden_states,
1191
+ return_dict=return_dict,
1192
+ )
1193
+
1194
+ if self.config.use_weighted_layer_sum:
1195
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1196
+ hidden_states = torch.stack(hidden_states, dim=1)
1197
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1198
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1199
+ else:
1200
+ hidden_states = outputs[0]
1201
+
1202
+ hidden_states = self.projector(hidden_states)
1203
+ if attention_mask is None:
1204
+ pooled_output = hidden_states.mean(dim=1)
1205
+ else:
1206
+ padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
1207
+ hidden_states[~padding_mask] = 0.0
1208
+ pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
1209
+
1210
+ logits = self.classifier(pooled_output)
1211
+
1212
+ loss = None
1213
+ if labels is not None:
1214
+ loss_fct = CrossEntropyLoss()
1215
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
1216
+
1217
+ if not return_dict:
1218
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1219
+ return ((loss,) + output) if loss is not None else output
1220
+
1221
+ return SequenceClassifierOutput(
1222
+ loss=loss,
1223
+ logits=logits,
1224
+ hidden_states=outputs.hidden_states,
1225
+ attentions=outputs.attentions,
1226
+ )
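The pooling above averages only over non-padded frames. A toy sketch of that masked mean, with shapes chosen as assumptions:

    import torch

    hidden_states = torch.randn(2, 6, 4)     # (batch, frames, classifier_proj_size)
    padding_mask = torch.tensor([[1, 1, 1, 1, 0, 0],
                                 [1, 1, 1, 1, 1, 1]]).bool()
    hidden_states[~padding_mask] = 0.0
    pooled = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
    print(pooled.shape)  # (2, 4): one pooled vector per clip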
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__init__.py ADDED
@@ -0,0 +1,80 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_sentencepiece_available,
22
+ is_tf_available,
23
+ is_tokenizers_available,
24
+ is_torch_available,
25
+ )
26
+
27
+
28
+ _import_structure = {
29
+ "configuration_switch_transformers": [
30
+ "SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP",
31
+ "SwitchTransformersConfig",
32
+ "SwitchTransformersOnnxConfig",
33
+ ]
34
+ }
35
+
36
+ try:
37
+ if not is_torch_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["modeling_switch_transformers"] = [
43
+ "SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST",
44
+ "SwitchTransformersEncoderModel",
45
+ "SwitchTransformersForConditionalGeneration",
46
+ "SwitchTransformersModel",
47
+ "SwitchTransformersPreTrainedModel",
48
+ "SwitchTransformersTop1Router",
49
+ "SwitchTransformersSparseMLP",
50
+ ]
51
+
52
+
53
+ if TYPE_CHECKING:
54
+ from .configuration_switch_transformers import (
55
+ SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP,
56
+ SwitchTransformersConfig,
57
+ SwitchTransformersOnnxConfig,
58
+ )
59
+
60
+ try:
61
+ if not is_torch_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ from .modeling_switch_transformers import (
67
+ SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST,
68
+ SwitchTransformersEncoderModel,
69
+ SwitchTransformersForConditionalGeneration,
70
+ SwitchTransformersModel,
71
+ SwitchTransformersPreTrainedModel,
72
+ SwitchTransformersSparseMLP,
73
+ SwitchTransformersTop1Router,
74
+ )
75
+
76
+
77
+ else:
78
+ import sys
79
+
80
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
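Because of the `_LazyModule` indirection above, importing light-weight objects such as the configuration does not pull in the PyTorch modeling code until it is actually accessed. A small usage sketch:

    # Cheap import; modeling_switch_transformers is only loaded on first access.
    from transformers.models.switch_transformers import SwitchTransformersConfig

    config = SwitchTransformersConfig(num_experts=4)
    print(config.num_experts)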
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.35 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/configuration_switch_transformers.cpython-310.pyc ADDED
Binary file (7.29 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/convert_big_switch.cpython-310.pyc ADDED
Binary file (5.37 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/convert_switch_transformers_original_flax_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (5.08 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/__pycache__/modeling_switch_transformers.cpython-310.pyc ADDED
Binary file (56.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/configuration_switch_transformers.py ADDED
@@ -0,0 +1,184 @@
1
+ # coding=utf-8
2
+ # Copyright 2022, Google and HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Switch Transformers model configuration"""
16
+ from ...configuration_utils import PretrainedConfig
17
+ from ...utils import logging
18
+
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+
23
+ from ..deprecated._archive_maps import SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
24
+
25
+
26
+ class SwitchTransformersConfig(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`SwitchTransformersModel`]. It is used to
29
+ instantiate a SwitchTransformers model according to the specified arguments, defining the model architecture.
30
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
31
+ SwitchTransformers [google/switch-base-8](https://huggingface.co/google/switch-base-8) architecture.
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+ Arguments:
37
+ vocab_size (`int`, *optional*, defaults to 32128):
38
+ Vocabulary size of the SwitchTransformers model. Defines the number of different tokens that can be
39
+ represented by the `inputs_ids` passed when calling [`SwitchTransformersModel`].
40
+ d_model (`int`, *optional*, defaults to 768):
41
+ Size of the encoder layers and the pooler layer.
42
+ d_kv (`int`, *optional*, defaults to 64):
43
+ Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
44
+ num_heads`.
45
+ d_ff (`int`, *optional*, defaults to 2048):
46
+ Size of the intermediate feed forward layer in each `SwitchTransformersBlock`.
47
+ expert_capacity (`int`, *optional*, defaults to 64):
48
+ Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular
49
+ Transformer.
50
+ num_layers (`int`, *optional*, defaults to 12):
51
+ Number of dense hidden layers in the Transformer encoder layer.
52
+ num_sparse_encoder_layers (`int`, *optional*, defaults to 3):
53
+ Number of sparse (MoE) dense hidden layers in the Transformer encoder layer.
54
+ num_decoder_layers (`int`, *optional*, defaults to 12):
55
+ Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
56
+ num_sparse_decoder_layers (`int`, *optional*, defaults to 3):
57
+ Number of sparse (MoE) dense hidden layers in the Transformer decoder layer.
58
+ num_heads (`int`, *optional*, defaults to 12):
59
+ Number of attention heads for each attention layer in the Transformer encoder.
60
+ num_experts (`int`, *optional*, defaults to 8):
61
+ Number of experts for each SwitchTransformer layer.
62
+ router_bias (`bool`, *optional*, defaults to `False`):
63
+ Whether to add a bias to the router.
64
+ router_jitter_noise (`float`, *optional*, defaults to 0.01):
65
+ Amount of noise to add to the router.
66
+ router_dtype (`str`, *optional*, default to `"float32"`):
67
+ The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
68
+ *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961).
69
+ router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
70
+ Whether to ignore padding tokens when routing.
71
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
72
+ The number of buckets to use for each attention layer.
73
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
74
+ The maximum distance of the longer sequences for the bucket separation.
75
+ dropout_rate (`float`, *optional*, defaults to 0.1):
76
+ The ratio for all dropout layers.
77
+ layer_norm_eps (`float`, *optional*, defaults to 1e-6):
78
+ The epsilon used by the layer normalization layers.
79
+ router_z_loss_coef (`float`, *optional*, defaults to 0.001):
80
+ The z loss factor for the total loss.
81
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
82
+ The aux loss factor for the total loss.
83
+ initializer_factor (`float`, *optional*, defaults to 1.0):
84
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
85
+ testing).
86
+ dense_act_fn (`string`, *optional*, defaults to `"relu"`):
87
+ Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. SwitchTransformersv1.1
88
+ uses the `"gated-gelu"` feed forward projection. Original SwitchTransformers uses `"relu"`.
89
+ add_router_probs (`bool`, *optional*, defaults to `False`):
90
+ Whether to output router probabilities to compute router auxiliary loss.
91
+ use_cache (`bool`, *optional*, defaults to `True`):
92
+ Whether or not the model should return the last key/values attentions (not used by all models).
93
+ """
94
+
95
+ model_type = "switch_transformers"
96
+ keys_to_ignore_at_inference = ["past_key_values"]
97
+ attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
98
+
99
+ def __init__(
100
+ self,
101
+ vocab_size=32128,
102
+ d_model=768,
103
+ d_kv=64,
104
+ d_ff=2048,
105
+ expert_capacity=64,
106
+ num_layers=12,
107
+ num_sparse_encoder_layers=3,
108
+ num_decoder_layers=12,
109
+ num_sparse_decoder_layers=3,
110
+ num_heads=12,
111
+ num_experts=8,
112
+ router_bias=False,
113
+ router_jitter_noise=0.01,
114
+ router_dtype="float32",
115
+ router_ignore_padding_tokens=False,
116
+ relative_attention_num_buckets=32,
117
+ relative_attention_max_distance=128,
118
+ dropout_rate=0.1,
119
+ layer_norm_epsilon=1e-6,
120
+ router_z_loss_coef=0.001,
121
+ router_aux_loss_coef=0.001,
122
+ initializer_factor=1.0,
123
+ dense_act_fn="relu",
124
+ is_encoder_decoder=True,
125
+ add_router_probs=False,
126
+ use_cache=True,
127
+ pad_token_id=0,
128
+ eos_token_id=1,
129
+ **kwargs,
130
+ ):
131
+ self.vocab_size = vocab_size
132
+ self.d_model = d_model
133
+ self.d_kv = d_kv
134
+ self.d_ff = d_ff
135
+
136
+ self.num_sparse_encoder_layers = num_sparse_encoder_layers
137
+
138
+ self.num_layers = num_layers
139
+ self.num_decoder_layers = (
140
+ num_decoder_layers if num_decoder_layers is not None else self.num_layers
141
+ ) # default = symmetry
142
+ self.num_sparse_decoder_layers = num_sparse_decoder_layers
143
+
144
+ # This tells us, each how many encoder layer we'll have to set a sparse layer.
145
+ if self.num_sparse_encoder_layers > 0:
146
+ self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
147
+ else:
148
+ self.encoder_sparse_step = self.num_layers # HACK: this will create 0 sparse layers
149
+
150
+ # This tells us, each how many encoder layer we'll have to set a sparse layer.
151
+ if self.num_sparse_decoder_layers > 0:
152
+ self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
153
+ else:
154
+ self.decoder_sparse_step = self.num_decoder_layers # HACK: this will create 0 sparse layers
155
+
156
+ self.num_heads = num_heads
157
+ self.num_experts = num_experts
158
+ self.expert_capacity = expert_capacity
159
+ self.router_bias = router_bias
160
+ self.router_jitter_noise = router_jitter_noise
161
+ if router_dtype not in ["float32", "float16", "bfloat16"]:
162
+ raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
163
+ self.router_dtype = router_dtype
164
+
165
+ self.router_ignore_padding_tokens = router_ignore_padding_tokens
166
+ self.relative_attention_num_buckets = relative_attention_num_buckets
167
+ self.relative_attention_max_distance = relative_attention_max_distance
168
+
169
+ self.dropout_rate = dropout_rate
170
+ self.layer_norm_epsilon = layer_norm_epsilon
171
+ self.initializer_factor = initializer_factor
172
+ self.use_cache = use_cache
173
+ self.add_router_probs = add_router_probs
174
+
175
+ self.router_z_loss_coef = router_z_loss_coef
176
+ self.router_aux_loss_coef = router_aux_loss_coef
177
+ self.dense_act_fn = dense_act_fn
178
+
179
+ super().__init__(
180
+ pad_token_id=pad_token_id,
181
+ eos_token_id=eos_token_id,
182
+ is_encoder_decoder=is_encoder_decoder,
183
+ **kwargs,
184
+ )
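A small sketch of the sparse-step bookkeeping above, restating the arguments shown rather than adding new behavior:

    from transformers import SwitchTransformersConfig

    config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
    # A sparse (MoE) layer is placed every `encoder_sparse_step` layers.
    print(config.encoder_sparse_step)  # 12 // 3 == 4
    print(config.decoder_sparse_step)  # 12 // 3 == 4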
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/convert_big_switch.py ADDED
@@ -0,0 +1,193 @@
1
+ import argparse
2
+ import json
3
+ import os
4
+
5
+ import tensorstore as ts
6
+ import torch
7
+ from flax import serialization
8
+ from flax.traverse_util import flatten_dict, unflatten_dict
9
+ from tensorflow.io import gfile
10
+
11
+ from transformers.modeling_utils import dtype_byte_size
12
+ from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
13
+ rename_keys,
14
+ )
15
+ from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
16
+ from transformers.utils.hub import convert_file_size_to_int
17
+
18
+
19
+ def rename_base_flax_keys(flax_key_tuple, flax_tensor):
20
+ """
21
+ Post renaming of basic JAX keys to pytorch.
22
+ """
23
+ if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
24
+ # expert layer
25
+ flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
26
+ flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
27
+ elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
28
+ # linear layer
29
+ flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
30
+ flax_tensor = flax_tensor.T
31
+ elif flax_key_tuple[-1] in ["scale", "embedding"]:
32
+ flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
33
+
34
+ return flax_key_tuple, flax_tensor
35
+
36
+
37
+ def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
38
+ if "metadata" in layer:
39
+ split_layer = layer.split("metadata")
40
+ curr_real_layer_name = "".join(split_layer[0])[:-1]
41
+ split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
42
+ elif "kvstore" in layer:
43
+ split_layer = layer.split("kvstore")
44
+ curr_real_layer_name = "".join(split_layer[0])[:-1]
45
+ split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
46
+
47
+ else:
48
+ split_layer = layer.split("/")
49
+ curr_real_layer_name = "/".join(split_layer[:-1])
50
+ split_layer[-1] = (split_layer[-1],)
51
+
52
+ if "kvstore/path" in layer:
53
+ content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
54
+ elif "kvstore/driver" in layer:
55
+ content = "file"
56
+ else:
57
+ content = checkpoint_info[layer]
58
+
59
+ return curr_real_layer_name, split_layer, content
60
+
61
+
62
+ def rename_and_save_block(current_block, save_path):
63
+ current_block = rename_keys(current_block)
64
+ new_current_block = {}
65
+ for k, v in current_block.items():
66
+ new_current_block[k.replace("/", ".")] = v
67
+ current_block = new_current_block
68
+ torch.save(current_block, save_path)
69
+
70
+
71
+ def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
72
+ max_shard_size = convert_file_size_to_int(max_shard_size)
73
+ sharded_state_dicts = []
74
+ current_block = {}
75
+ current_block_size = 0
76
+ total_size = 0
77
+
78
+ os.makedirs(dump_path, exist_ok=True)
79
+ with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
80
+ checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
81
+ checkpoint_info = flatten_dict(checkpoint_info, sep="/")
82
+
83
+ all_layers = {}
84
+ for layer in checkpoint_info.keys():
85
+ curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
86
+ layer, checkpoint_info, switch_checkpoint_path
87
+ )
88
+ if curr_real_layer_name in all_layers:
89
+ all_layers[curr_real_layer_name][split_layer[-1]] = content
90
+ else:
91
+ all_layers[curr_real_layer_name] = {split_layer[-1]: content}
92
+
93
+ for key in all_layers.keys():
94
+ # open tensorstore file
95
+ raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
96
+ raw_weights = torch.tensor(raw_weights)
97
+ weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
98
+
99
+ # use the renaming pattern from the small conversion scripts
100
+ key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
101
+ key = "/".join(key)
102
+
103
+ # If adding this weight would push the current shard over the maximum size, we split.
104
+ if current_block_size + weight_size > max_shard_size:
105
+ save_path = os.path.join(
106
+ dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
107
+ )
108
+ rename_and_save_block(current_block, save_path)
109
+ sharded_state_dicts.append(current_block.keys())
110
+ del current_block
111
+ current_block = {}
112
+ current_block_size = 0
113
+
114
+ current_block[key] = raw_weights.to(getattr(torch, dtype))
115
+ current_block_size += weight_size
116
+ total_size += weight_size
117
+
118
+ # Add the last block
119
+ save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
120
+ rename_and_save_block(current_block, save_path)
121
+ sharded_state_dicts.append(current_block.keys())
122
+
123
+ # If we only have one shard, we return it
124
+ if len(sharded_state_dicts) == 1:
125
+ return {weights_name: sharded_state_dicts[0]}, None
126
+
127
+ # Otherwise, let's build the index
128
+ weight_map = {}
129
+ shards = {}
130
+ for idx, shard in enumerate(sharded_state_dicts):
131
+ shard_file = weights_name.replace(
132
+ ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
133
+ )
134
+ temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
135
+ os.rename(temp_filename, os.path.join(dump_path, shard_file))
136
+ shards[shard_file] = shard
137
+ for key in shard:
138
+ weight_map[key] = shard_file
139
+
140
+ # Add the metadata
141
+ metadata = {"total_size": total_size}
142
+ index = {"metadata": metadata, "weight_map": weight_map}
143
+
144
+ with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
145
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
146
+ f.write(content)
147
+
148
+ return metadata, index
149
+
150
+
151
+ if __name__ == "__main__":
152
+ parser = argparse.ArgumentParser()
153
+ # Required parameters
154
+ parser.add_argument(
155
+ "--switch_t5x_checkpoint_path",
156
+ default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
157
+ type=str,
158
+ required=False,
159
+ help="Path to a directory containing a folder per layer. Follows the original Google format.",
160
+ )
161
+ parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
162
+ parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
163
+ parser.add_argument(
164
+ "--pytorch_dump_folder_path",
165
+ default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
166
+ type=str,
167
+ required=False,
168
+ help="Path to the output pytorch model.",
169
+ )
170
+ args = parser.parse_args()
171
+ shard_on_the_fly(
172
+ args.switch_t5x_checkpoint_path,
173
+ args.pytorch_dump_folder_path,
174
+ args.max_shard_size,
175
+ args.dtype,
176
+ )
177
+
178
+
179
+ def sanity_check():
180
+ from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
181
+
182
+ config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
183
+ config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
184
+ model = SwitchTransformersForConditionalGeneration.from_pretrained(
185
+ "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
186
+ )
187
+
188
+ tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
189
+ text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
190
+
191
+ input_ids = tokenizer(text, return_tensors="pt").input_ids
192
+ out = model.generate(input_ids, decoder_start_token_id=0)
193
+ print(tokenizer.decode(out[0]))
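A minimal usage sketch (not part of the scripts above) of how the sharded output written by `shard_on_the_fly` could be consumed afterwards: the index file maps each renamed weight to the shard that stores it. The dump path reuses the default from the argument parser above, and the weight name is a hypothetical placeholder.

import json
import os

import torch

dump_path = "/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted"  # default dump folder from above
with open(os.path.join(dump_path, "pytorch_model.bin.index.json"), encoding="utf-8") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])             # total byte size accumulated during sharding
shard_file = index["weight_map"]["shared.weight"]  # hypothetical key; every weight name points to its shard file
state_dict = torch.load(os.path.join(dump_path, shard_file), map_location="cpu")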
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,203 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Convert SwitchTransformersX checkpoints from the original repository to JAX/FLAX model."""
17
+
18
+ import argparse
19
+ import re
20
+
21
+ from flax.traverse_util import flatten_dict, unflatten_dict
22
+ from t5x import checkpoints
23
+
24
+ from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
25
+ from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
26
+ from transformers.utils import logging
27
+
28
+
29
+ logging.set_verbosity_info()
30
+
31
+
32
+ # should not include what is already done by the `from_pt` argument
33
+ MOE_LAYER_NAME_MAPPING = {
34
+ "/attention/": "/0/SelfAttention/",
35
+ "/self_attention/": "/0/SelfAttention/",
36
+ "/encoder_decoder_attention/": "/1/EncDecAttention/",
37
+ "value": "v",
38
+ "query": "q",
39
+ "key": "k",
40
+ "out": "o",
41
+ "pre_self_attention_layer_norm": "0/layer_norm",
42
+ "pre_cross_attention_layer_norm": "1/layer_norm",
43
+ "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
44
+ "token_embedder": "shared",
45
+ "encoder_norm": "final_layer_norm",
46
+ "decoder_norm": "final_layer_norm",
47
+ "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
48
+ "router/router_weights/w/": "router/classifier/",
49
+ "roer/roer_weights/w/": "router/classifier/",
50
+ "logits_dense": "lm_head",
51
+ }
52
+
53
+
54
+ def rename_keys(s_dict):
55
+ # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
56
+ # the original model
57
+ keys = list(s_dict.keys())
58
+ for key in keys:
59
+ layer_to_block_of_layer = r".*/layers_(\d+)"
60
+ new_key = key
61
+ if re.match(layer_to_block_of_layer, key):
62
+ new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)
63
+
64
+ layer_to_block_of_layer = r"(encoder|decoder)\/"
65
+
66
+ if re.match(layer_to_block_of_layer, key):
67
+ groups = re.match(layer_to_block_of_layer, new_key).groups()
68
+ if groups[0] == "encoder":
69
+ new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
70
+ new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
71
+
72
+ elif groups[0] == "decoder":
73
+ new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
74
+ new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)
75
+
76
+ # 2. Convert other classic mappings
77
+ for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
78
+ if old_key in new_key:
79
+ new_key = new_key.replace(old_key, temp_key)
80
+
81
+ print(f"{key} -> {new_key}")
82
+ s_dict[new_key] = s_dict.pop(key)
83
+
84
+ if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
85
+ s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
86
+ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
87
+ ].T
88
+ if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
89
+ s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
90
+ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
91
+ ].T
92
+
93
+ # 3. Take extra care of the EXPERTS layer
94
+ for key in list(s_dict.keys()):
95
+ if "expert" in key:
96
+ num_experts = s_dict[key].shape[0]
97
+ expert_weights = s_dict[key]
98
+ for idx in range(num_experts):
99
+ s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx]
100
+ print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
101
+
102
+ s_dict.pop(key)
103
+
104
+ return s_dict
105
+
106
+
107
+ GIN_TO_CONFIG_MAPPING = {
108
+ "NUM_ENCODER_LAYERS": "num_layers",
109
+ "NUM_DECODER_LAYERS": "num_decoder_layers",
110
+ "NUM_HEADS": "num_heads",
111
+ "HEAD_DIM": "d_kv",
112
+ "EMBED_DIM": "d_model",
113
+ "MLP_DIM": "d_ff",
114
+ "NUM_SELECTED_EXPERTS": "num_selected_experts",
115
+ "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
116
+ "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
117
+ "dense.MlpBlock.activations": "feed_forward_proj",
118
+ }
119
+
120
+
121
+ def convert_gin_to_config(gin_file, num_experts):
122
+ # Convert a Google-style gin config to the Hugging Face format
123
+ import regex as re
124
+
125
+ with open(gin_file, "r") as f:
126
+ raw_gin = f.read()
127
+
128
+ regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
129
+ args = {}
130
+ for param, value in regex_match:
131
+ if param in GIN_TO_CONFIG_MAPPING and value != "":
132
+ args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)
133
+
134
+ activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
135
+ args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
136
+
137
+ args["num_experts"] = num_experts
138
+ config = SwitchTransformersConfig(**args)
139
+ return config
140
+
141
+
142
+ def convert_flax_checkpoint_to_pytorch(
143
+ flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
144
+ ):
145
+ # Initialise PyTorch model
146
+
147
+ print(f"Loading flax weights from : {flax_checkpoint_path}")
148
+ flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
149
+
150
+ if gin_file is not None:
151
+ config = convert_gin_to_config(gin_file, num_experts)
152
+ else:
153
+ config = SwitchTransformersConfig.from_pretrained(config_file)
154
+
155
+ pt_model = SwitchTransformersForConditionalGeneration(config)
156
+
157
+ flax_params = flax_params["target"]
158
+ flax_params = flatten_dict(flax_params, sep="/")
159
+ flax_params = rename_keys(flax_params)
160
+ flax_params = unflatten_dict(flax_params, sep="/")
161
+
162
+ # Load the flax params in the PT model
163
+ load_flax_weights_in_pytorch_model(pt_model, flax_params)
164
+
165
+ print(f"Save PyTorch model to {pytorch_dump_path}")
166
+ pt_model.save_pretrained(pytorch_dump_path)
167
+
168
+
169
+ if __name__ == "__main__":
170
+ parser = argparse.ArgumentParser()
171
+ # Required parameters
172
+ parser.add_argument(
173
+ "--switch_t5x_checkpoint_path",
174
+ default=None,
175
+ type=str,
176
+ required=True,
177
+ help=(
178
+ "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
179
+ " model architecture. If not provided, a `gin_file` has to be provided."
180
+ ),
181
+ )
182
+ parser.add_argument(
183
+ "--gin_file",
184
+ default=None,
185
+ type=str,
186
+ required=False,
187
+ help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
188
+ )
189
+ parser.add_argument(
190
+ "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
191
+ )
192
+ parser.add_argument(
193
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
194
+ )
195
+ parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
196
+ args = parser.parse_args()
197
+ convert_flax_checkpoint_to_pytorch(
198
+ args.switch_t5x_checkpoint_path,
199
+ args.config_name,
200
+ args.gin_file,
201
+ args.pytorch_dump_folder_path,
202
+ args.num_experts,
203
+ )
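As a small illustration of step 3 in `rename_keys` above (splitting the stacked expert weights into one entry per expert), here is a self-contained toy version; the key name and the shapes are invented for the example.

import numpy as np

# toy stand-in for a stacked expert kernel: [num_experts, d_model, d_ff]
s_dict = {"encoder/block/1/layer/1/mlp/expert/wi/kernel": np.zeros((8, 512, 2048))}

for key in list(s_dict.keys()):
    if "expert" in key:
        num_experts = s_dict[key].shape[0]
        expert_weights = s_dict[key]
        for idx in range(num_experts):
            # same replacement pattern as in the script above
            s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
        s_dict.pop(key)

print(len(s_dict))  # 8 per-expert entries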
llmeval-env/lib/python3.10/site-packages/transformers/models/switch_transformers/modeling_switch_transformers.py ADDED
@@ -0,0 +1,1858 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 SwitchTransformers Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch SwitchTransformers model."""
16
+
17
+
18
+ import copy
19
+ import math
20
+ import warnings
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn as nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ MoEModelOutput,
30
+ MoEModelOutputWithPastAndCrossAttentions,
31
+ Seq2SeqMoEModelOutput,
32
+ Seq2SeqMoEOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer
36
+ from ...utils import (
37
+ DUMMY_INPUTS,
38
+ DUMMY_MASK,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ is_torch_fx_proxy,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from .configuration_switch_transformers import SwitchTransformersConfig
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+ _CONFIG_FOR_DOC = "SwitchTransformersConfig"
51
+ _CHECKPOINT_FOR_DOC = "google/switch-base-8"
52
+
53
+ ####################################################
54
+ # This dict contains ids and associated url
55
+ # for the pretrained weights provided with the models
56
+ ####################################################
57
+
58
+ from ..deprecated._archive_maps import SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
59
+
60
+
61
+ def router_z_loss_func(router_logits: torch.Tensor) -> float:
62
+ r"""
63
+ Compute the router z-loss implemented in PyTorch.
64
+
65
+ The router z-loss was introduced in [Designing Effective Sparse Expert Models](https://arxiv.org/abs/2202.08906).
66
+ It encourages router logits to remain small in an effort to improve stability.
67
+
68
+ Args:
69
+ router_logits (`torch.Tensor`):
70
+ Input logits of shape [batch_size, sequence_length, num_experts]
71
+
72
+ Returns:
73
+ Scalar router z-loss.
74
+ """
75
+ num_groups, tokens_per_group, _ = router_logits.shape
76
+ log_z = torch.logsumexp(router_logits, dim=-1)
77
+ z_loss = log_z**2
78
+ return torch.sum(z_loss) / (num_groups * tokens_per_group)
79
+
80
+
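# Illustrative check of `router_z_loss_func` above with toy shapes (a sketch, not part of the file;
# it assumes the module is importable from this tree).
import torch

from transformers.models.switch_transformers.modeling_switch_transformers import router_z_loss_func

logits = torch.zeros(2, 4, 8)    # [num_groups, tokens_per_group, num_experts]
# logsumexp over 8 equal logits is log(8), so the loss is log(8) ** 2 ≈ 4.32
print(router_z_loss_func(logits))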
81
+ def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float:
82
+ r"""
83
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
84
+
85
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
86
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
87
+ experts is too unbalanced.
88
+
89
+ Args:
90
+ router_probs (`torch.Tensor`):
91
+ Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
92
+ expert_indices (`torch.Tensor`):
93
+ Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.
94
+
95
+ Returns:
96
+ The auxiliary loss.
97
+ """
98
+ num_experts = router_probs.shape[-1]
99
+
100
+ # cast the expert indices to int64, otherwise one-hot encoding will fail
101
+ if expert_indices.dtype != torch.int64:
102
+ expert_indices = expert_indices.to(torch.int64)
103
+
104
+ if len(expert_indices.shape) == 2:
105
+ expert_indices = expert_indices.unsqueeze(2)
106
+
107
+ expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)
108
+
109
+ # For a given token, determine if it was routed to a given expert.
110
+ expert_mask = torch.max(expert_mask, axis=-2).values
111
+
112
+ # cast to float32 otherwise mean will fail
113
+ expert_mask = expert_mask.to(torch.float32)
114
+ tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)
115
+
116
+ router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
117
+ return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2)
118
+
119
+
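# Illustrative check of `load_balancing_loss_func` above with toy values (a sketch, not part of the
# file): with uniform router probabilities and tokens spread evenly over the experts, the loss is 1.
import torch

from transformers.models.switch_transformers.modeling_switch_transformers import load_balancing_loss_func

num_experts = 4
router_probs = torch.full((1, 8, num_experts), 1.0 / num_experts)    # [batch_size, sequence_length, num_experts]
expert_indices = (torch.arange(8) % num_experts).view(1, 8)          # perfectly balanced top-1 assignment
print(load_balancing_loss_func(router_probs, expert_indices))        # tensor(1.)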
120
+ class SwitchTransformersTop1Router(nn.Module):
121
+ """
122
+ Router using tokens choose top-1 experts assignment.
123
+
124
+ This router uses the same mechanism as in Switch Transformer (https://arxiv.org/abs/2101.03961) and V-MoE
125
+ (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then
126
+ routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each
127
+ token is processed by an expert**, or that each expert receives at least one token.
128
+
129
+ """
130
+
131
+ def __init__(self, config: SwitchTransformersConfig):
132
+ super().__init__()
133
+ self.num_experts = config.num_experts
134
+ self.expert_capacity = config.expert_capacity
135
+ self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
136
+ self.jitter_noise = config.router_jitter_noise
137
+ self.ignore_padding_tokens = config.router_ignore_padding_tokens
138
+ self.dtype = getattr(torch, config.router_dtype)
139
+
140
+ def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
141
+ r"""
142
+ Computes router probabilities from input hidden states.
143
+
144
+ Args:
145
+ hidden_states (`torch.Tensor`):
146
+ (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
147
+ Returns:
148
+ router_probabilities (`torch.Tensor`):
149
+ Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
150
+ token and expert. Used for routing tokens to experts.
151
+ router_logits (`torch.Tensor`):
152
+ Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
153
+ This is used later for computing router z-loss.
154
+ """
155
+ # float32 is used to ensure stability. See the discussion of "selective precision" in
156
+ # https://arxiv.org/abs/2101.03961.
157
+ # We also store the previous dtype to cast back the output to the previous dtype
158
+ self.input_dtype = hidden_states.dtype
159
+ hidden_states = hidden_states.to(self.dtype)
160
+
161
+ if self.training and self.jitter_noise > 0:
162
+ # Multiply the token inputs by the uniform distribution - adding some noise
163
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
164
+
165
+ # Shape: [num_groups, tokens_per_group, num_experts]
166
+ self._cast_classifier()
167
+ router_logits = self.classifier(hidden_states)
168
+
169
+ # Apply Softmax and cast back to the original `dtype`
170
+ router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)
171
+ return router_probabilities, router_logits
172
+
173
+ def _cast_classifier(self):
174
+ r"""
175
+ `bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore, we need to check whether the
176
+ classifier is an instance of the `Linear8bitLt` class by checking its special attributes.
177
+ """
178
+ if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")):
179
+ self.classifier = self.classifier.to(self.dtype)
180
+
181
+ def forward(self, hidden_states: torch.Tensor) -> Tuple:
182
+ r"""
183
+ Generic forward function for every Router class. Each Router expects to have the same input hidden states
184
+ (`hidden_states`) corresponding to the hidden states for each token, the `expert_capacity` corresponding to the
185
+ number of tokens the Router will send to each expert; some Routers can send only up to a few tokens to each expert.
186
+
187
+ Each Router works as follows: it takes the hidden states for each token, gets the `router_probs` and
188
+ `router_logits` from the `router_weights`, and this assigns to each token the raw probability of being routed
189
+ to an expert. Each Router class then has to define its own `_compute_routing_instructions`.
190
+
191
+ Args:
192
+ hidden_states (`torch.Tensor`) :
193
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
194
+ Returns:
195
+ Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs
196
+ and the router logits. The router probabilities and logits are required to compute the loss.
197
+ """
198
+ router_probs, router_logits = self._compute_router_probabilities(hidden_states)
199
+
200
+ expert_index = torch.argmax(router_probs, dim=-1)
201
+ expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)
202
+
203
+ # Mask tokens outside expert capacity. Sum over each sequence
204
+ token_priority = torch.cumsum(expert_index, dim=-2)
205
+ # mask if routing the token to the expert would overflow the expert's capacity
206
+ expert_capacity_mask = token_priority <= self.expert_capacity
207
+ expert_index = expert_index * expert_capacity_mask
208
+
209
+ router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)
210
+ return expert_index, router_probs, router_logits
211
+
212
+
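# Small sketch of the capacity masking used in `SwitchTransformersTop1Router.forward` (toy numbers):
# a cumulative sum over the token axis counts how many tokens each expert has already received, and
# assignments beyond the assumed `expert_capacity` are dropped.
import torch

one_hot = torch.tensor([[[1, 0], [1, 0], [1, 0], [0, 1]]])    # 4 tokens, 2 experts, top-1 choices
token_priority = torch.cumsum(one_hot, dim=-2)                # running per-expert token count
expert_capacity_mask = token_priority <= 2                    # expert_capacity assumed to be 2
print(one_hot * expert_capacity_mask)                         # the third token routed to expert 0 is dropped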
213
+ # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->SwitchTransformers
214
+ class SwitchTransformersLayerNorm(nn.Module):
215
+ def __init__(self, hidden_size, eps=1e-6):
216
+ """
217
+ Construct a layernorm module in the SwitchTransformers style. No bias and no subtraction of mean.
218
+ """
219
+ super().__init__()
220
+ self.weight = nn.Parameter(torch.ones(hidden_size))
221
+ self.variance_epsilon = eps
222
+
223
+ def forward(self, hidden_states):
224
+ # SwitchTransformers uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
225
+ # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated
226
+ # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
227
+ # half-precision inputs is done in fp32
228
+
229
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
230
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
231
+
232
+ # convert into half-precision if necessary
233
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
234
+ hidden_states = hidden_states.to(self.weight.dtype)
235
+
236
+ return self.weight * hidden_states
237
+
238
+
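# The forward pass above is RMSNorm: scale by the root mean square, with no mean subtraction and no
# bias. Equivalent one-liner on a toy tensor (shapes invented for illustration):
import torch

x = torch.randn(2, 3, 8)
weight, eps = torch.ones(8), 1e-6
y = weight * x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)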
239
+ ALL_LAYERNORM_LAYERS.append(SwitchTransformersLayerNorm)
240
+
241
+
242
+ # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->SwitchTransformers
243
+ class SwitchTransformersDenseActDense(nn.Module):
244
+ def __init__(self, config: SwitchTransformersConfig):
245
+ super().__init__()
246
+ self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
247
+ self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
248
+ self.dropout = nn.Dropout(config.dropout_rate)
249
+ self.act = ACT2FN[config.dense_act_fn]
250
+
251
+ def forward(self, hidden_states):
252
+ hidden_states = self.wi(hidden_states)
253
+ hidden_states = self.act(hidden_states)
254
+ hidden_states = self.dropout(hidden_states)
255
+ if (
256
+ isinstance(self.wo.weight, torch.Tensor)
257
+ and hidden_states.dtype != self.wo.weight.dtype
258
+ and self.wo.weight.dtype != torch.int8
259
+ ):
260
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
261
+ hidden_states = self.wo(hidden_states)
262
+ return hidden_states
263
+
264
+
265
+ class SwitchTransformersSparseMLP(nn.Module):
266
+ r"""
267
+ Implementation of the Switch Transformers Sparse MLP module.
268
+ """
269
+
270
+ def __init__(self, config: SwitchTransformersConfig, expert_class: nn.Module = SwitchTransformersDenseActDense):
271
+ super().__init__()
272
+ # Step 1: Get the correct router according to its class
273
+ self.router = SwitchTransformersTop1Router(config)
274
+
275
+ # Step 2: Get the experts
276
+ self.experts = nn.ModuleDict()
277
+ for idx in range(config.num_experts):
278
+ self.experts[f"expert_{idx}"] = expert_class(config)
279
+
280
+ def forward(self, hidden_states):
281
+ r"""
282
+ Hold on, this will be slightly tricky to understand. In the correct order, a MoE layer does the following:
283
+
284
+ 1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)`
285
+ and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the
286
+ hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor).
287
+
288
+ 2- Dispatch the tokens to their associated experts. We do a classic for loop over the experts and assign to each
289
+ expert the corresponding hidden states.
290
+
291
+ """
292
+ # Step 1: Get the router_mask from the router as well as the probabilities
293
+ router_mask, router_probs, router_logits = self.router(hidden_states)
294
+ expert_index = torch.argmax(router_mask, dim=-1)
295
+
296
+ # The routers introduced might not always map all the tokens to an expert, which means that some hidden states
297
+ # can be unchanged from one layer to another. That is why the hidden states are cloned before updating only the selected ones.
298
+
299
+ next_states = hidden_states.clone()
300
+ for idx, expert in enumerate(self.experts.values()):
301
+ token_indices = router_mask[:, :, idx].bool()
302
+ next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype)
303
+
304
+ hidden_states = router_probs * next_states
305
+ return hidden_states, (router_logits, expert_index)
306
+
307
+
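# Toy restatement of the dispatch loop in `SwitchTransformersSparseMLP.forward`: each token is sent to
# the single expert selected by the router mask, and untouched tokens keep their original hidden state.
# The shapes and the number of experts are invented for illustration.
import torch

hidden_states = torch.randn(2, 5, 8)    # [batch_size, sequence_length, d_model]
router_mask = torch.nn.functional.one_hot(torch.randint(0, 2, (2, 5)), num_classes=2)
experts = [torch.nn.Linear(8, 8) for _ in range(2)]

with torch.no_grad():
    next_states = hidden_states.clone()
    for idx, expert in enumerate(experts):
        token_indices = router_mask[:, :, idx].bool()
        next_states[token_indices] = expert(hidden_states[token_indices])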
308
+ class SwitchTransformersLayerFF(nn.Module):
309
+ r"""
310
+ Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.
311
+
312
+ Parameters:
313
+ config : ([`SwitchTransformersConfig`]): Model configuration class with all the parameters of the model.
314
+ Initializing with a config file does not load the weights associated with the model, only the
315
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
316
+ is_sparse (`bool`):
317
+ Whether the MLP layer is a `Sparse` layer (contains a Mixture of Experts) or not
318
+ """
319
+
320
+ def __init__(self, config: SwitchTransformersConfig, is_sparse=False):
321
+ super().__init__()
322
+ self.is_sparse = is_sparse
323
+
324
+ # Check if it is a sparse layer, if not then it is a dense layer
325
+ if not self.is_sparse:
326
+ self.mlp = SwitchTransformersDenseActDense(config)
327
+ else:
328
+ self.mlp = SwitchTransformersSparseMLP(config)
329
+
330
+ self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
331
+ self.dropout = nn.Dropout(config.dropout_rate)
332
+
333
+ def forward(self, hidden_states, output_router_logits):
334
+ forwarded_states = self.layer_norm(hidden_states)
335
+ forwarded_states = self.mlp(forwarded_states)
336
+
337
+ if isinstance(forwarded_states, tuple):
338
+ forwarded_states, router_tuple = forwarded_states
339
+ else:
340
+ router_tuple = None
341
+
342
+ output = hidden_states + self.dropout(forwarded_states)
343
+
344
+ if output_router_logits and router_tuple is not None:
345
+ output = (output, router_tuple)
346
+
347
+ return output
348
+
349
+
350
+ # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->SwitchTransformers
351
+ class SwitchTransformersAttention(nn.Module):
352
+ def __init__(self, config: SwitchTransformersConfig, has_relative_attention_bias=False):
353
+ super().__init__()
354
+ self.is_decoder = config.is_decoder
355
+ self.has_relative_attention_bias = has_relative_attention_bias
356
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
357
+ self.relative_attention_max_distance = config.relative_attention_max_distance
358
+ self.d_model = config.d_model
359
+ self.key_value_proj_dim = config.d_kv
360
+ self.n_heads = config.num_heads
361
+ self.dropout = config.dropout_rate
362
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
363
+
364
+ # Mesh TensorFlow initialization to avoid scaling before softmax
365
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
366
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
367
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
368
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
369
+
370
+ if self.has_relative_attention_bias:
371
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
372
+ self.pruned_heads = set()
373
+ self.gradient_checkpointing = False
374
+
375
+ def prune_heads(self, heads):
376
+ if len(heads) == 0:
377
+ return
378
+ heads, index = find_pruneable_heads_and_indices(
379
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
380
+ )
381
+ # Prune linear layers
382
+ self.q = prune_linear_layer(self.q, index)
383
+ self.k = prune_linear_layer(self.k, index)
384
+ self.v = prune_linear_layer(self.v, index)
385
+ self.o = prune_linear_layer(self.o, index, dim=1)
386
+ # Update hyper params
387
+ self.n_heads = self.n_heads - len(heads)
388
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
389
+ self.pruned_heads = self.pruned_heads.union(heads)
390
+
391
+ @staticmethod
392
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
393
+ """
394
+ Adapted from Mesh Tensorflow:
395
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
396
+
397
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
398
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
399
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
400
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
401
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
402
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
403
+
404
+ Args:
405
+ relative_position: an int32 Tensor
406
+ bidirectional: a boolean - whether the attention is bidirectional
407
+ num_buckets: an integer
408
+ max_distance: an integer
409
+
410
+ Returns:
411
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
412
+ """
413
+ relative_buckets = 0
414
+ if bidirectional:
415
+ num_buckets //= 2
416
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
417
+ relative_position = torch.abs(relative_position)
418
+ else:
419
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
420
+ # now relative_position is in the range [0, inf)
421
+
422
+ # half of the buckets are for exact increments in positions
423
+ max_exact = num_buckets // 2
424
+ is_small = relative_position < max_exact
425
+
426
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
427
+ relative_position_if_large = max_exact + (
428
+ torch.log(relative_position.float() / max_exact)
429
+ / math.log(max_distance / max_exact)
430
+ * (num_buckets - max_exact)
431
+ ).to(torch.long)
432
+ relative_position_if_large = torch.min(
433
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
434
+ )
435
+
436
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
437
+ return relative_buckets
438
+
439
+ def compute_bias(self, query_length, key_length, device=None):
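# Quick look at the bucketing above with toy offsets (a sketch, not part of the file): small relative
# distances get their own bucket, larger ones share logarithmically spaced buckets.
import torch

from transformers.models.switch_transformers.modeling_switch_transformers import SwitchTransformersAttention

relative_position = torch.arange(-4, 5).unsqueeze(0)
buckets = SwitchTransformersAttention._relative_position_bucket(
    relative_position, bidirectional=True, num_buckets=32, max_distance=128
)
print(buckets)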
440
+ """Compute binned relative position bias"""
441
+ if device is None:
442
+ device = self.relative_attention_bias.weight.device
443
+ context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
444
+ memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
445
+ relative_position = memory_position - context_position # shape (query_length, key_length)
446
+ relative_position_bucket = self._relative_position_bucket(
447
+ relative_position, # shape (query_length, key_length)
448
+ bidirectional=(not self.is_decoder),
449
+ num_buckets=self.relative_attention_num_buckets,
450
+ max_distance=self.relative_attention_max_distance,
451
+ )
452
+ values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
453
+ values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
454
+ return values
455
+
456
+ def forward(
457
+ self,
458
+ hidden_states,
459
+ mask=None,
460
+ key_value_states=None,
461
+ position_bias=None,
462
+ past_key_value=None,
463
+ layer_head_mask=None,
464
+ query_length=None,
465
+ use_cache=False,
466
+ output_attentions=False,
467
+ ):
468
+ """
469
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
470
+ """
471
+ # Input is (batch_size, seq_length, dim)
472
+ # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
473
+ # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
474
+ batch_size, seq_length = hidden_states.shape[:2]
475
+
476
+ real_seq_length = seq_length
477
+
478
+ if past_key_value is not None:
479
+ if len(past_key_value) != 2:
480
+ raise ValueError(
481
+ f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
482
+ )
483
+ real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
484
+
485
+ key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
486
+
487
+ def shape(states):
488
+ """projection"""
489
+ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
490
+
491
+ def unshape(states):
492
+ """reshape"""
493
+ return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
494
+
495
+ def project(hidden_states, proj_layer, key_value_states, past_key_value):
496
+ """projects hidden states correctly to key/query states"""
497
+ if key_value_states is None:
498
+ # self-attn
499
+ # (batch_size, n_heads, seq_length, dim_per_head)
500
+ hidden_states = shape(proj_layer(hidden_states))
501
+ elif past_key_value is None:
502
+ # cross-attn
503
+ # (batch_size, n_heads, seq_length, dim_per_head)
504
+ hidden_states = shape(proj_layer(key_value_states))
505
+
506
+ if past_key_value is not None:
507
+ if key_value_states is None:
508
+ # self-attn
509
+ # (batch_size, n_heads, key_length, dim_per_head)
510
+ hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
511
+ elif past_key_value.shape[2] != key_value_states.shape[1]:
512
+ # checking that the `sequence_length` of the `past_key_value` is the same as
513
+ # the provided `key_value_states` to support prefix tuning
514
+ # cross-attn
515
+ # (batch_size, n_heads, seq_length, dim_per_head)
516
+ hidden_states = shape(proj_layer(key_value_states))
517
+ else:
518
+ # cross-attn
519
+ hidden_states = past_key_value
520
+ return hidden_states
521
+
522
+ # get query states
523
+ query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
524
+
525
+ # get key/value states
526
+ key_states = project(
527
+ hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
528
+ )
529
+ value_states = project(
530
+ hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
531
+ )
532
+
533
+ # compute scores
534
+ scores = torch.matmul(
535
+ query_states, key_states.transpose(3, 2)
536
+ ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
537
+
538
+ if position_bias is None:
539
+ if not self.has_relative_attention_bias:
540
+ position_bias = torch.zeros(
541
+ (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
542
+ )
543
+ if self.gradient_checkpointing and self.training:
544
+ position_bias.requires_grad = True
545
+ else:
546
+ position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
547
+
548
+ # if key and values are already calculated
549
+ # we want only the last query position bias
550
+ if past_key_value is not None:
551
+ position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
552
+
553
+ if mask is not None:
554
+ position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
555
+
556
+ if self.pruned_heads:
557
+ mask = torch.ones(position_bias.shape[1])
558
+ mask[list(self.pruned_heads)] = 0
559
+ position_bias_masked = position_bias[:, mask.bool()]
560
+ else:
561
+ position_bias_masked = position_bias
562
+
563
+ scores += position_bias_masked
564
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
565
+ scores
566
+ ) # (batch_size, n_heads, seq_length, key_length)
567
+ attn_weights = nn.functional.dropout(
568
+ attn_weights, p=self.dropout, training=self.training
569
+ ) # (batch_size, n_heads, seq_length, key_length)
570
+
571
+ # Mask heads if we want to
572
+ if layer_head_mask is not None:
573
+ attn_weights = attn_weights * layer_head_mask
574
+
575
+ attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
576
+ attn_output = self.o(attn_output)
577
+
578
+ present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
579
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
580
+
581
+ if output_attentions:
582
+ outputs = outputs + (attn_weights,)
583
+ return outputs
584
+
585
+
586
+ # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->SwitchTransformers
587
+ class SwitchTransformersLayerSelfAttention(nn.Module):
588
+ def __init__(self, config, has_relative_attention_bias=False):
589
+ super().__init__()
590
+ self.SelfAttention = SwitchTransformersAttention(
591
+ config, has_relative_attention_bias=has_relative_attention_bias
592
+ )
593
+ self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
594
+ self.dropout = nn.Dropout(config.dropout_rate)
595
+
596
+ def forward(
597
+ self,
598
+ hidden_states,
599
+ attention_mask=None,
600
+ position_bias=None,
601
+ layer_head_mask=None,
602
+ past_key_value=None,
603
+ use_cache=False,
604
+ output_attentions=False,
605
+ ):
606
+ normed_hidden_states = self.layer_norm(hidden_states)
607
+ attention_output = self.SelfAttention(
608
+ normed_hidden_states,
609
+ mask=attention_mask,
610
+ position_bias=position_bias,
611
+ layer_head_mask=layer_head_mask,
612
+ past_key_value=past_key_value,
613
+ use_cache=use_cache,
614
+ output_attentions=output_attentions,
615
+ )
616
+ hidden_states = hidden_states + self.dropout(attention_output[0])
617
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
618
+ return outputs
619
+
620
+
621
+ # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->SwitchTransformers
622
+ class SwitchTransformersLayerCrossAttention(nn.Module):
623
+ def __init__(self, config):
624
+ super().__init__()
625
+ self.EncDecAttention = SwitchTransformersAttention(config, has_relative_attention_bias=False)
626
+ self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
627
+ self.dropout = nn.Dropout(config.dropout_rate)
628
+
629
+ def forward(
630
+ self,
631
+ hidden_states,
632
+ key_value_states,
633
+ attention_mask=None,
634
+ position_bias=None,
635
+ layer_head_mask=None,
636
+ past_key_value=None,
637
+ use_cache=False,
638
+ query_length=None,
639
+ output_attentions=False,
640
+ ):
641
+ normed_hidden_states = self.layer_norm(hidden_states)
642
+ attention_output = self.EncDecAttention(
643
+ normed_hidden_states,
644
+ mask=attention_mask,
645
+ key_value_states=key_value_states,
646
+ position_bias=position_bias,
647
+ layer_head_mask=layer_head_mask,
648
+ past_key_value=past_key_value,
649
+ use_cache=use_cache,
650
+ query_length=query_length,
651
+ output_attentions=output_attentions,
652
+ )
653
+ layer_output = hidden_states + self.dropout(attention_output[0])
654
+ outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
655
+ return outputs
656
+
657
+
658
+ class SwitchTransformersBlock(nn.Module):
659
+ def __init__(self, config, has_relative_attention_bias=False, is_sparse=False):
660
+ super().__init__()
661
+ self.is_decoder = config.is_decoder
662
+ self.is_sparse = is_sparse
663
+ self.layer = nn.ModuleList()
664
+ self.layer.append(
665
+ SwitchTransformersLayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias)
666
+ )
667
+ if self.is_decoder:
668
+ self.layer.append(SwitchTransformersLayerCrossAttention(config))
669
+
670
+ self.layer.append(SwitchTransformersLayerFF(config, is_sparse=self.is_sparse))
671
+
672
+ def forward(
673
+ self,
674
+ hidden_states,
675
+ attention_mask=None,
676
+ position_bias=None,
677
+ encoder_hidden_states=None,
678
+ encoder_attention_mask=None,
679
+ encoder_decoder_position_bias=None,
680
+ layer_head_mask=None,
681
+ cross_attn_layer_head_mask=None,
682
+ past_key_value=None,
683
+ use_cache=False,
684
+ output_attentions=False,
685
+ output_router_logits=True,
686
+ return_dict=True,
687
+ ):
688
+ if past_key_value is not None:
689
+ if not self.is_decoder:
690
+ logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.")
691
+ expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
692
+
693
+ if len(past_key_value) != expected_num_past_key_values:
694
+ raise ValueError(
695
+ f"There should be {expected_num_past_key_values} past states. "
696
+ f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
697
+ f"Got {len(past_key_value)} past key / value states"
698
+ )
699
+
700
+ self_attn_past_key_value = past_key_value[:2]
701
+ cross_attn_past_key_value = past_key_value[2:]
702
+ else:
703
+ self_attn_past_key_value, cross_attn_past_key_value = None, None
704
+
705
+ self_attention_outputs = self.layer[0](
706
+ hidden_states,
707
+ attention_mask=attention_mask,
708
+ position_bias=position_bias,
709
+ layer_head_mask=layer_head_mask,
710
+ past_key_value=self_attn_past_key_value,
711
+ use_cache=use_cache,
712
+ output_attentions=output_attentions,
713
+ )
714
+ hidden_states, present_key_value_state = self_attention_outputs[:2]
715
+ attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
716
+
717
+ # clamp inf values to enable fp16 training
718
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
719
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
720
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
721
+
722
+ do_cross_attention = self.is_decoder and encoder_hidden_states is not None
723
+ if do_cross_attention:
724
+ # the actual query length is unknown for cross attention
725
+ # if using past key value states. Need to inject it here
726
+ if present_key_value_state is not None:
727
+ query_length = present_key_value_state[0].shape[2]
728
+ else:
729
+ query_length = None
730
+
731
+ cross_attention_outputs = self.layer[1](
732
+ hidden_states,
733
+ key_value_states=encoder_hidden_states,
734
+ attention_mask=encoder_attention_mask,
735
+ position_bias=encoder_decoder_position_bias,
736
+ layer_head_mask=cross_attn_layer_head_mask,
737
+ past_key_value=cross_attn_past_key_value,
738
+ query_length=query_length,
739
+ use_cache=use_cache,
740
+ output_attentions=output_attentions,
741
+ )
742
+ hidden_states = cross_attention_outputs[0]
743
+
744
+ # clamp inf values to enable fp16 training
745
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
746
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
747
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
748
+
749
+ # Combine self attn and cross attn key value states
750
+ if present_key_value_state is not None:
751
+ present_key_value_state = present_key_value_state + cross_attention_outputs[1]
752
+
753
+ # Keep cross-attention outputs and relative position weights
754
+ attention_outputs = attention_outputs + cross_attention_outputs[2:]
755
+
756
+ # Apply Feed Forward layer
757
+ hidden_states = self.layer[-1](hidden_states, output_router_logits)
758
+
759
+ if isinstance(hidden_states, tuple):
760
+ hidden_states, router_tuple = hidden_states
761
+ else:
762
+ router_tuple = (torch.zeros((1,), device=hidden_states.device, dtype=torch.int64),)
763
+
764
+ # clamp inf values to enable fp16 training
765
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
766
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
767
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
768
+
769
+ outputs = (hidden_states,)
770
+
771
+ if use_cache:
772
+ outputs = outputs + (present_key_value_state,) + attention_outputs + (router_tuple,)
773
+ else:
774
+ outputs = outputs + attention_outputs + (router_tuple,)
775
+
776
+ return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights), (router_tuple)
777
+
778
+
779
+ class SwitchTransformersPreTrainedModel(PreTrainedModel):
780
+ """
781
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
782
+ models.
783
+ """
784
+
785
+ config_class = SwitchTransformersConfig
786
+ base_model_prefix = "switch_transformers"
787
+ supports_gradient_checkpointing = True
788
+ _no_split_modules = ["SwitchTransformersBlock"]
789
+
790
+ @property
791
+ def dummy_inputs(self):
792
+ input_ids = torch.tensor(DUMMY_INPUTS)
793
+ input_mask = torch.tensor(DUMMY_MASK)
794
+ dummy_inputs = {
795
+ "decoder_input_ids": input_ids,
796
+ "input_ids": input_ids,
797
+ "decoder_attention_mask": input_mask,
798
+ }
799
+ return dummy_inputs
800
+
801
+ def _init_weights(self, module):
802
+ """Initialize the weights"""
803
+ factor = self.config.initializer_factor # Used for testing weights initialization
804
+ if isinstance(module, SwitchTransformersLayerNorm):
805
+ module.weight.data.fill_(factor * 1.0)
806
+ elif isinstance(
807
+ module,
808
+ (SwitchTransformersModel, SwitchTransformersForConditionalGeneration, SwitchTransformersEncoderModel),
809
+ ):
810
+ # Mesh TensorFlow embeddings initialization
811
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
812
+ module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
813
+ if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
814
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
815
+ elif isinstance(module, SwitchTransformersDenseActDense):
816
+ # Mesh TensorFlow FF initialization
817
+ # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
818
+ # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
819
+ module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
820
+ if hasattr(module.wi, "bias") and module.wi.bias is not None:
821
+ module.wi.bias.data.zero_()
822
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
823
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
824
+ module.wo.bias.data.zero_()
825
+ elif isinstance(module, SwitchTransformersAttention):
826
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
827
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
828
+ d_model = self.config.d_model
829
+ key_value_proj_dim = self.config.d_kv
830
+ n_heads = self.config.num_heads
831
+ module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
832
+ module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
833
+ module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
834
+ module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
835
+ if module.has_relative_attention_bias:
836
+ module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
837
+ elif isinstance(module, SwitchTransformersSparseMLP):
838
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
839
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
840
+ d_model = self.config.d_model
841
+ key_value_proj_dim = self.config.d_kv
842
+ n_heads = self.config.num_heads
843
+ module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1)
844
+ for idx in range(self.config.num_experts):
845
+ module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
846
+ module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
847
+
848
+ def _shift_right(self, input_ids):
849
+ decoder_start_token_id = self.config.decoder_start_token_id
850
+ pad_token_id = self.config.pad_token_id
851
+
852
+ if decoder_start_token_id is None:
853
+ raise ValueError(
854
+ "self.model.config.decoder_start_token_id has to be defined. In SwitchTransformers it is usually set"
855
+ " to the pad_token_id. See SwitchTransformers docs for more information"
856
+ )
857
+
858
+ # shift inputs to the right
859
+ if is_torch_fx_proxy(input_ids):
860
+ # Item assignment is not supported natively for proxies.
861
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
862
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
863
+ else:
864
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
865
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
866
+ shifted_input_ids[..., 0] = decoder_start_token_id
867
+
868
+ if pad_token_id is None:
869
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
870
+ # replace possible -100 values in labels by `pad_token_id`
871
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
872
+
873
+ return shifted_input_ids
874
+
875
+
876
+ class SwitchTransformersStack(SwitchTransformersPreTrainedModel):
877
+ def __init__(self, config, embed_tokens=None):
878
+ super().__init__(config)
879
+
880
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
881
+
882
+ if embed_tokens is not None:
883
+ self.embed_tokens.weight = embed_tokens.weight
884
+
885
+ self.is_decoder = config.is_decoder
886
+
887
+ sparse_step = config.decoder_sparse_step if self.is_decoder else config.encoder_sparse_step
888
+ config.num_layers = config.num_decoder_layers if self.is_decoder else config.num_layers
889
+ self.block = nn.ModuleList()
890
+ for i in range(config.num_layers):
891
+ is_sparse = (i % sparse_step == 1 or sparse_step == 1) if sparse_step > 0 else False
892
+
893
+ self.block.append(
894
+ SwitchTransformersBlock(config, has_relative_attention_bias=bool(i == 0), is_sparse=is_sparse)
895
+ )
896
+
897
+ self.final_layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
898
+ self.dropout = nn.Dropout(config.dropout_rate)
899
+
900
+ # Initialize weights and apply final processing
901
+ self.post_init()
902
+
903
+ self.device_map = None
904
+ self.gradient_checkpointing = False
905
+
906
+ def get_input_embeddings(self):
907
+ return self.embed_tokens
908
+
909
+ def set_input_embeddings(self, new_embeddings):
910
+ self.embed_tokens = new_embeddings
911
+
912
+ def forward(
913
+ self,
914
+ input_ids=None,
915
+ attention_mask=None,
916
+ encoder_hidden_states=None,
917
+ encoder_attention_mask=None,
918
+ inputs_embeds=None,
919
+ head_mask=None,
920
+ cross_attn_head_mask=None,
921
+ past_key_values=None,
922
+ use_cache=None,
923
+ output_attentions=None,
924
+ output_hidden_states=None,
925
+ output_router_logits=True,
926
+ return_dict=None,
927
+ ):
928
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
929
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
930
+ output_hidden_states = (
931
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
932
+ )
933
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
934
+
935
+ if input_ids is not None and inputs_embeds is not None:
936
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
937
+ raise ValueError(
938
+ f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
939
+ )
940
+ elif input_ids is not None:
941
+ input_shape = input_ids.size()
942
+ input_ids = input_ids.view(-1, input_shape[-1])
943
+ elif inputs_embeds is not None:
944
+ input_shape = inputs_embeds.size()[:-1]
945
+ else:
946
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
947
+ raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
948
+
949
+ if inputs_embeds is None:
950
+ if self.embed_tokens is None:
951
+ raise ValueError("You have to initialize the model with valid token embeddings")
952
+ inputs_embeds = self.embed_tokens(input_ids)
953
+
954
+ batch_size, seq_length = input_shape
955
+
956
+ # required mask seq length can be calculated via length of past
957
+ mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
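+ # (editorial note, not in the original file) e.g. during cached generation with 7 past
+ # tokens and 1 new token, mask_seq_length is 8, so the default mask built below also
+ # covers the cached positions.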
958
+
959
+ if use_cache is True:
960
+ if not self.is_decoder:
961
+ raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder")
962
+
963
+ if attention_mask is None:
964
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
965
+ if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
966
+ encoder_seq_length = encoder_hidden_states.shape[1]
967
+ encoder_attention_mask = torch.ones(
968
+ batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
969
+ )
970
+
971
+ # initialize past_key_values with `None` if past does not exist
972
+ if past_key_values is None:
973
+ past_key_values = [None] * len(self.block)
974
+
975
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
976
+ # ourselves in which case we just need to make it broadcastable to all heads.
977
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
978
+
979
+ # If a 2D or 3D attention mask is provided for the cross-attention
980
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
981
+ if self.is_decoder and encoder_hidden_states is not None:
982
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
983
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
984
+ if encoder_attention_mask is None:
985
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
986
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
987
+ else:
988
+ encoder_extended_attention_mask = None
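+ # (editorial note, not in the original file) both helpers come from `PreTrainedModel`
+ # and turn the 0/1 masks into additive biases, 0 for visible positions and a large
+ # negative value for masked ones, broadcastable over the attention heads.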
989
+
990
+ if self.gradient_checkpointing and self.training:
991
+ if use_cache:
992
+ logger.warning_once(
993
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
994
+ )
995
+ use_cache = False
996
+
997
+ # Prepare head mask if needed
998
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
999
+ cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
1000
+ present_key_value_states = () if use_cache else None
1001
+ all_hidden_states = () if output_hidden_states else None
1002
+ all_attentions = () if output_attentions else None
1003
+ all_router_probs = () if output_router_logits else None
1004
+ all_cross_attentions = () if (output_attentions and self.is_decoder) else None
1005
+ position_bias = None
1006
+ encoder_decoder_position_bias = None
1007
+
1008
+ hidden_states = self.dropout(inputs_embeds)
1009
+
1010
+ for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
1011
+ layer_head_mask = head_mask[i]
1012
+ cross_attn_layer_head_mask = cross_attn_head_mask[i]
1013
+
1014
+ if output_hidden_states:
1015
+ all_hidden_states = all_hidden_states + (hidden_states,)
1016
+
1017
+ if self.gradient_checkpointing and self.training:
1018
+ layer_outputs = self._gradient_checkpointing_func(
1019
+ layer_module.forward,
1020
+ hidden_states,
1021
+ extended_attention_mask,
1022
+ position_bias,
1023
+ encoder_hidden_states,
1024
+ encoder_extended_attention_mask,
1025
+ encoder_decoder_position_bias,
1026
+ layer_head_mask,
1027
+ cross_attn_layer_head_mask,
1028
+ None, # past_key_value is always None with gradient checkpointing
1029
+ use_cache,
1030
+ output_attentions,
1031
+ )
1032
+ else:
1033
+ layer_outputs = layer_module(
1034
+ hidden_states,
1035
+ attention_mask=extended_attention_mask,
1036
+ position_bias=position_bias,
1037
+ encoder_hidden_states=encoder_hidden_states,
1038
+ encoder_attention_mask=encoder_extended_attention_mask,
1039
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
1040
+ layer_head_mask=layer_head_mask,
1041
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
1042
+ past_key_value=past_key_value,
1043
+ use_cache=use_cache,
1044
+ output_attentions=output_attentions,
1045
+ output_router_logits=output_router_logits,
1046
+ )
1047
+
1048
+ router_probs = layer_outputs[-1]
1049
+ layer_outputs = layer_outputs[:-1]
1050
+
1051
+ # layer_outputs is a tuple with:
1052
+ # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
1053
+ if use_cache is False:
1054
+ layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
1055
+
1056
+ hidden_states, present_key_value_state = layer_outputs[:2]
1057
+
1058
+ # We share the position biases between the layers - the first layer stores them
1059
+ # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
1060
+ # (cross-attention position bias), (cross-attention weights)
1061
+ position_bias = layer_outputs[2]
1062
+ if self.is_decoder and encoder_hidden_states is not None:
1063
+ encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
1064
+ # append next layer key value states
1065
+ if use_cache:
1066
+ present_key_value_states = present_key_value_states + (present_key_value_state,)
1067
+
1068
+ if output_attentions:
1069
+ all_attentions = all_attentions + (layer_outputs[3],)
1070
+ if self.is_decoder:
1071
+ all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
1072
+
1073
+ if output_router_logits:
1074
+ all_router_probs = all_router_probs + (router_probs,)
1075
+
1076
+ hidden_states = self.final_layer_norm(hidden_states)
1077
+ hidden_states = self.dropout(hidden_states)
1078
+
1079
+ # Add last layer
1080
+ if output_hidden_states:
1081
+ all_hidden_states = all_hidden_states + (hidden_states,)
1082
+
1083
+ if not return_dict:
1084
+ return tuple(
1085
+ v
1086
+ for v in [
1087
+ hidden_states,
1088
+ present_key_value_states,
1089
+ all_hidden_states,
1090
+ all_attentions,
1091
+ all_cross_attentions,
1092
+ all_router_probs,
1093
+ ]
1094
+ if v is not None
1095
+ )
1096
+ return MoEModelOutputWithPastAndCrossAttentions(
1097
+ last_hidden_state=hidden_states,
1098
+ past_key_values=present_key_value_states,
1099
+ hidden_states=all_hidden_states,
1100
+ attentions=all_attentions,
1101
+ cross_attentions=all_cross_attentions,
1102
+ router_probs=all_router_probs,
1103
+ )
1104
+
1105
+
1106
+ SWITCH_TRANSFORMERS_START_DOCSTRING = r"""
1107
+
1108
+ The SWITCH_TRANSFORMERS model was proposed in [Switch Transformers: Scaling to Trillion Parameter Models with
1109
+ Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by [William
1110
+ Fedus](https://arxiv.org/search/cs?searchtype=author&query=Fedus%2C+W), [Barret
1111
+ Zoph](https://arxiv.org/search/cs?searchtype=author&query=Zoph%2C+B), and [Noam
1112
+ Shazeer](https://arxiv.org/search/cs?searchtype=author&query=Shazeer%2C+N). It's an encoder-decoder T5-like model
1113
+ with sparse feed-forward layers, which form a Mixture of Experts (MoE) architecture.
1114
+
1115
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1116
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1117
+ etc.)
1118
+
1119
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1120
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
1121
+ and behavior.
1122
+
1123
+ Parameters:
1124
+ config ([`SwitchTransformersConfig`]): Model configuration class with all the parameters of the model.
1125
+ Initializing with a config file does not load the weights associated with the model, only the
1126
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1127
+ """
1128
+
1129
+ SWITCH_TRANSFORMERS_INPUTS_DOCSTRING = r"""
1130
+ Args:
1131
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1132
+ Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position
1133
+ embeddings so you should be able to pad the inputs on both the right and the left.
1134
+
1135
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1136
+ [`PreTrainedTokenizer.__call__`] for details.
1137
+
1138
+ [What are input IDs?](../glossary#input-ids)
1139
+
1140
+ To learn more about how to prepare `input_ids` for pretraining, take a look at [SWITCH_TRANSFORMERS
1141
+ Training](./switch_transformers#training).
1142
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1143
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1144
+
1145
+ - 1 for tokens that are **not masked**,
1146
+ - 0 for tokens that are **masked**.
1147
+
1148
+ [What are attention masks?](../glossary#attention-mask)
1149
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1150
+ Indices of decoder input sequence tokens in the vocabulary.
1151
+
1152
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1153
+ [`PreTrainedTokenizer.__call__`] for details.
1154
+
1155
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
1156
+
1157
+ SWITCH_TRANSFORMERS uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
1158
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1159
+ `past_key_values`).
1160
+
1161
+ To learn more about how to prepare `decoder_input_ids` for pretraining, take a look at [SWITCH_TRANSFORMERS
1162
+ Training](./switch_transformers#training).
1163
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1164
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
1165
+ be used by default.
1166
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1167
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1168
+ 1]`:
1169
+
1170
+ - 1 indicates the head is **not masked**,
1171
+ - 0 indicates the head is **masked**.
1172
+
1173
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1174
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1175
+ 1]`:
1176
+
1177
+ - 1 indicates the head is **not masked**,
1178
+ - 0 indicates the head is **masked**.
1179
+
1180
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1181
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
1182
+ `[0, 1]`:
1183
+
1184
+ - 1 indicates the head is **not masked**,
1185
+ - 0 indicates the head is **masked**.
1186
+
1187
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
1188
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
1189
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
1190
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
1191
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1192
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1193
+
1194
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1195
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1196
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1197
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1198
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1199
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1200
+ model's internal embedding lookup matrix.
1201
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
1202
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
1203
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
1204
+ input (see `past_key_values`). This is useful if you want more control over how to convert
1205
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
1206
+
1207
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
1208
+ of `inputs_embeds`.
1209
+
1210
+ use_cache (`bool`, *optional*):
1211
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1212
+ `past_key_values`).
1213
+
1214
+ output_attentions (`bool`, *optional*):
1215
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1216
+ tensors for more detail.
1217
+ output_hidden_states (`bool`, *optional*):
1218
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1219
+ more detail.
1220
+ output_router_logits (`bool`, *optional*):
1221
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1222
+ should not be returned during inference.
1223
+ return_dict (`bool`, *optional*):
1224
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1225
+ """
1226
+
1227
+ SWITCH_TRANSFORMERS_ENCODER_INPUTS_DOCSTRING = r"""
1228
+ Args:
1229
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1230
+ Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position
1231
+ embeddings so you should be able to pad the inputs on both the right and the left.
1232
+
1233
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1234
+ [`PreTrainedTokenizer.__call__`] for details.
1235
+
1236
+ To learn more about how to prepare `input_ids` for pretraining, take a look at [SWITCH_TRANSFORMERS
1237
+ Training](./switch_transformers#training).
1238
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1239
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1240
+
1241
+ - 1 for tokens that are **not masked**,
1242
+ - 0 for tokens that are **masked**.
1243
+
1244
+ [What are attention masks?](../glossary#attention-mask)
1245
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1246
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1247
+
1248
+ - 1 indicates the head is **not masked**,
1249
+ - 0 indicates the head is **masked**.
1250
+
1251
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1252
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1253
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1254
+ model's internal embedding lookup matrix.
1255
+ output_attentions (`bool`, *optional*):
1256
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1257
+ tensors for more detail.
1258
+ output_hidden_states (`bool`, *optional*):
1259
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1260
+ more detail.
1261
+ output_router_logits (`bool`, *optional*):
1262
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1263
+ should not be returned during inference.
1264
+ return_dict (`bool`, *optional*):
1265
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1266
+ """
1267
+
1268
+ # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
1269
+ __HEAD_MASK_WARNING_MSG = """
1270
+ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
1271
+ `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
1272
+ If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
1273
+ num_heads)`.
1274
+ """
1275
+
1276
+
1277
+ @add_start_docstrings(
1278
+ "The bare SWITCH_TRANSFORMERS Model transformer outputting raw hidden-states without any specific head on top.",
1279
+ SWITCH_TRANSFORMERS_START_DOCSTRING,
1280
+ )
1281
+ class SwitchTransformersModel(SwitchTransformersPreTrainedModel):
1282
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
1283
+
1284
+ def __init__(self, config: SwitchTransformersConfig):
1285
+ super().__init__(config)
1286
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
1287
+
1288
+ encoder_config = copy.deepcopy(config)
1289
+ encoder_config.is_decoder = False
1290
+ encoder_config.use_cache = False
1291
+ encoder_config.is_encoder_decoder = False
1292
+ self.encoder = SwitchTransformersStack(encoder_config, self.shared)
1293
+
1294
+ decoder_config = copy.deepcopy(config)
1295
+ decoder_config.is_decoder = True
1296
+ decoder_config.is_encoder_decoder = False
1297
+ self.decoder = SwitchTransformersStack(decoder_config, self.shared)
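+ # (editorial note, not in the original file) encoder and decoder are built from deep
+ # copies of the config but share the `self.shared` embedding matrix, which is re-tied
+ # in `_tie_weights` when `config.tie_word_embeddings` is set.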
1298
+
1299
+ # Initialize weights and apply final processing
1300
+ self.post_init()
1301
+
1302
+ # Model parallel
1303
+ self.device_map = None
1304
+
1305
+ def get_input_embeddings(self):
1306
+ return self.shared
1307
+
1308
+ def set_input_embeddings(self, new_embeddings):
1309
+ self.shared = new_embeddings
1310
+ self.encoder.set_input_embeddings(new_embeddings)
1311
+ self.decoder.set_input_embeddings(new_embeddings)
1312
+
1313
+ def _tie_weights(self):
1314
+ if self.config.tie_word_embeddings:
1315
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
1316
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
1317
+
1318
+ def get_encoder(self):
1319
+ return self.encoder
1320
+
1321
+ def get_decoder(self):
1322
+ return self.decoder
1323
+
1324
+ def _prune_heads(self, heads_to_prune):
1325
+ """
1326
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1327
+ class PreTrainedModel
1328
+ """
1329
+ for layer, heads in heads_to_prune.items():
1330
+ self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
1331
+
1332
+ @add_start_docstrings_to_model_forward(SWITCH_TRANSFORMERS_INPUTS_DOCSTRING)
1333
+ @replace_return_docstrings(output_type=Seq2SeqMoEModelOutput, config_class=_CONFIG_FOR_DOC)
1334
+ def forward(
1335
+ self,
1336
+ input_ids: Optional[torch.LongTensor] = None,
1337
+ attention_mask: Optional[torch.FloatTensor] = None,
1338
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1339
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
1340
+ head_mask: Optional[torch.FloatTensor] = None,
1341
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
1342
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1343
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1344
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1345
+ inputs_embeds: Optional[torch.Tensor] = None,
1346
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
1347
+ use_cache: Optional[bool] = None,
1348
+ output_attentions: Optional[bool] = None,
1349
+ output_hidden_states: Optional[bool] = None,
1350
+ output_router_logits: Optional[bool] = None,
1351
+ return_dict: Optional[bool] = None,
1352
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqMoEModelOutput]:
1353
+ r"""
1354
+ Returns:
1355
+
1356
+ Example:
1357
+
1358
+ ```python
1359
+ >>> from transformers import AutoTokenizer, SwitchTransformersModel
1360
+
1361
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
1362
+ >>> model = SwitchTransformersModel.from_pretrained("google/switch-base-8")
1363
+
1364
+ >>> input_ids = tokenizer(
1365
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
1366
+ ... ).input_ids # Batch size 1
1367
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
1368
+
1369
+ >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for SwitchTransformersModel.
1370
+ >>> # This is not needed for torch's SwitchTransformersForConditionalGeneration as it does this internally using labels arg.
1371
+ >>> decoder_input_ids = model._shift_right(decoder_input_ids)
1372
+
1373
+ >>> # forward pass
1374
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
1375
+ >>> last_hidden_states = outputs.last_hidden_state
1376
+ ```"""
1377
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1378
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1379
+
1380
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
1381
+ if head_mask is not None and decoder_head_mask is None:
1382
+ if self.config.num_layers == self.config.num_decoder_layers:
1383
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
1384
+ decoder_head_mask = head_mask
1385
+
1386
+ if (
1387
+ output_router_logits
1388
+ and self.config.num_sparse_encoder_layers == 0
1389
+ and self.config.num_sparse_decoder_layers == 0
1390
+ ):
1391
+ raise ValueError(
1392
+ "You asked to return `output_router_logits` but the transformer in dense, and does "
1393
+ " not contain any sparse MLP Layers. Set `output_router_logits = False` and restart"
1394
+ )
1395
+ # Encode if needed (training, first prediction pass)
1396
+ if encoder_outputs is None:
1397
+ encoder_outputs = self.encoder(
1398
+ input_ids=input_ids,
1399
+ attention_mask=attention_mask,
1400
+ inputs_embeds=inputs_embeds,
1401
+ head_mask=head_mask,
1402
+ output_attentions=output_attentions,
1403
+ output_hidden_states=output_hidden_states,
1404
+ output_router_logits=output_router_logits,
1405
+ return_dict=return_dict,
1406
+ )
1407
+ elif return_dict and not isinstance(encoder_outputs, MoEModelOutput):
1408
+ encoder_outputs = MoEModelOutput(
1409
+ last_hidden_state=encoder_outputs[0],
1410
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1411
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1412
+ router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None,
1413
+ )
1414
+
1415
+ hidden_states = encoder_outputs[0]
1416
+
1417
+ # Decode
1418
+ decoder_outputs = self.decoder(
1419
+ input_ids=decoder_input_ids,
1420
+ attention_mask=decoder_attention_mask,
1421
+ inputs_embeds=decoder_inputs_embeds,
1422
+ past_key_values=past_key_values,
1423
+ encoder_hidden_states=hidden_states,
1424
+ encoder_attention_mask=attention_mask,
1425
+ head_mask=decoder_head_mask,
1426
+ cross_attn_head_mask=cross_attn_head_mask,
1427
+ use_cache=use_cache,
1428
+ output_attentions=output_attentions,
1429
+ output_hidden_states=output_hidden_states,
1430
+ output_router_logits=output_router_logits,
1431
+ return_dict=return_dict,
1432
+ )
1433
+
1434
+ if not return_dict:
1435
+ return decoder_outputs + encoder_outputs
1436
+
1437
+ return Seq2SeqMoEModelOutput(
1438
+ last_hidden_state=decoder_outputs.last_hidden_state,
1439
+ past_key_values=decoder_outputs.past_key_values,
1440
+ decoder_hidden_states=decoder_outputs.hidden_states,
1441
+ decoder_attentions=decoder_outputs.attentions,
1442
+ cross_attentions=decoder_outputs.cross_attentions,
1443
+ decoder_router_logits=decoder_outputs.router_probs,
1444
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1445
+ encoder_hidden_states=encoder_outputs.hidden_states,
1446
+ encoder_attentions=encoder_outputs.attentions,
1447
+ encoder_router_logits=encoder_outputs.router_probs,
1448
+ )
1449
+
1450
+
1451
+ @add_start_docstrings(
1452
+ """SWITCH_TRANSFORMERS Model with a `language modeling` head on top.""", SWITCH_TRANSFORMERS_START_DOCSTRING
1453
+ )
1454
+ class SwitchTransformersForConditionalGeneration(SwitchTransformersPreTrainedModel):
1455
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
1456
+
1457
+ def __init__(self, config: SwitchTransformersConfig):
1458
+ super().__init__(config)
1459
+ self.model_dim = config.d_model
1460
+
1461
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
1462
+
1463
+ encoder_config = copy.deepcopy(config)
1464
+ encoder_config.is_decoder = False
1465
+ encoder_config.use_cache = False
1466
+ encoder_config.is_encoder_decoder = False
1467
+ self.encoder = SwitchTransformersStack(encoder_config, self.shared)
1468
+
1469
+ decoder_config = copy.deepcopy(config)
1470
+ decoder_config.is_decoder = True
1471
+ decoder_config.is_encoder_decoder = False
1472
+ decoder_config.num_layers = config.num_decoder_layers
1473
+ self.decoder = SwitchTransformersStack(decoder_config, self.shared)
1474
+
1475
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
1476
+
1477
+ self.router_z_loss_coef = config.router_z_loss_coef
1478
+ self.router_aux_loss_coef = config.router_aux_loss_coef
1479
+
1480
+ # Initialize weights and apply final processing
1481
+ self.post_init()
1482
+
1483
+ # Model parallel
1484
+ self.device_map = None
1485
+
1486
+ def get_input_embeddings(self):
1487
+ return self.shared
1488
+
1489
+ def set_input_embeddings(self, new_embeddings):
1490
+ self.shared = new_embeddings
1491
+ self.encoder.set_input_embeddings(new_embeddings)
1492
+ self.decoder.set_input_embeddings(new_embeddings)
1493
+
1494
+ def _tie_weights(self):
1495
+ if self.config.tie_word_embeddings:
1496
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
1497
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
1498
+
1499
+ def set_output_embeddings(self, new_embeddings):
1500
+ self.lm_head = new_embeddings
1501
+
1502
+ def get_output_embeddings(self):
1503
+ return self.lm_head
1504
+
1505
+ def get_encoder(self):
1506
+ return self.encoder
1507
+
1508
+ def get_decoder(self):
1509
+ return self.decoder
1510
+
1511
+ @add_start_docstrings_to_model_forward(SWITCH_TRANSFORMERS_INPUTS_DOCSTRING)
1512
+ @replace_return_docstrings(output_type=Seq2SeqMoEOutput, config_class=_CONFIG_FOR_DOC)
1513
+ def forward(
1514
+ self,
1515
+ input_ids: Optional[torch.LongTensor] = None,
1516
+ attention_mask: Optional[torch.FloatTensor] = None,
1517
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1518
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
1519
+ head_mask: Optional[torch.FloatTensor] = None,
1520
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
1521
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1522
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1523
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1524
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1525
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1526
+ labels: Optional[torch.LongTensor] = None,
1527
+ use_cache: Optional[bool] = None,
1528
+ output_attentions: Optional[bool] = None,
1529
+ output_hidden_states: Optional[bool] = None,
1530
+ output_router_logits: Optional[bool] = True,
1531
+ return_dict: Optional[bool] = None,
1532
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqMoEOutput]:
1533
+ r"""
1534
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1535
+ Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
1536
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
1537
+ labels in `[0, ..., config.vocab_size - 1]`
1538
+
1539
+ Returns:
1540
+
1541
+ Examples:
1542
+
1543
+ ```python
1544
+ >>> from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration
1545
+
1546
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
1547
+ >>> model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8")
1548
+
1549
+ >>> # training
1550
+ >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
1551
+ >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
1552
+ >>> outputs = model(input_ids=input_ids, labels=labels)
1553
+ >>> loss = outputs.loss
1554
+ >>> logits = outputs.logits
1555
+
1556
+ >>> # inference
1557
+ >>> input_ids = tokenizer(
1558
+ ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
1559
+ ... ).input_ids # Batch size 1
1560
+ >>> outputs = model.generate(input_ids)
1561
+ >>> # . To, let’s say you have a dog. To summarize:
1562
+ >>> # Since the model has been trained on MLM, this will output gibberish
1563
+ ```"""
1564
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1565
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1566
+
1567
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
1568
+ if head_mask is not None and decoder_head_mask is None:
1569
+ if self.config.num_layers == self.config.num_decoder_layers:
1570
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
1571
+ decoder_head_mask = head_mask
1572
+
1573
+ # Encode if needed (training, first prediction pass)
1574
+ if encoder_outputs is None:
1575
+ # Convert encoder inputs in embeddings if needed
1576
+ encoder_outputs = self.encoder(
1577
+ input_ids=input_ids,
1578
+ attention_mask=attention_mask,
1579
+ inputs_embeds=inputs_embeds,
1580
+ head_mask=head_mask,
1581
+ output_attentions=output_attentions,
1582
+ output_hidden_states=output_hidden_states,
1583
+ output_router_logits=output_router_logits,
1584
+ return_dict=return_dict,
1585
+ )
1586
+ elif return_dict and not isinstance(encoder_outputs, MoEModelOutput):
1587
+ encoder_outputs = MoEModelOutput(
1588
+ last_hidden_state=encoder_outputs[0],
1589
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1590
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1591
+ router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None,
1592
+ )
1593
+
1594
+ hidden_states = encoder_outputs[0]
1595
+
1596
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
1597
+ # get decoder inputs from shifting lm labels to the right
1598
+ decoder_input_ids = self._shift_right(labels)
1599
+
1600
+ # Decode
1601
+ decoder_outputs = self.decoder(
1602
+ input_ids=decoder_input_ids,
1603
+ attention_mask=decoder_attention_mask,
1604
+ inputs_embeds=decoder_inputs_embeds,
1605
+ past_key_values=past_key_values,
1606
+ encoder_hidden_states=hidden_states,
1607
+ encoder_attention_mask=attention_mask,
1608
+ head_mask=decoder_head_mask,
1609
+ cross_attn_head_mask=cross_attn_head_mask,
1610
+ use_cache=use_cache,
1611
+ output_attentions=output_attentions,
1612
+ output_hidden_states=output_hidden_states,
1613
+ output_router_logits=output_router_logits,
1614
+ return_dict=return_dict,
1615
+ )
1616
+
1617
+ sequence_output = decoder_outputs[0]
1618
+
1619
+ if self.config.tie_word_embeddings:
1620
+ # Rescale output before projecting on vocab
1621
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
1622
+ sequence_output = sequence_output * (self.model_dim**-0.5)
1623
+
1624
+ lm_logits = self.lm_head(sequence_output)
1625
+
1626
+ loss = None
1627
+ encoder_z_loss = None
1628
+ encoder_aux_loss = None
1629
+ decoder_z_loss = None
1630
+ decoder_aux_loss = None
1631
+
1632
+ if output_router_logits:
1633
+ # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder
1634
+ if self.encoder.config.encoder_sparse_step > 1:
1635
+ encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_outputs[-1])
1636
+ encoder_z_loss = router_z_loss_func(encoder_router_logits)
1637
+ encoder_router_probs = nn.Softmax(dim=-1)(encoder_router_logits)
1638
+ encoder_aux_loss = load_balancing_loss_func(encoder_router_probs, encoder_expert_indexes)
1639
+ else:
1640
+ encoder_z_loss = 0
1641
+ encoder_aux_loss = 0
1642
+
1643
+ if self.decoder.config.decoder_sparse_step > 1:
1644
+ decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_outputs[-1])
1645
+ decoder_z_loss = router_z_loss_func(decoder_router_logits)
1646
+ decoder_router_probs = nn.Softmax(dim=-1)(decoder_router_logits)
1647
+ decoder_aux_loss = load_balancing_loss_func(decoder_router_probs, decoder_expert_indexes)
1648
+ else:
1649
+ decoder_z_loss = 0
1650
+ decoder_aux_loss = 0
1651
+
1652
+ if labels is not None:
1653
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
1654
+ # move labels to correct device to enable PP
1655
+ labels = labels.to(lm_logits.device)
1656
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
1657
+
1658
+ if output_router_logits:
1659
+ z_loss = self.router_z_loss_coef * (encoder_z_loss + decoder_z_loss)
1660
+ aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
1661
+ loss = loss + z_loss + aux_loss
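+ # (editorial note, not in the original file) the total training loss is therefore
+ #   CrossEntropy(lm_logits, labels)
+ #   + router_z_loss_coef * (encoder_z_loss + decoder_z_loss)
+ #   + router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
+ # with the router terms only added when output_router_logits is True.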
1662
+
1663
+ if not return_dict:
1664
+ output = (lm_logits,)
1665
+ if output_router_logits:
1666
+ output += (encoder_z_loss, encoder_aux_loss, decoder_z_loss, decoder_aux_loss)
1667
+ output += (*decoder_outputs[1:], *encoder_outputs)
1668
+
1669
+ return ((loss,) + output) if loss is not None else output
1670
+
1671
+ return Seq2SeqMoEOutput(
1672
+ loss=loss,
1673
+ logits=lm_logits,
1674
+ encoder_z_loss=encoder_z_loss,
1675
+ encoder_aux_loss=encoder_aux_loss,
1676
+ decoder_z_loss=decoder_z_loss,
1677
+ decoder_aux_loss=decoder_aux_loss,
1678
+ past_key_values=decoder_outputs.past_key_values,
1679
+ decoder_hidden_states=decoder_outputs.hidden_states,
1680
+ decoder_attentions=decoder_outputs.attentions,
1681
+ cross_attentions=decoder_outputs.cross_attentions,
1682
+ decoder_router_logits=decoder_outputs.router_probs,
1683
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1684
+ encoder_hidden_states=encoder_outputs.hidden_states,
1685
+ encoder_attentions=encoder_outputs.attentions,
1686
+ encoder_router_logits=encoder_outputs.router_probs,
1687
+ )
1688
+
1689
+ def _unpack_router_logits(self, router_outputs):
1690
+ total_router_logits = []
1691
+ total_expert_indexes = []
1692
+ for router_output in router_outputs:
1693
+ if len(router_output[0].shape) > 1:
1694
+ router_logits, expert_indexes = router_output
1695
+ total_router_logits.append(router_logits)
1696
+ total_expert_indexes.append(expert_indexes)
1697
+ return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1)
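+ # (editorial note, not in the original file) entries whose first element is not at least
+ # 2-D are skipped above, presumably placeholders emitted by dense (non-MoE) layers,
+ # so only genuine router logits and expert indices get concatenated.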
1698
+
1699
+ def prepare_inputs_for_generation(
1700
+ self,
1701
+ input_ids,
1702
+ past_key_values=None,
1703
+ attention_mask=None,
1704
+ head_mask=None,
1705
+ decoder_head_mask=None,
1706
+ cross_attn_head_mask=None,
1707
+ use_cache=None,
1708
+ encoder_outputs=None,
1709
+ **kwargs,
1710
+ ):
1711
+ # cut decoder_input_ids if past_key_values is used
1712
+ if past_key_values is not None:
1713
+ past_length = past_key_values[0][0].shape[2]
1714
+
1715
+ # Some generation methods already pass only the last input ID
1716
+ if input_ids.shape[1] > past_length:
1717
+ remove_prefix_length = past_length
1718
+ else:
1719
+ # Default to old behavior: keep only final ID
1720
+ remove_prefix_length = input_ids.shape[1] - 1
1721
+
1722
+ input_ids = input_ids[:, remove_prefix_length:]
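+ # (editorial note, not in the original file) e.g. with a cache covering 7 past tokens
+ # and 8 input ids, only the final, not-yet-cached id is kept; if the caller already
+ # passed a single id, the fallback branch above likewise keeps just the last one.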
1723
+
1724
+ return {
1725
+ "decoder_input_ids": input_ids,
1726
+ "past_key_values": past_key_values,
1727
+ "encoder_outputs": encoder_outputs,
1728
+ "attention_mask": attention_mask,
1729
+ "head_mask": head_mask,
1730
+ "decoder_head_mask": decoder_head_mask,
1731
+ "cross_attn_head_mask": cross_attn_head_mask,
1732
+ "use_cache": use_cache,
1733
+ }
1734
+
1735
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
1736
+ return self._shift_right(labels)
1737
+
1738
+ def _reorder_cache(self, past_key_values, beam_idx):
1739
+ # if decoder past is not included in output
1740
+ # speedy decoding is disabled and no need to reorder
1741
+ if past_key_values is None:
1742
+ logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
1743
+ return past_key_values
1744
+
1745
+ reordered_decoder_past = ()
1746
+ for layer_past_states in past_key_values:
1747
+ # get the correct batch idx from layer past batch dim
1748
+ # batch dim of `past` is at 2nd position
1749
+ reordered_layer_past_states = ()
1750
+ for layer_past_state in layer_past_states:
1751
+ # need to set correct `past` for each of the four key / value states
1752
+ reordered_layer_past_states = reordered_layer_past_states + (
1753
+ layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
1754
+ )
1755
+
1756
+ if reordered_layer_past_states[0].shape != layer_past_states[0].shape:
1757
+ raise ValueError(
1758
+ "expected reordered_layer_past_states to have the same shape than layer_past_states, "
1759
+ f"but got {reordered_layer_past_states[0].shape} and {layer_past_states[0].shape}"
1760
+ )
1761
+ if len(reordered_layer_past_states) != len(layer_past_states):
1762
+ raise ValueError(
1763
+ "expected layer_past_states to have the same length as reordered_layer_past_states, "
1764
+ f"but got {len(layer_past_states)} and {len(reordered_layer_past_states)}"
1765
+ )
1766
+
1767
+ reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
1768
+ return reordered_decoder_past
1769
+
1770
+
1771
+ @add_start_docstrings(
1772
+ "The bare SWITCH_TRANSFORMERS Model transformer outputting encoder's raw hidden-states without any specific head"
1773
+ " on top.",
1774
+ SWITCH_TRANSFORMERS_START_DOCSTRING,
1775
+ )
1776
+ class SwitchTransformersEncoderModel(SwitchTransformersPreTrainedModel):
1777
+ _tied_weights_keys = ["encoder.embed_tokens.weight"]
1778
+
1779
+ def __init__(self, config: SwitchTransformersConfig):
1780
+ super().__init__(config)
1781
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
1782
+
1783
+ encoder_config = copy.deepcopy(config)
1784
+ encoder_config.use_cache = False
1785
+ encoder_config.is_encoder_decoder = False
1786
+ self.encoder = SwitchTransformersStack(encoder_config, self.shared)
1787
+
1788
+ # Initialize weights and apply final processing
1789
+ self.post_init()
1790
+
1791
+ # Model parallel
1792
+ self.device_map = None
1793
+
1794
+ def get_input_embeddings(self):
1795
+ return self.shared
1796
+
1797
+ def set_input_embeddings(self, new_embeddings):
1798
+ self.shared = new_embeddings
1799
+ self.encoder.set_input_embeddings(new_embeddings)
1800
+
1801
+ def _tie_weights(self):
1802
+ if self.config.tie_word_embeddings:
1803
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
1804
+
1805
+ def get_encoder(self):
1806
+ return self.encoder
1807
+
1808
+ def _prune_heads(self, heads_to_prune):
1809
+ """
1810
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1811
+ class PreTrainedModel
1812
+ """
1813
+ for layer, heads in heads_to_prune.items():
1814
+ self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
1815
+
1816
+ @add_start_docstrings_to_model_forward(SWITCH_TRANSFORMERS_ENCODER_INPUTS_DOCSTRING)
1817
+ @replace_return_docstrings(output_type=MoEModelOutput, config_class=_CONFIG_FOR_DOC)
1818
+ def forward(
1819
+ self,
1820
+ input_ids: Optional[torch.LongTensor] = None,
1821
+ attention_mask: Optional[torch.FloatTensor] = None,
1822
+ head_mask: Optional[torch.FloatTensor] = None,
1823
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1824
+ output_attentions: Optional[bool] = None,
1825
+ output_hidden_states: Optional[bool] = None,
1826
+ output_router_logits: Optional[bool] = True,
1827
+ return_dict: Optional[bool] = None,
1828
+ ) -> Union[Tuple[torch.FloatTensor], MoEModelOutput]:
1829
+ r"""
1830
+ Returns:
1831
+
1832
+ Example:
1833
+
1834
+ ```python
1835
+ >>> from transformers import AutoTokenizer, SwitchTransformersEncoderModel
1836
+
1837
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
1838
+ >>> model = SwitchTransformersEncoderModel.from_pretrained("google/switch-base-8")
1839
+ >>> input_ids = tokenizer(
1840
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
1841
+ ... ).input_ids # Batch size 1
1842
+ >>> outputs = model(input_ids=input_ids)
1843
+ >>> last_hidden_states = outputs.last_hidden_state
1844
+ ```"""
1845
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1846
+
1847
+ encoder_outputs = self.encoder(
1848
+ input_ids=input_ids,
1849
+ attention_mask=attention_mask,
1850
+ inputs_embeds=inputs_embeds,
1851
+ head_mask=head_mask,
1852
+ output_attentions=output_attentions,
1853
+ output_hidden_states=output_hidden_states,
1854
+ output_router_logits=output_router_logits,
1855
+ return_dict=return_dict,
1856
+ )
1857
+
1858
+ return encoder_outputs
llmeval-env/lib/python3.10/site-packages/transformers/models/t5/__init__.py ADDED
@@ -0,0 +1,160 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_sentencepiece_available,
22
+ is_tf_available,
23
+ is_tokenizers_available,
24
+ is_torch_available,
25
+ )
26
+
27
+
28
+ _import_structure = {"configuration_t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config", "T5OnnxConfig"]}
29
+
30
+ try:
31
+ if not is_sentencepiece_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_t5"] = ["T5Tokenizer"]
37
+
38
+ try:
39
+ if not is_tokenizers_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["tokenization_t5_fast"] = ["T5TokenizerFast"]
45
+
46
+ try:
47
+ if not is_torch_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ _import_structure["modeling_t5"] = [
53
+ "T5_PRETRAINED_MODEL_ARCHIVE_LIST",
54
+ "T5EncoderModel",
55
+ "T5ForConditionalGeneration",
56
+ "T5Model",
57
+ "T5PreTrainedModel",
58
+ "load_tf_weights_in_t5",
59
+ "T5ForQuestionAnswering",
60
+ "T5ForSequenceClassification",
61
+ "T5ForTokenClassification",
62
+ ]
63
+
64
+ try:
65
+ if not is_tf_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ _import_structure["modeling_tf_t5"] = [
71
+ "TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST",
72
+ "TFT5EncoderModel",
73
+ "TFT5ForConditionalGeneration",
74
+ "TFT5Model",
75
+ "TFT5PreTrainedModel",
76
+ ]
77
+
78
+ try:
79
+ if not is_flax_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ _import_structure["modeling_flax_t5"] = [
85
+ "FlaxT5EncoderModel",
86
+ "FlaxT5ForConditionalGeneration",
87
+ "FlaxT5Model",
88
+ "FlaxT5PreTrainedModel",
89
+ ]
90
+
91
+
92
+ if TYPE_CHECKING:
93
+ from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config, T5OnnxConfig
94
+
95
+ try:
96
+ if not is_sentencepiece_available():
97
+ raise OptionalDependencyNotAvailable()
98
+ except OptionalDependencyNotAvailable:
99
+ pass
100
+ else:
101
+ from .tokenization_t5 import T5Tokenizer
102
+
103
+ try:
104
+ if not is_tokenizers_available():
105
+ raise OptionalDependencyNotAvailable()
106
+ except OptionalDependencyNotAvailable:
107
+ pass
108
+ else:
109
+ from .tokenization_t5_fast import T5TokenizerFast
110
+
111
+ try:
112
+ if not is_torch_available():
113
+ raise OptionalDependencyNotAvailable()
114
+ except OptionalDependencyNotAvailable:
115
+ pass
116
+ else:
117
+ from .modeling_t5 import (
118
+ T5_PRETRAINED_MODEL_ARCHIVE_LIST,
119
+ T5EncoderModel,
120
+ T5ForConditionalGeneration,
121
+ T5ForQuestionAnswering,
122
+ T5ForSequenceClassification,
123
+ T5ForTokenClassification,
124
+ T5Model,
125
+ T5PreTrainedModel,
126
+ load_tf_weights_in_t5,
127
+ )
128
+
129
+ try:
130
+ if not is_tf_available():
131
+ raise OptionalDependencyNotAvailable()
132
+ except OptionalDependencyNotAvailable:
133
+ pass
134
+ else:
135
+ from .modeling_tf_t5 import (
136
+ TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST,
137
+ TFT5EncoderModel,
138
+ TFT5ForConditionalGeneration,
139
+ TFT5Model,
140
+ TFT5PreTrainedModel,
141
+ )
142
+
143
+ try:
144
+ if not is_flax_available():
145
+ raise OptionalDependencyNotAvailable()
146
+ except OptionalDependencyNotAvailable:
147
+ pass
148
+ else:
149
+ from .modeling_flax_t5 import (
150
+ FlaxT5EncoderModel,
151
+ FlaxT5ForConditionalGeneration,
152
+ FlaxT5Model,
153
+ FlaxT5PreTrainedModel,
154
+ )
155
+
156
+
157
+ else:
158
+ import sys
159
+
160
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)